from datetime import timedelta
import numpy as np
import pytest
from pandas._libs import iNaT
import pandas as pd
from pandas import (
Categorical,
DataFrame,
Index,
IntervalIndex,
NaT,
Series,
Timestamp,
date_range,
isna,
)
import pandas._testing as tm
class TestSeriesMissingData:
def test_categorical_nan_equality(self):
cat = Series(Categorical(["a", "b", "c", np.nan]))
exp = Series([True, True, True, False])
res = cat == cat
tm.assert_series_equal(res, exp)
def test_categorical_nan_handling(self):
# NaNs are represented as -1 in labels
s = Series(Categorical(["a", "b", np.nan, "a"]))
tm.assert_index_equal(s.cat.categories, Index(["a", "b"]))
tm.assert_numpy_array_equal(
s.values.codes, np.array([0, 1, -1, 0], dtype=np.int8)
)
def test_fillna_nat(self):
series = Series([0, 1, 2, iNaT], dtype="M8[ns]")
filled = series.fillna(method="pad")
filled2 = series.fillna(value=series.values[2])
expected = series.copy()
expected.values[3] = expected.values[2]
tm.assert_series_equal(filled, expected)
tm.assert_series_equal(filled2, expected)
df = DataFrame({"A": series})
filled = df.fillna(method="pad")
filled2 = df.fillna(value=series.values[2])
expected = DataFrame({"A": expected})
tm.assert_frame_equal(filled, expected)
tm.assert_frame_equal(filled2, expected)
series = Series([iNaT, 0, 1, 2], dtype="M8[ns]")
filled = series.fillna(method="bfill")
filled2 = series.fillna(value=series[1])
expected = series.copy()
expected[0] = expected[1]
tm.assert_series_equal(filled, expected)
tm.assert_series_equal(filled2, expected)
df = DataFrame({"A": series})
filled = df.fillna(method="bfill")
filled2 = df.fillna(value=series[1])
expected = DataFrame({"A": expected})
tm.assert_frame_equal(filled, expected)
tm.assert_frame_equal(filled2, expected)
def test_isna_for_inf(self):
s = Series(["a", np.inf, np.nan, pd.NA, 1.0])
with pd.option_context("mode.use_inf_as_na", True):
r = s.isna()
dr = s.dropna()
e = Series([False, True, True, True, False])
de = Series(["a", 1.0], index=[0, 4])
tm.assert_series_equal(r, e)
tm.assert_series_equal(dr, de)
def test_isnull_for_inf_deprecated(self):
# gh-17115
s = Series(["a", np.inf, np.nan, 1.0])
with pd.option_context("mode.use_inf_as_null", True):
r = s.isna()
dr = s.dropna()
e = Series([False, True, True, False])
de = Series(["a", 1.0], index=[0, 3])
tm.assert_series_equal(r, e)
tm.assert_series_equal(dr, de)
def test_timedelta64_nan(self):
td = Series([timedelta(days=i) for i in range(10)])
# nan ops on timedeltas
td1 = td.copy()
td1[0] = np.nan
assert isna(td1[0])
assert td1[0].value == iNaT
td1[0] = td[0]
assert not isna(td1[0])
# GH#16674 iNaT is treated as an integer when given by the user
td1[1] = iNaT
assert not isna(td1[1])
assert td1.dtype == np.object_
assert td1[1] == iNaT
td1[1] = td[1]
assert not isna(td1[1])
td1[2] = NaT
assert isna(td1[2])
assert td1[2].value == iNaT
td1[2] = td[2]
assert not isna(td1[2])
# FIXME: don't leave commented-out
# boolean setting
# this doesn't work, not sure numpy even supports it
# result = td[(td>np.timedelta64(timedelta(days=3))) &
# td<np.timedelta64(timedelta(days=7)))] = np.nan
# assert isna(result).sum() == 7
# NumPy limitation =(
# def test_logical_range_select(self):
# np.random.seed(12345)
# selector = -0.5 <= datetime_series <= 0.5
# expected = (datetime_series >= -0.5) & (datetime_series <= 0.5)
# tm.assert_series_equal(selector, expected)
def test_dropna_empty(self):
s = Series([], dtype=object)
assert len(s.dropna()) == 0
return_value = s.dropna(inplace=True)
assert return_value is None
assert len(s) == 0
# invalid axis
msg = "No axis named 1 for object type Series"
with pytest.raises(ValueError, match=msg):
s.dropna(axis=1)
def test_datetime64_tz_dropna(self):
# DatetimeBlock
s = Series(
[
Timestamp("2011-01-01 10:00"),
pd.NaT,
Timestamp("2011-01-03 10:00"),
pd.NaT,
]
)
result = s.dropna()
expected = Series(
[Timestamp("2011-01-01 10:00"), Timestamp("2011-01-03 10:00")], index=[0, 2]
)
tm.assert_series_equal(result, expected)
# Author: <NAME>
# Date: 26 November 2016
# Python version: 3.5
# Updated June 2018 by <NAME> (KTH dESA)
# Modified grid algorithm and population calibration to improve computational speed
import logging
import pandas as pd
from math import pi, exp, log, sqrt, ceil
# from pyproj import Proj
import numpy as np
from collections import defaultdict
import os
logging.basicConfig(format='%(asctime)s\t\t%(message)s', level=logging.DEBUG)
# general
LHV_DIESEL = 9.9445485 # (kWh/l) lower heating value
HOURS_PER_YEAR = 8760
# Columns in settlements file must match these exactly
SET_COUNTRY = 'Country' # This cannot be changed, lots of code will break
SET_X = 'X' # Coordinate in metres/kilometres
SET_Y = 'Y' # Coordinate in metres/kilometres
SET_X_DEG = 'X_deg' # Coordinates in degrees
SET_Y_DEG = 'Y_deg'
SET_POP = 'Pop' # Population in people per point (equally, people per km2)
SET_POP_CALIB = 'PopStartCalibrated' # Calibrated population to reference year, same units
SET_POP_FUTURE = 'PopFuture' # Project future population, same units
SET_GRID_DIST_CURRENT = 'GridDistCurrent' # Distance in km from current grid
SET_GRID_DIST_PLANNED = 'GridDistPlan' # Distance in km from current and future grid
SET_ROAD_DIST = 'RoadDist' # Distance in km from road network
SET_NIGHT_LIGHTS = 'VIIRS' # Intensity of night time lights (from NASA), range 0 - 63
SET_TRAVEL_HOURS = 'TravelHours' # Travel time to large city in hours
SET_GHI = 'GHI' # Global horizontal irradiance in kWh/m2/day
SET_WINDVEL = 'WindVel' # Wind velocity in m/s
SET_WINDCF = 'WindCF' # Wind capacity factor as percentage (range 0 - 1)
SET_HYDRO = 'Hydropower' # Hydropower potential in kW
SET_HYDRO_DIST = 'HydropowerDist' # Distance to hydropower site in km
SET_HYDRO_FID = 'HydropowerFID' # the unique tag for each hydropower site, to avoid over-utilisation
SET_SUBSTATION_DIST = 'SubstationDist'
SET_ELEVATION = 'Elevation' # in metres
SET_SLOPE = 'Slope' # in degrees
SET_LAND_COVER = 'LandCover'
SET_SOLAR_RESTRICTION = 'SolarRestriction'
SET_ROAD_DIST_CLASSIFIED = 'RoadDistClassified'
SET_SUBSTATION_DIST_CLASSIFIED = 'SubstationDistClassified'
SET_ELEVATION_CLASSIFIED = 'ElevationClassified'
SET_SLOPE_CLASSIFIED = 'SlopeClassified'
SET_LAND_COVER_CLASSIFIED = 'LandCoverClassified'
SET_COMBINED_CLASSIFICATION = 'GridClassification'
SET_GRID_PENALTY = 'GridPenalty'
SET_URBAN = 'IsUrban' # Whether the site is urban (0 or 1)
SET_ENERGY_PER_HH = 'EnergyPerHH'
SET_NUM_PEOPLE_PER_HH = 'NumPeoplePerHH'
SET_ELEC_CURRENT = 'ElecStart' # If the site is currently electrified (0 or 1)
SET_ELEC_FUTURE = 'ElecFuture' # If the site has the potential to be 'easily' electrified in future
SET_NEW_CONNECTIONS = 'NewConnections' # Number of new people with electricity connections
SET_NEW_CONNECTIONS_PROD = 'New_Connections_Prod' # Number of new people with electricity connections, plus the corresponding productive uses
SET_MIN_GRID_DIST = 'MinGridDist'
SET_LCOE_GRID = 'Grid' # All lcoes in USD/kWh
SET_LCOE_SA_PV = 'SA_PV'
SET_LCOE_SA_DIESEL = 'SA_Diesel'
SET_LCOE_MG_WIND = 'MG_Wind'
SET_LCOE_MG_DIESEL = 'MG_Diesel'
SET_LCOE_MG_PV = 'MG_PV'
SET_LCOE_MG_HYDRO = 'MG_Hydro'
SET_LCOE_MG_HYBRID = 'MG_Hybrid'
SET_MIN_OFFGRID = 'MinimumOffgrid' # The technology with lowest lcoe (excluding grid)
SET_MIN_OVERALL = 'MinimumOverall' # Same as above, but including grid
SET_MIN_OFFGRID_LCOE = 'MinimumTechLCOE' # The lcoe value for minimum tech
SET_MIN_OVERALL_LCOE = 'MinimumOverallLCOE' # The lcoe value for overall minimum
SET_MIN_OVERALL_CODE = 'MinimumOverallCode' # And a code from 1 - 7 to represent that option
SET_MIN_CATEGORY = 'MinimumCategory' # The category with minimum lcoe (grid, minigrid or standalone)
SET_NEW_CAPACITY = 'NewCapacity' # Capacity in kW
SET_INVESTMENT_COST = 'InvestmentCost' # The investment cost in USD
# Columns in the specs file must match these exactly
SPE_COUNTRY = 'Country'
SPE_POP = 'Pop2016' # The actual population in the base year
SPE_URBAN = 'UrbanRatio2016' # The ratio of urban population (range 0 - 1) in base year
SPE_POP_FUTURE = 'Pop2030'
SPE_URBAN_FUTURE = 'UrbanRatio2030'
SPE_URBAN_MODELLED = 'UrbanRatioModelled' # The urban ratio in the model after calibration (for comparison)
SPE_URBAN_CUTOFF = 'UrbanCutOff' # The urban cutoff population calibrated by the model, in people per km2
SPE_URBAN_GROWTH = 'UrbanGrowth' # The urban growth rate as a simple multiplier (urban pop future / urban pop present)
SPE_RURAL_GROWTH = 'RuralGrowth' # Same as for urban
SPE_NUM_PEOPLE_PER_HH_RURAL = 'NumPeoplePerHHRural'
SPE_NUM_PEOPLE_PER_HH_URBAN = 'NumPeoplePerHHUrban'
SPE_DIESEL_PRICE_LOW = 'DieselPriceLow' # Diesel price in USD/litre
SPE_DIESEL_PRICE_HIGH = 'DieselPriceHigh' # Same, with a high forecast var
SPE_GRID_PRICE = 'GridPrice' # Grid price of electricity in USD/kWh
SPE_GRID_CAPACITY_INVESTMENT = 'GridCapacityInvestmentCost' # Grid capacity investment cost from TEMBA, in USD/kW
SPE_GRID_LOSSES = 'GridLosses' # As a ratio (0 - 1)
SPE_BASE_TO_PEAK = 'BaseToPeak' # As a ratio (0 - 1)
SPE_EXISTING_GRID_COST_RATIO = 'ExistingGridCostRatio'
SPE_MAX_GRID_DIST = 'MaxGridDist'
SPE_ELEC = 'ElecActual' # Actual current percentage electrified population (0 - 1)
SPE_ELEC_MODELLED = 'ElecModelled' # The modelled version after calibration (for comparison)
SPE_MIN_NIGHT_LIGHTS = 'MinNightLights'
SPE_MAX_GRID_EXTENSION_DIST = 'MaxGridExtensionDist'
SPE_MAX_ROAD_DIST = 'MaxRoadDist'
SPE_POP_CUTOFF1 = 'PopCutOffRoundOne'
SPE_POP_CUTOFF2 = 'PopCutOffRoundTwo'
class Technology:
"""
Used to define the parameters for each electricity access technology, and to calculate the LCOE depending on
input parameters.
"""
start_year = 2016
end_year = 2030
discount_rate = 0.08
grid_cell_area = 1 # in km2, normally 1km2
mv_line_cost = 9000 # USD/km
lv_line_cost = 5000 # USD/km
mv_line_capacity = 50 # kW/line
lv_line_capacity = 10 # kW/line
lv_line_max_length = 30 # km
hv_line_cost = 53000 # USD/km
mv_line_max_length = 50 # km
hv_lv_transformer_cost = 5000 # USD/unit
mv_increase_rate = 0.1 # percentage
def __init__(self,
tech_life, # in years
base_to_peak_load_ratio,
distribution_losses=0, # percentage
connection_cost_per_hh=0, # USD/hh
om_costs=0.0, # OM costs as percentage of capital costs
capital_cost=0, # USD/kW
capacity_factor=1.0, # percentage
efficiency=1.0, # percentage
diesel_price=0.0, # USD/litre
grid_price=0.0, # USD/kWh for grid electricity
standalone=False,
mg_pv=False,
mg_wind=False,
mg_diesel=False,
mg_hydro=False,
grid_capacity_investment=0.0, # USD/kW for on-grid capacity investments (excluding grid itself)
diesel_truck_consumption=0, # litres/hour
diesel_truck_volume=0, # litres
om_of_td_lines=0): # percentage
self.distribution_losses = distribution_losses
self.connection_cost_per_hh = connection_cost_per_hh
self.base_to_peak_load_ratio = base_to_peak_load_ratio
self.tech_life = tech_life
self.om_costs = om_costs
self.capital_cost = capital_cost
self.capacity_factor = capacity_factor
self.efficiency = efficiency
self.diesel_price = diesel_price
self.grid_price = grid_price
self.standalone = standalone
self.mg_pv = mg_pv
self.mg_wind = mg_wind
self.mg_diesel = mg_diesel
self.mg_hydro = mg_hydro
self.grid_capacity_investment = grid_capacity_investment
self.diesel_truck_consumption = diesel_truck_consumption
self.diesel_truck_volume = diesel_truck_volume
self.om_of_td_lines = om_of_td_lines
def pv_diesel_hybrid(self,
energy_per_hh, # kWh/household/year as defined
max_ghi, # highest annual GHI value encountered in the GIS data
max_travel_hours, # highest value for travel hours encountered in the GIS data
diesel_no=1, # 50, # number of diesel generators simulated
pv_no=1, #70, # number of PV panel sizes simulated
n_chg=0.92, # charge efficiency of battery
n_dis=0.92, # discharge efficiency of battery
lpsp=0.05, # maximum loss of load allowed over the year, in share of kWh
battery_cost=150, # battery capital cost, USD/kWh of storage capacity
pv_cost=2490, # PV panel capital cost, USD/kW peak power
diesel_cost=550, # diesel generator capital cost, USD/kW rated power
pv_life=20, # PV panel expected lifetime, years
diesel_life=15, # diesel generator expected lifetime, years
pv_om=0.015, # annual OM cost of PV panels
diesel_om=0.1, # annual OM cost of diesel generator
k_t=0.005): # temperature factor of PV panels
ghi = pd.read_csv(os.path.join('Supplementary_files', 'GHI_hourly.csv'), usecols=[4], sep=';', skiprows=21).values
# hourly GHI values downloaded from SoDa for one location in the country
temp = pd.read_csv(os.path.join('Supplementary_files', 'Temperature_hourly.csv'), usecols=[4], sep=';', skiprows=21).values
# hourly temperature values downloaded from SoDa for one location in the country
hour_numbers = (0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23) * 365
LHV_DIESEL = 9.9445485
dod_max = 0.8 # maximum depth of discharge of battery
# the values below define the load curve for the five tiers. The values reflect the share of the daily demand
# expected in each hour of the day (sum of all values for one tier = 1)
tier5_load_curve = np.array([0.021008403, 0.021008403, 0.021008403, 0.021008403, 0.027310924, 0.037815126,
0.042016807, 0.042016807, 0.042016807, 0.042016807, 0.042016807, 0.042016807,
0.042016807, 0.042016807, 0.042016807, 0.042016807, 0.046218487, 0.050420168,
0.067226891, 0.084033613, 0.073529412, 0.052521008, 0.033613445, 0.023109244])
tier4_load_curve = np.array([0.017167382, 0.017167382, 0.017167382, 0.017167382, 0.025751073, 0.038626609,
0.042918455, 0.042918455, 0.042918455, 0.042918455, 0.042918455, 0.042918455,
0.042918455, 0.042918455, 0.042918455, 0.042918455, 0.0472103, 0.051502146,
0.068669528, 0.08583691, 0.075107296, 0.053648069, 0.034334764, 0.021459227])
tier3_load_curve = np.array([0.013297872, 0.013297872, 0.013297872, 0.013297872, 0.019060284, 0.034574468,
0.044326241, 0.044326241, 0.044326241, 0.044326241, 0.044326241, 0.044326241,
0.044326241, 0.044326241, 0.044326241, 0.044326241, 0.048758865, 0.053191489,
0.070921986, 0.088652482, 0.077570922, 0.055407801, 0.035460993, 0.019946809])
tier2_load_curve = np.array([0.010224949, 0.010224949, 0.010224949, 0.010224949, 0.019427403, 0.034764826,
0.040899796, 0.040899796, 0.040899796, 0.040899796, 0.040899796, 0.040899796,
0.040899796, 0.040899796, 0.040899796, 0.040899796, 0.04601227, 0.056237219,
0.081799591, 0.102249489, 0.089468303, 0.06390593, 0.038343558, 0.017893661])
tier1_load_curve = np.array([0, 0, 0, 0, 0.012578616, 0.031446541, 0.037735849, 0.037735849, 0.037735849,
0.037735849, 0.037735849, 0.037735849, 0.037735849, 0.037735849, 0.037735849,
0.037735849, 0.044025157, 0.062893082, 0.100628931, 0.125786164, 0.110062893,
0.078616352, 0.044025157, 0.012578616])
if energy_per_hh < 75:
load_curve = tier1_load_curve * energy_per_hh / 365
elif energy_per_hh < 365:
load_curve = tier2_load_curve * energy_per_hh / 365
elif energy_per_hh < 1241:
load_curve = tier3_load_curve * energy_per_hh / 365
elif energy_per_hh < 2993:
load_curve = tier4_load_curve * energy_per_hh / 365
else:
load_curve = tier5_load_curve * energy_per_hh / 365
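# (Descriptive note) The thresholds above (75, 365, 1241 and 2993 kWh/household/year)
# presumably mark the boundaries between consumption tiers 1-5; dividing the annual demand
# by 365 converts it to an average daily demand, which the normalised load-curve shares
# (summing to 1 per day) then spread over the 24 hours.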
def pv_diesel_capacities(pv_capacity, battery_size, diesel_capacity, initital_condition=False):
condition = 1
ren_limit = 0
break_hour = 17
while condition > lpsp:
dod = np.zeros(24)
battery_use = np.zeros(24) # Stores the amount of battery discharge during the day
fuel_result = 0
battery_life = 0
soc = 0.5
unmet_demand = 0
annual_diesel_gen = 0
for i in range(8760):
diesel_gen = 0
battery_use[hour_numbers[i]] = 0.0002 * soc # Battery self-discharge
soc *= 0.9998
t_cell = temp[i] + 0.0256 * ghi[i] # PV cell temperature
pv_gen = pv_capacity * 0.9 * ghi[i] / 1000 * (
1 - k_t * (t_cell - 298.15)) # PV generation in the hour
net_load = load_curve[hour_numbers[i]] - pv_gen # remaining load not met by PV panels
if net_load <= 0: # If pv generation is greater than load the excess energy is stored in battery
if battery_size > 0:
soc -= n_chg * net_load / battery_size
net_load = 0
max_diesel = min(diesel_capacity, net_load + (1 - soc) * battery_size / n_chg)
# Maximum amount of diesel needed to supply load and charge battery, limited by rated diesel capacity
# Below is the dispatch strategy for the diesel generator as described in word document
if break_hour + 1 > hour_numbers[i] > 4 and net_load > soc * battery_size * n_dis:
diesel_gen = min(diesel_capacity, max(0.4 * diesel_capacity, net_load))
elif 23 > hour_numbers[i] > break_hour and max_diesel > 0.40 * diesel_capacity:
diesel_gen = max_diesel
elif n_dis * soc * battery_size < net_load:
diesel_gen = max(0.4 * diesel_capacity, max_diesel)
if diesel_gen > 0: # Fuel consumption is stored
fuel_result += diesel_capacity * 0.08145 + diesel_gen * 0.246
annual_diesel_gen += diesel_gen
if (net_load - diesel_gen) > 0: # If diesel generator cannot meet load the battery is also used
if battery_size > 0:
soc -= (net_load - diesel_gen) / n_dis / battery_size
battery_use[hour_numbers[i]] += (net_load - diesel_gen) / n_dis / battery_size
if soc < 0: # If battery and diesel generator cannot supply load there is unmet demand
unmet_demand -= soc * n_dis * battery_size
battery_use[hour_numbers[i]] += soc
soc = 0
else: # If diesel generation is larger than load the excess energy is stored in battery
if battery_size > 0:
soc += (diesel_gen - net_load) * n_chg / battery_size
if battery_size == 0: # If no battery and diesel generation < net load there is unmet demand
unmet_demand += net_load - diesel_gen
soc = min(soc, 1) # Battery state of charge cannot be >1
dod[hour_numbers[i]] = 1 - soc # The depth of discharge in every hour of the day is stored
if hour_numbers[i] == 23 and max(dod) > 0: # The battery wear during the last day is calculated
battery_life += sum(battery_use) / (531.52764 * max(0.1, (max(dod) * dod_max)) ** -1.12297)
condition = unmet_demand / energy_per_hh # lpsp is calculated
if initital_condition: # During the first calculation the minimum PV size with no diesel generator is calculated
if condition > lpsp:
pv_capacity *= (1 + unmet_demand / energy_per_hh / 4)
elif condition > lpsp or (annual_diesel_gen > (1 - ren_limit) * energy_per_hh): # For the remaining configurations the solution is considered unusable if the lpsp criterion is not met
diesel_capacity = 99
condition = 0
battery_life = 1
elif condition < lpsp: # If the lpsp criterion is met the expected battery life is stored
battery_life = np.round(1 / battery_life)
return pv_capacity, diesel_capacity, battery_size, fuel_result, battery_life
# Initial PV size when no diesel generator is used is calculated and used as reference
ref = pv_diesel_capacities(energy_per_hh / 3000, 2 * energy_per_hh / 365, 0, initital_condition=True)
battery_sizes = [0.3 * energy_per_hh / 365, 0.5 * energy_per_hh / 365, 0.75 * energy_per_hh / 365, energy_per_hh / 365, 2 * energy_per_hh / 365, 0] # [2 * energy_per_hh / 365, energy_per_hh / 365, 0]
ref_battery_size = np.zeros((len(battery_sizes), pv_no, diesel_no))
ref_panel_size = np.zeros((len(battery_sizes), pv_no, diesel_no))
ref_diesel_cap = np.zeros((len(battery_sizes), pv_no, diesel_no))
ref_fuel_result = np.zeros((len(battery_sizes), pv_no, diesel_no))
ref_battery_life = np.zeros((len(battery_sizes), pv_no, diesel_no))
# For the number of diesel, pv and battery capacities the lpsp, battery lifetime and fuel usage is calculated
for k in range(len(battery_sizes)):
for i in range(pv_no):
for j in range(diesel_no):
a = pv_diesel_capacities(ref[0] * (pv_no - i) / pv_no, battery_sizes[k],
j * max(load_curve) / diesel_no)
ref_panel_size[k, i, j] = a[0]
ref_diesel_cap[k, i, j] = a[1]
ref_battery_size[k, i, j] = a[2]
ref_fuel_result[k, i, j] = a[3]
ref_battery_life[k, i, j] = min(20, a[4]) # Battery life limited to maximum 20 years
# Necessary information for the calculation of LCOE is defined
project_life = self.end_year - self.start_year
ghi_steps = int(
ceil((max_ghi - 1000) / 50) + 1) # GHI values rounded to nearest 50 are used for reference matrix
diesel_cost_max = 2 * self.diesel_price * self.diesel_truck_consumption * max_travel_hours / self.diesel_truck_volume / LHV_DIESEL
diesel_steps = int(
ceil(diesel_cost_max * 100) + 1) # Diesel values rounded to 0.01 USD used for reference matrix
generation = np.ones(project_life) * energy_per_hh
generation[0] = 0
year = np.arange(project_life)
discount_factor = (1 + self.discount_rate) ** year
investment_table = np.zeros((ghi_steps, diesel_steps)) # Stores least-cost configuration investments
pv_table = np.zeros((ghi_steps, diesel_steps)) # Stores PV size for least-cost configuration
diesel_table = np.zeros((ghi_steps, diesel_steps)) # Stores diesel capacity for least-cost configuration
lcoe_table = np.ones((ghi_steps, diesel_steps)) * 99 # Stores LCOE for least-cost configuration
choice_table = np.zeros((ghi_steps, diesel_steps))
# For each combination of GHI and diesel price the least costly configuration is calculated by iterating through
# the different configurations specified above
for i in range(ghi_steps):
pv_size = ref_panel_size * ghi.sum() / 1000 / (1000 + 50 * i)
for j in range(diesel_steps):
for k in range(pv_no):
for l in range(diesel_no):
for m in range(len(battery_sizes)):
investments = np.zeros(project_life)
salvage = np.zeros(project_life)
fuel_costs = np.ones(project_life) * ref_fuel_result[m, k, l] * (self.diesel_price + 0.01 * j)
investments[0] = pv_size[m, k, l] * pv_cost + ref_diesel_cap[m, k, l] * diesel_cost
salvage[-1] = ref_diesel_cap[m, k, l] * diesel_cost * (1 - project_life / diesel_life) + \
pv_size[m, k, l] * pv_cost * (1 - project_life / pv_life)
om = np.ones(project_life) * (
pv_size[m, k, l] * pv_cost * pv_om + ref_diesel_cap[m, k, l] * diesel_cost * diesel_om)
if pv_life < project_life:
investments[pv_life] = pv_size[m, k, l] * pv_cost
if diesel_life < project_life:
investments[diesel_life] = ref_diesel_cap[m, k, l] * diesel_cost
for n in range(project_life):
if year[n] % ref_battery_life[m, k, l] == 0:
investments[n] += ref_battery_size[m, k, l] * battery_cost / dod_max
salvage[-1] += (1 - (
(project_life % ref_battery_life[m, k, l]) / ref_battery_life[m, k, l])) * \
battery_cost * ref_battery_size[m, k, l] / dod_max + ref_diesel_cap[
m, k, l] * \
diesel_cost * (1 - (
project_life % diesel_life) / diesel_life) \
+ pv_size[m, k, l] * pv_cost * (1 - (project_life % pv_life) / pv_life)
discount_investments = (investments + fuel_costs - salvage + om) / discount_factor
discount_generation = generation / discount_factor
lcoe = np.sum(discount_investments) / np.sum(discount_generation)
if lcoe < lcoe_table[i, j]:
lcoe_table[i, j] = lcoe
pv_table[i, j] = pv_size[m, k, l]
diesel_table[i, j] = ref_diesel_cap[m, k, l]
investment_table[i, j] = np.sum(discount_investments)
choice_table[i, j] = (l + 1) * 10 + (k + 1) * 10000 + m + 1
# first number is PV size, second is diesel, third is battery
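# (Descriptive note) Given the encoding above, a choice_table entry could be decoded as,
# for example: pv_index = code // 10000, diesel_index = (code % 10000) // 10,
# battery_index = code % 10 (each offset by +1 relative to the loop indices k, l, m).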
return lcoe_table, pv_table, diesel_table, investment_table, load_curve[19], choice_table
@classmethod
def set_default_values(cls, start_year, end_year, discount_rate, grid_cell_area, mv_line_cost, lv_line_cost,
mv_line_capacity, lv_line_capacity, lv_line_max_length, hv_line_cost, mv_line_max_length,
hv_lv_transformer_cost, mv_increase_rate):
cls.start_year = start_year
cls.end_year = end_year
cls.discount_rate = discount_rate
cls.grid_cell_area = grid_cell_area
cls.mv_line_cost = mv_line_cost
cls.lv_line_cost = lv_line_cost
cls.mv_line_capacity = mv_line_capacity
cls.lv_line_capacity = lv_line_capacity
cls.lv_line_max_length = lv_line_max_length
cls.hv_line_cost = hv_line_cost
cls.mv_line_max_length = mv_line_max_length
cls.hv_lv_transformer_cost = hv_lv_transformer_cost
cls.mv_increase_rate = mv_increase_rate
def get_lcoe(self, energy_per_hh, people, num_people_per_hh, additional_mv_line_length=0, capacity_factor=0,
mv_line_length=0, travel_hours=0, ghi=0, urban=0, get_capacity=False, mini_grid=False, pv=False,
urban_hybrid=0, rural_hybrid=0, get_investment_cost=False, mg_pv=False, mg_wind=False,
mg_hydro=False, mg_diesel=False, mg_hybrid=False):
"""
Calculates the LCOE depending on the parameters. Optionally calculates the investment cost instead.
The only required parameters are energy_per_hh, people and num_people_per_hh
additional_mv_line_length required for grid
capacity_factor required for PV and wind
mv_line_length required for hydro
travel_hours required for diesel
"""
if people == 0:
# If there are no people, the investment cost is zero.
if get_investment_cost:
return 0
# Otherwise we set the people low (prevent div/0 error) and continue.
else:
people = 0.00001
# If a new capacity factor isn't given, use the class capacity factor (for hydro, diesel etc)
if capacity_factor == 0:
capacity_factor = self.capacity_factor
consumption = people / num_people_per_hh * energy_per_hh # kWh/year
average_load = consumption / (1 - self.distribution_losses) / HOURS_PER_YEAR # kW
if mg_hybrid and urban == 1:
peak_load = urban_hybrid[4] * consumption
# peak_load = people / num_people_per_hh * urban_hybrid[4] * (1 + self.distribution_losses)
elif mg_hybrid and urban == 0:
peak_load = rural_hybrid[4] * consumption
# peak_load = people / num_people_per_hh * rural_hybrid[4] * (1 + self.distribution_losses)
else:
peak_load = average_load / self.base_to_peak_load_ratio # kW
no_mv_lines = peak_load / self.mv_line_capacity
no_lv_lines = peak_load / self.lv_line_capacity
lv_networks_lim_capacity = no_lv_lines / no_mv_lines
lv_networks_lim_length = ((self.grid_cell_area / no_mv_lines) / (self.lv_line_max_length / sqrt(2))) ** 2
actual_lv_lines = min([people / num_people_per_hh, max([lv_networks_lim_capacity, lv_networks_lim_length])])
hh_per_lv_network = (people / num_people_per_hh) / (actual_lv_lines * no_mv_lines)
lv_unit_length = sqrt(self.grid_cell_area / (people / num_people_per_hh)) * sqrt(2) / 2
lv_lines_length_per_lv_network = 1.333 * hh_per_lv_network * lv_unit_length
total_lv_lines_length = no_mv_lines * actual_lv_lines * lv_lines_length_per_lv_network
line_reach = (self.grid_cell_area / no_mv_lines) / (2 * sqrt(self.grid_cell_area / no_lv_lines))
total_length_of_lines = min([line_reach, self.mv_line_max_length]) * no_mv_lines
additional_hv_lines = max(
[0, round(sqrt(self.grid_cell_area) / (2 * min([line_reach, self.mv_line_max_length])) / 10, 3) - 1])
hv_lines_total_length = (sqrt(self.grid_cell_area) / 2) * additional_hv_lines * sqrt(self.grid_cell_area)
num_transformers = additional_hv_lines + no_mv_lines + (no_mv_lines * actual_lv_lines)
generation_per_year = average_load * HOURS_PER_YEAR
# The investment and O&M costs are different for grid and non-grid solutions
if self.grid_price > 0 :
td_investment_cost = hv_lines_total_length * self.hv_line_cost + \
total_length_of_lines * self.mv_line_cost + \
total_lv_lines_length * self.lv_line_cost + \
num_transformers * self.hv_lv_transformer_cost + \
(people / num_people_per_hh) * self.connection_cost_per_hh + \
additional_mv_line_length * (
self.mv_line_cost * (1 + self.mv_increase_rate) **
((additional_mv_line_length / 5) - 1))
td_om_cost = td_investment_cost * self.om_of_td_lines
total_investment_cost = td_investment_cost
total_om_cost = td_om_cost
fuel_cost = self.grid_price
else:
total_lv_lines_length *= 0 if self.standalone else 0.75
mv_total_line_cost = self.mv_line_cost * mv_line_length
lv_total_line_cost = self.lv_line_cost * total_lv_lines_length
td_investment_cost = mv_total_line_cost + lv_total_line_cost + (
people / num_people_per_hh) * self.connection_cost_per_hh
td_om_cost = td_investment_cost * self.om_of_td_lines
installed_capacity = peak_load / capacity_factor
if self.standalone:
if self.diesel_price > 0:
if (installed_capacity / people / num_people_per_hh) < 1:
installed_capacity = 1 * people / num_people_per_hh
if installed_capacity / (people / num_people_per_hh) < 0.020:
capital_investment = installed_capacity * self.capital_cost[0.020]
total_investment_cost = td_investment_cost + capital_investment
total_om_cost = td_om_cost + (self.capital_cost[0.020] * self.om_costs * installed_capacity)
elif installed_capacity / (people / num_people_per_hh) < 0.050:
capital_investment = installed_capacity * self.capital_cost[0.050]
total_investment_cost = td_investment_cost + capital_investment
total_om_cost = td_om_cost + (self.capital_cost[0.050] * self.om_costs * installed_capacity)
elif installed_capacity / (people / num_people_per_hh) < 0.100:
capital_investment = installed_capacity * self.capital_cost[0.100]
total_investment_cost = td_investment_cost + capital_investment
total_om_cost = td_om_cost + (self.capital_cost[0.100] * self.om_costs * installed_capacity)
elif installed_capacity / (people / num_people_per_hh) < 0.200:
capital_investment = installed_capacity * self.capital_cost[0.200]
total_investment_cost = td_investment_cost + capital_investment
total_om_cost = td_om_cost + (self.capital_cost[0.200] * self.om_costs * installed_capacity)
else:
capital_investment = installed_capacity * self.capital_cost[0.300]
total_investment_cost = td_investment_cost + capital_investment
total_om_cost = td_om_cost + (self.capital_cost[0.300] * self.om_costs * installed_capacity)
elif self.mg_pv:
if installed_capacity < 50:
capital_investment = installed_capacity * self.capital_cost[50]
total_investment_cost = td_investment_cost + capital_investment
total_om_cost = td_om_cost + (self.capital_cost[50] * self.om_costs * installed_capacity)
elif installed_capacity < 75:
capital_investment = installed_capacity * self.capital_cost[75]
total_investment_cost = td_investment_cost + capital_investment
total_om_cost = td_om_cost + (self.capital_cost[75] * self.om_costs * installed_capacity)
elif installed_capacity < 100:
capital_investment = installed_capacity * self.capital_cost[100]
total_investment_cost = td_investment_cost + capital_investment
total_om_cost = td_om_cost + (self.capital_cost[100] * self.om_costs * installed_capacity)
else:
capital_investment = installed_capacity * self.capital_cost[200]
total_investment_cost = td_investment_cost + capital_investment
total_om_cost = td_om_cost + (self.capital_cost[200] * self.om_costs * installed_capacity)
elif self.mg_wind:
if installed_capacity < 100:
capital_investment = installed_capacity * self.capital_cost[100]
total_investment_cost = td_investment_cost + capital_investment
total_om_cost = td_om_cost + (self.capital_cost[100] * self.om_costs * installed_capacity)
elif installed_capacity < 1000:
capital_investment = installed_capacity * self.capital_cost[1000]
total_investment_cost = td_investment_cost + capital_investment
total_om_cost = td_om_cost + (self.capital_cost[1000] * self.om_costs * installed_capacity)
else:
capital_investment = installed_capacity * self.capital_cost[10000]
total_investment_cost = td_investment_cost + capital_investment
total_om_cost = td_om_cost + (self.capital_cost[10000] * self.om_costs * installed_capacity)
elif self.mg_hydro:
if installed_capacity < 1:
capital_investment = installed_capacity * self.capital_cost[1]
total_investment_cost = td_investment_cost + capital_investment
total_om_cost = td_om_cost + (self.capital_cost[1] * self.om_costs * installed_capacity)
elif installed_capacity < 100:
capital_investment = installed_capacity * self.capital_cost[100]
total_investment_cost = td_investment_cost + capital_investment
total_om_cost = td_om_cost + (self.capital_cost[100] * self.om_costs * installed_capacity)
else:
capital_investment = installed_capacity * self.capital_cost[5000]
total_investment_cost = td_investment_cost + capital_investment
total_om_cost = td_om_cost + (self.capital_cost[5000] * self.om_costs * installed_capacity)
elif self.mg_diesel:
if installed_capacity < 100:
capital_investment = installed_capacity * self.capital_cost[100]
total_investment_cost = td_investment_cost + capital_investment
total_om_cost = td_om_cost + (self.capital_cost[100] * self.om_costs * installed_capacity)
elif installed_capacity < 1000:
capital_investment = installed_capacity * self.capital_cost[1000]
total_investment_cost = td_investment_cost + capital_investment
total_om_cost = td_om_cost + (self.capital_cost[1000] * self.om_costs * installed_capacity)
elif installed_capacity < 5000:
capital_investment = installed_capacity * self.capital_cost[5000]
total_investment_cost = td_investment_cost + capital_investment
total_om_cost = td_om_cost + (self.capital_cost[5000] * self.om_costs * installed_capacity)
else:
capital_investment = installed_capacity * self.capital_cost[25000]
total_investment_cost = td_investment_cost + capital_investment
total_om_cost = td_om_cost + (self.capital_cost[25000] * self.om_costs * installed_capacity)
elif mg_hybrid:
capital_investment = installed_capacity * self.capital_cost
total_investment_cost = td_investment_cost + capital_investment
total_om_cost = td_om_cost + (self.capital_cost * self.om_costs * installed_capacity)
# If a diesel price has been passed, the technology is diesel
if self.diesel_price > 0 and not mg_hybrid:
# And we apply the Szabo formula to calculate the transport cost for the diesel
# p = (p_d + 2*p_d*consumption*time/volume)*(1/mu)*(1/LHVd)
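# (Descriptive note) In the expression below: p_d = self.diesel_price, consumption =
# self.diesel_truck_consumption (litres/hour), time = travel_hours, volume =
# self.diesel_truck_volume (litres), mu = self.efficiency and LHVd = LHV_DIESEL (kWh/l).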
fuel_cost = (self.diesel_price + 2 * self.diesel_price * self.diesel_truck_consumption * travel_hours /
self.diesel_truck_volume) / LHV_DIESEL / self.efficiency
# Otherwise it's hydro/wind etc with no fuel cost
else:
fuel_cost = 0
# Perform the time-value LCOE calculation
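# (Descriptive note) The calculation below implements the standard discounted LCOE:
#   LCOE = [ sum_t (I_t + OM_t + F_t - S_t) / (1 + r)^t ] / [ sum_t E_t / (1 + r)^t ]
# with I_t the investments, OM_t the O&M costs, F_t the fuel costs, S_t the salvage value,
# E_t the electricity generated in year t and r the discount rate.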
project_life = self.end_year - self.start_year
reinvest_year = 0
# If the technology life is less than the project life, we will have to invest twice to buy it again
if self.tech_life < project_life:
reinvest_year = self.tech_life
year = np.arange(project_life)
el_gen = generation_per_year * np.ones(project_life)
el_gen[0] = 0
discount_factor = (1 + self.discount_rate) ** year
investments = np.zeros(project_life)
investments[0] = total_investment_cost
if reinvest_year:
investments[reinvest_year] = total_investment_cost
salvage = np.zeros(project_life)
used_life = project_life
if reinvest_year:
# so salvage will come from the remaining life after the re-investment
used_life = project_life - self.tech_life
salvage[-1] = total_investment_cost * (1 - used_life / self.tech_life)
operation_and_maintenance = total_om_cost * np.ones(project_life)
operation_and_maintenance[0] = 0
fuel = el_gen * fuel_cost
fuel[0] = 0
if mg_hybrid:
diesel_lookup = int(round(2 * self.diesel_price * self.diesel_truck_consumption *
travel_hours / self.diesel_truck_volume / LHV_DIESEL * 100))
renewable_lookup = int(round((ghi - 1000) / 50))
if urban == 1 and pv:
ref_table = urban_hybrid[0]
ref_investments = urban_hybrid[3]
ref_capacity = urban_hybrid[1] + urban_hybrid[2]
elif urban == 0 and pv:
ref_table = rural_hybrid[0]
ref_investments = rural_hybrid[3]
ref_capacity = rural_hybrid[1] + rural_hybrid[2]
add_lcoe = ref_table[renewable_lookup, diesel_lookup]
add_investments = ref_investments[renewable_lookup, diesel_lookup] * people / num_people_per_hh
add_capacity = ref_capacity[renewable_lookup, diesel_lookup] * people / num_people_per_hh
# So we also return the total investment cost for this number of people
if get_investment_cost:
discounted_investments = investments / discount_factor
if mini_grid:
return add_investments + np.sum(discounted_investments)
else:
return np.sum(discounted_investments) + self.grid_capacity_investment * peak_load
# return np.sum(discounted_investments) + self.grid_capacity_investment * peak_load
elif get_capacity:
return add_capacity
else:
discounted_costs = (investments + operation_and_maintenance + fuel - salvage) / discount_factor
discounted_generation = el_gen / discount_factor
if mini_grid:
return np.sum(discounted_costs) / np.sum(discounted_generation) + add_lcoe
else:
return np.sum(discounted_costs) / np.sum(discounted_generation)
# return np.sum(discounted_costs) / np.sum(discounted_generation)
def get_grid_table(self, energy_per_hh, num_people_per_hh, max_dist):
"""
Uses get_lcoe to generate a 2D grid with the grid LCOEs, for faster access in the electrification algorithm
"""
logging.info('Creating a grid table for {} kWh/hh/year'.format(energy_per_hh))
# Coarser resolution at the high end (just to catch the few places with exceptional population density)
# The electrification algorithm must round off with the same scheme
people_arr_direct = list(range(1000)) + list(range(1000, 10000, 10)) + list(range(10000, 350000, 1000))
elec_dists = range(0, int(max_dist) + 20) # add twenty to handle edge cases
grid_lcoes = pd.DataFrame(index=elec_dists, columns=people_arr_direct)
for people in people_arr_direct:
for additional_mv_line_length in elec_dists:
grid_lcoes[people][additional_mv_line_length] = self.get_lcoe(
energy_per_hh=energy_per_hh,
people=people,
num_people_per_hh=num_people_per_hh,
additional_mv_line_length=additional_mv_line_length)
return grid_lcoes.to_dict()
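
# Illustrative sketch only (not part of the original model): one possible way to configure a
# grid Technology and query its LCOE for a settlement. All numeric values below are
# hypothetical placeholders, not calibrated inputs.
def _example_grid_lcoe():
    grid_tech = Technology(tech_life=30,
                           base_to_peak_load_ratio=0.5,
                           distribution_losses=0.1,
                           connection_cost_per_hh=125,
                           om_of_td_lines=0.03,
                           grid_price=0.05,
                           grid_capacity_investment=2000)
    # LCOE in USD/kWh for a settlement of 500 people, 5 people per household,
    # 300 kWh/household/year, connected via 10 km of additional MV line.
    return grid_tech.get_lcoe(energy_per_hh=300, people=500, num_people_per_hh=5,
                              additional_mv_line_length=10)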
class SettlementProcessor:
"""
Processes the dataframe and adds all the columns to determine the cheapest option and the final costs and summaries
"""
def __init__(self, path):
try:
self.df = pd.read_csv(path)
except FileNotFoundError:
print('Could not find the calibrated and prepped csv file')
raise
try:
self.df[SET_GHI]
except KeyError:
self.df = pd.read_csv(path, sep=';')
try:
self.df[SET_GHI]
except KeyError:
print('Column "GHI" not found, check column names in calibrated csv-file')
raise
def condition_df(self):
"""
Do any initial data conditioning that may be required.
"""
logging.info('Ensure that columns that are supposed to be numeric are numeric')
self.df[SET_GHI] = pd.to_numeric(self.df[SET_GHI], errors='coerce')
self.df[SET_WINDVEL] = pd.to_numeric(self.df[SET_WINDVEL], errors='coerce')
self.df[SET_NIGHT_LIGHTS] = pd.to_numeric(self.df[SET_NIGHT_LIGHTS], errors='coerce')
self.df[SET_ELEVATION] = pd.to_numeric(self.df[SET_ELEVATION], errors='coerce')
self.df[SET_SLOPE] = pd.to_numeric(self.df[SET_SLOPE], errors='coerce')
self.df[SET_LAND_COVER] = pd.to_numeric(self.df[SET_LAND_COVER], errors='coerce')
self.df[SET_GRID_DIST_CURRENT] = pd.to_numeric(self.df[SET_GRID_DIST_CURRENT], errors='coerce')
self.df[SET_GRID_DIST_PLANNED] = pd.to_numeric(self.df[SET_GRID_DIST_PLANNED], errors='coerce')
# -*- coding: utf-8 -*-
"""Copy of final.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1JsZAdNd67Fcn-S5prbt1w33R4wxE_9ep
"""
# Commented out IPython magic to ensure Python compatibility.
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import warnings
warnings.filterwarnings('ignore')
from sklearn.preprocessing import LabelEncoder
from tqdm import tqdm_notebook as tqdm
# %matplotlib inline
"""## Data loading """
application_train = pd.read_csv("/content/drive/MyDrive/Home Credit/Data/application_train.csv")
application_test = pd.read_csv("/content/drive/MyDrive/Home Credit/Data/application_test.csv")
# pos_cash = pd.read_csv("/content/drive/MyDrive/Home Credit/Data/POS_CASH_balance.csv")
# installments = pd.read_csv("/content/drive/MyDrive/Home Credit/Data/installments_payments.csv")
# credit_df = pd.read_csv("/content/drive/MyDrive/Home Credit/Data/credit_card_balance.csv");
# b=pd.read_csv("/content/drive/MyDrive/Home Credit/Data/bureau.csv")
# bur=pd.read_csv("/content/drive/MyDrive/Home Credit/Data/bureau_balance.csv")
# prev=pd.read_csv("/content/drive/MyDrive/Home Credit/Data/previous_application.csv")
print("application_train.shape:",application_train.shape)
print("application_test.shape :",application_test.shape)
train_id = application_train["SK_ID_CURR"]
train_target = application_train["TARGET"]
test_id = application_test["SK_ID_CURR"]
application_train.head()
application_test.head()
"""we have one extra column in the application_train data , i.e TARGET """
application_train['TARGET'].value_counts()
fig = plt.figure(figsize =(15, 5))
plt.subplot(1,2,1)
plt.pie(application_train["TARGET"].value_counts(),labels = ["TARGET=0","TARGET=1"],autopct='%1.2f%%')
plt.subplot(1,2,2)
sns.countplot(x="TARGET",palette ="Set2",data=application_train)
plt.tight_layout()
plt.show()
"""Imbalanced dataset"""
application_train.dtypes.value_counts()
obj_type = application_train.dtypes[application_train.dtypes=='object'].index
float_type = application_train.dtypes[application_train.dtypes=='float64'].index
int_type = application_train.dtypes[application_train.dtypes=='int64'].index
def missing_data(data):
total = data.isnull().sum().sort_values(ascending = False)
percent = (data.isnull().sum()/data.isnull().count()*100).sort_values(ascending = False)
return pd.concat([total, percent], axis=1, keys=['Total', 'Percent'])
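# missing_data returns, for each column of the given frame, the count and percentage of
# missing values, sorted with the most incomplete columns first,
# e.g. missing_data(application_train[obj_type]).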
"""# Handling categorical data """
print(obj_type)
label_list = []
one_hot_list = []
drop_list = []
application_train[obj_type].head()
"""Missing Values in categorical data"""
missing_data(application_train[obj_type])
application_train["CODE_GENDER"].value_counts()
application_train['CODE_GENDER'].replace('XNA','F', inplace=True)
fig = plt.figure(figsize =(15, 5))
plt.subplot(1,2,1)
plt.pie(application_train["CODE_GENDER"].value_counts(),labels = ["Female","Male"],autopct='%1.2f%%')
plt.subplot(1,2,2)
sns.countplot(x="CODE_GENDER",hue="TARGET",palette ="Set2",data=application_train)
plt.tight_layout()
plt.show()
"""Observation : Male having difficulty in repaying is high compared to Female from the above graph
More No of Female Applicants than Male Applicants.
"""
def plot_hist(col):
plt.suptitle(col, fontsize=30)
application_train.loc[application_train['TARGET'] == 0, col].hist( )
application_train.loc[application_train['TARGET'] == 1, col].hist( )
plt.legend(['TARGET(0)', 'TARGET(1)'])
plt.show()
plot_hist("CODE_GENDER")
"""# NAME_CONTRACT_TYPE"""
fig = plt.figure(figsize =(15, 5))
plt.subplot(1,2,1)
plt.pie(application_train["NAME_CONTRACT_TYPE"].value_counts(),labels = ["Cash_Loans","Revolving_Loans"],autopct='%1.2f%%')
plt.subplot(1,2,2)
sns.countplot(x="NAME_CONTRACT_TYPE",hue="TARGET",palette ="Set2",data=application_train)
plt.tight_layout()
plt.show()
"""Cash Loans are More than Revolving loans .
## FLAG_OWN_CAR
"""
fig = plt.figure(figsize =(15, 3))
plt.subplot(1,2,1)
plt.pie(application_train["FLAG_OWN_CAR"].value_counts(),labels = ["YES","NO"],autopct='%1.2f%%')
plt.subplot(1,2,2)
sns.countplot(x="FLAG_OWN_CAR",hue="TARGET",palette ="Set2",data=application_train)
plt.tight_layout()
plt.show()
"""## FLAG_OWN_REALTY"""
fig = plt.figure(figsize =(15, 3))
plt.subplot(1,2,1)
plt.pie(application_train["FLAG_OWN_REALTY"].value_counts(),labels = ["YES","NO"],autopct='%1.2f%%')
plt.subplot(1,2,2)
sns.countplot(x="FLAG_OWN_REALTY",hue="TARGET",palette ="Set2",data=application_train)
plt.tight_layout()
plt.show()
"""## NAME_EDUCATION_TYPE"""
application_train["NAME_EDUCATION_TYPE"].value_counts().index
fig = plt.figure(figsize =(15, 3))
plt.subplot(1,2,1)
plt.pie(application_train["NAME_EDUCATION_TYPE"].value_counts(),labels =['Secondary / secondary special', 'Higher education',
'Incomplete higher', 'Lower secondary', 'Academic degree'],autopct='%1.2f%%')
plt.subplot(1,2,2)
sns.countplot(x="NAME_EDUCATION_TYPE",hue="TARGET",palette ="Set2",data=application_train)
plt.tight_layout()
plt.show()
"""## NAME_TYPE_SUITE"""
application_train["NAME_TYPE_SUITE"].value_counts().index
fig = plt.figure(figsize =(15, 5))
plt.subplot(1,2,1)
plt.pie(application_train["NAME_TYPE_SUITE"].value_counts(),labels =['Unaccompanied', 'Family', 'Spouse, partner', 'Children', 'Other_B',
'Other_A', 'Group of people'],autopct='%1.2f%%')
plt.subplot(1,2,2)
sns.countplot(x="NAME_TYPE_SUITE",hue="TARGET",palette ="Set2",data=application_train)
plt.tight_layout()
plt.show()
"""## NAME_INCOME_TYPE"""
l=application_train["NAME_INCOME_TYPE"].value_counts()
l
fig = plt.figure(figsize =(20, 5))
plt.subplot(1,2,1)
plt.pie(application_train["NAME_INCOME_TYPE"].value_counts(),labels =l.index,autopct='%1.2f%%')
plt.subplot(1,2,2)
sns.countplot(x="NAME_INCOME_TYPE",hue="TARGET",palette ="Set2",data=application_train)
plt.tight_layout()
plt.show()
"""## NAME_FAMILY_STATUS"""
l=application_train["NAME_FAMILY_STATUS"].value_counts()
l
fig = plt.figure(figsize =(15, 5))
plt.subplot(1,2,1)
plt.pie(application_train["NAME_FAMILY_STATUS"].value_counts(),labels =l.index,autopct='%1.2f%%')
plt.subplot(1,2,2)
sns.countplot(x="NAME_FAMILY_STATUS",hue="TARGET",palette ="Set2",data=application_train)
plt.tight_layout()
plt.show()
"""## NAME_HOUSING_TYPE"""
l= application_train["NAME_HOUSING_TYPE"].value_counts()
l
fig = plt.figure(figsize =(20, 5))
plt.subplot(1,2,1)
plt.pie(application_train["NAME_HOUSING_TYPE"].value_counts(),labels =l.index,autopct='%1.2f%%')
plt.subplot(1,2,2)
sns.countplot(x="NAME_HOUSING_TYPE",hue="TARGET",palette ="Set2",data=application_train)
plt.tight_layout()
plt.show()
"""## OCCUPATION_TYPE"""
l=application_train["OCCUPATION_TYPE"].value_counts()
l
"""## WEEKDAY_APPR_PROCESS_START
"""
l=application_train["WEEKDAY_APPR_PROCESS_START"].value_counts()
l
fig = plt.figure(figsize =(20, 5))
plt.subplot(1,2,1)
plt.pie(application_train["WEEKDAY_APPR_PROCESS_START"].value_counts(),labels =l.index,autopct='%1.2f%%')
plt.subplot(1,2,2)
sns.countplot(x="WEEKDAY_APPR_PROCESS_START",hue="TARGET",palette ="Set2",data=application_train)
plt.tight_layout()
plt.show()
"""## ORGANIZATION_TYPE"""
l=application_train["ORGANIZATION_TYPE"].value_counts()
l
"""## FONDKAPREMONT_MODE"""
l=application_train["FONDKAPREMONT_MODE"].value_counts()
l
fig = plt.figure(figsize =(20, 5))
plt.subplot(1,2,1)
plt.pie(application_train["FONDKAPREMONT_MODE"].value_counts(),labels =l.index,autopct='%1.2f%%')
plt.subplot(1,2,2)
sns.countplot(x="FONDKAPREMONT_MODE",hue="TARGET",palette ="Set2",data=application_train)
plt.tight_layout()
plt.show()
"""## HOUSETYPE_MODE"""
l=application_train["HOUSETYPE_MODE"].value_counts()
l
fig = plt.figure(figsize =(15, 5))
plt.subplot(1,2,1)
plt.pie(application_train["HOUSETYPE_MODE"].value_counts(),labels =l.index,autopct='%1.2f%%')
plt.subplot(1,2,2)
sns.countplot(x="HOUSETYPE_MODE",hue="TARGET",palette ="Set2",data=application_train)
plt.tight_layout()
plt.show()
"""## WALLSMATERIAL_MODE """
l=application_train["WALLSMATERIAL_MODE"].value_counts()
l
fig = plt.figure(figsize =(15, 5))
plt.subplot(1,2,1)
plt.pie(application_train["WALLSMATERIAL_MODE"].value_counts(),labels =l.index,autopct='%1.2f%%')
plt.subplot(1,2,2)
sns.countplot(x="WALLSMATERIAL_MODE",hue="TARGET",palette ="Set2",data=application_train)
plt.tight_layout()
plt.show()
"""## EMERGENCYSTATE_MODE"""
l=application_train["EMERGENCYSTATE_MODE"].value_counts()
l
fig = plt.figure(figsize =(10, 5))
plt.subplot(1,2,1)
plt.pie(application_train["EMERGENCYSTATE_MODE"].value_counts(),labels =l.index,autopct='%1.2f%%')
plt.subplot(1,2,2)
sns.countplot(x="EMERGENCYSTATE_MODE",hue="TARGET",palette ="Set2",data=application_train)
plt.tight_layout()
plt.show()
obj_type
label_list = ['NAME_CONTRACT_TYPE', 'CODE_GENDER', 'FLAG_OWN_CAR','FLAG_OWN_REALTY', 'ORGANIZATION_TYPE']
one_hot_list = ['NAME_TYPE_SUITE', 'NAME_INCOME_TYPE','NAME_EDUCATION_TYPE','NAME_FAMILY_STATUS','NAME_HOUSING_TYPE','OCCUPATION_TYPE', 'WEEKDAY_APPR_PROCESS_START','FONDKAPREMONT_MODE','WALLSMATERIAL_MODE']
drop_list = ["HOUSETYPE_MODE","EMERGENCYSTATE_MODE"]
le = LabelEncoder()
for x in label_list:
le.fit(application_train[x])
application_train[x] = le.transform(application_train[x])
application_test[x] = le.transform(application_test[x])
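# Note: the encoder is fitted on the training column only; LabelEncoder.transform raises a
# ValueError if application_test contains a label that was never seen in application_train.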
application_train.drop(drop_list,axis=1,inplace=True)
application_test.drop(drop_list,axis=1,inplace=True)
train_id = application_train["SK_ID_CURR"]
test_id = application_test["SK_ID_CURR"]
train_target = application_train["TARGET"]
application_train.drop(["SK_ID_CURR","TARGET"],axis=1,inplace=True)
application_test.drop(["SK_ID_CURR"],axis=1,inplace=True)
print(application_train.shape)
print(application_test.shape)
obj_type = application_train.dtypes[application_train.dtypes=='object'].index
obj_type
application_train = pd.get_dummies(application_train,columns=one_hot_list)
application_test = pd.get_dummies(application_test,columns=one_hot_list)
print(application_train.shape)
print(application_test.shape)
application_train, application_test = application_train.align(application_test, join ='inner', axis = 1)
print(application_train.shape)
print(application_test.shape)
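# Note: align(..., join='inner') keeps only the columns common to both frames, so one-hot
# columns created from categories present in only one dataset are dropped, and the TARGET
# column disappears from the training frame as well; SK_ID_CURR and TARGET are therefore
# re-attached below from the copies saved earlier.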
application_test["SK_ID_CURR"] = test_id
application_train["SK_ID_CURR"] = train_id
application_train["TARGET"] = train_target
"""## Handling NUMERICAL DATA"""
application_train[int_type].head()
for x in int_type:
print(x)
"""## CNT_CHILDREN"""
l=application_train['CNT_CHILDREN'].value_counts()
l
fig = plt.figure(figsize =(10, 5))
plt.subplot(1,2,1)
plt.pie(application_train["CNT_CHILDREN"].value_counts(),labels =l.index,autopct='%1.2f%%')
plt.subplot(1,2,2)
sns.countplot(x="CNT_CHILDREN",hue="TARGET",palette ="Set2",data=application_train)
plt.tight_layout()
plt.show()
"""## DAYS_BIRTH
Client's age in days at the time of application (the value is only relative to the application date).
"""
application_train['DAYS_BIRTH'].apply(lambda x : -1*x/365).plot.hist()
application_train['DAYS_BIRTH'].apply(lambda x : -1*x/365).describe()
"""## DAYS_EMPLOYED"""
application_train['DAYS_EMPLOYED'].describe()
application_train['DAYS_EMPLOYED'].apply(lambda x : -1*x/365).describe()
"""here we see that max no of days employed showing 1000 years and showing positive . these are outliers."""
application_train['DAYS_EMPLOYED'].apply(lambda x : x/365).plot.hist()
"""So the DAYS_EMPLOYED greater than the 100 years are considered as outliers , we must Delete the Outliers """
application_train['DAYS_EMPLOYED'].apply(lambda x : x/365).value_counts()
"""35869 rows has the Days employed value 1000 years ,changing these rows to nan"""
application_train['DAYS_EMPLOYED'].replace({365243: np.nan}, inplace = True)
application_test['DAYS_EMPLOYED'].replace({365243: np.nan}, inplace = True)
application_train['DAYS_EMPLOYED'].plot.hist()
"""## FLAG_MOBIL :
Did client provide mobile phone (1=YES, 0=NO)
"""
l=application_train['FLAG_MOBIL'].value_counts()
l
fig = plt.figure(figsize =(8, 3))
plt.subplot(1,2,1)
plt.pie(application_train["FLAG_MOBIL"].value_counts(),labels =l.index)
# plt.subplot(1,2,2)
# sns.countplot(x="CNT_CHILDREN",hue="TARGET",palette ="Set2",data=application_train)
# plt.tight_layout()
plt.show()
application_train['FLAG_MOBIL'].value_counts()
application_test['FLAG_MOBIL'].value_counts()
"""so droping the column FLAG_MOBIL"""
application_train.drop(['FLAG_MOBIL'],axis=1,inplace=True)
application_test.drop(['FLAG_MOBIL'],axis=1,inplace=True)
"""## FLAG_DOCUMENT_#
The columns below are document flags; since nearly all applicants have the same value for each of them, they do not contribute useful information for prediction.
"""
d=['FLAG_DOCUMENT_2','FLAG_DOCUMENT_4','FLAG_DOCUMENT_6','FLAG_DOCUMENT_7','FLAG_DOCUMENT_9','FLAG_DOCUMENT_10','FLAG_DOCUMENT_11','FLAG_DOCUMENT_12','FLAG_DOCUMENT_13',
'FLAG_DOCUMENT_14','FLAG_DOCUMENT_15','FLAG_DOCUMENT_16','FLAG_DOCUMENT_17','FLAG_DOCUMENT_18','FLAG_DOCUMENT_19',
'FLAG_DOCUMENT_20','FLAG_DOCUMENT_21']
#for example :
l=application_train['FLAG_DOCUMENT_4'].value_counts()
l
fig = plt.figure(figsize =(8, 3))
plt.subplot(1,2,1)
plt.pie(application_train["FLAG_DOCUMENT_4"].value_counts(),labels =l.index)
plt.show()
l=application_train['FLAG_DOCUMENT_9'].value_counts()
l
fig = plt.figure(figsize =(8, 3))
plt.subplot(1,2,1)
plt.pie(application_train["FLAG_DOCUMENT_9"].value_counts(),labels =l.index)
plt.show()
application_train.drop(d,axis=1,inplace=True)
application_test.drop(d,axis=1,inplace=True)
print(application_train.shape)
print(application_test.shape)
"""## Handling Float"""
def plot_kde(col):
plt.suptitle(col, fontsize=30)
sns.kdeplot(application_train.loc[application_train['TARGET'] == 0, col],label='TARGET=0')
sns.kdeplot(application_train.loc[application_train['TARGET'] == 1, col],label='TARGET=1')
plt.legend(['TARGET(0)', 'TARGET(1)'])
plt.show()
def plot_hist(col):
plt.suptitle(col, fontsize=30)
application_train.loc[application_train['TARGET'] == 0, col].hist( )
application_train.loc[application_train['TARGET'] == 1, col].hist( )
plt.legend(['TARGET(0)', 'TARGET(1)'])
plt.show()
def kde_hist(col):
plt.suptitle(col, fontsize=30)
fig, ax = plt.subplots(1, 2, figsize=(16, 8))
sns.kdeplot(application_train.loc[application_train['TARGET'] == 0,col], ax=ax[0], label='TARGET(0)')
sns.kdeplot(application_train.loc[application_train['TARGET'] == 1,col], ax=ax[0], label='TARGET(1)')
ax[0].set_title('KDE plot')
ax[1].set_title('Histogram plot')
application_train.loc[application_train['TARGET'] == 0, col].hist(ax=ax[1])
application_train.loc[application_train["TARGET"] == 1, col].hist(ax=ax[1])
ax[1].legend(['TARGET(0)', 'TARGET(1)'])
plt.show()
"""## AMT_INCOME_TOTAL"""
missing_data(pd.DataFrame(application_train["AMT_INCOME_TOTAL"]))
application_train["AMT_INCOME_TOTAL"].describe()
plt.figure(figsize=(5,5))
sns.boxplot(application_train["AMT_INCOME_TOTAL"])
kde_hist("AMT_INCOME_TOTAL")
"""we can observe that higher income does not have any problem in repaying the loan
## AMT_CREDIT : Credit amount of the loan
"""
missing_data(pd.DataFrame(application_train["AMT_CREDIT"]))
plot_kde("AMT_CREDIT")
"""## AMT_ANNUITY : Loan annuity"""
missing_data(pd.DataFrame(application_train["AMT_ANNUITY"]))
plot_kde("AMT_ANNUITY")
plt.figure(figsize=(5,5))
sns.boxplot(application_train["AMT_ANNUITY"])
"""## OWN_CAR_AGE :Age of client's car"""
missing_data(pd.DataFrame(application_train["OWN_CAR_AGE"]))
application_train["OWN_CAR_AGE"].describe()
plot_kde("OWN_CAR_AGE")
"""considering nan value means no car ,so no age so filling with zero."""
application_train["OWN_CAR_AGE"].fillna(0,inplace=True)
application_test["OWN_CAR_AGE"].fillna(0,inplace=True)
"""## CNT_FAM_MEMBERS : How many family members does client have"""
missing_data(pd.DataFrame(application_train["CNT_FAM_MEMBERS"]))
application_train["CNT_FAM_MEMBERS"].value_counts()
plot_hist("CNT_FAM_MEMBERS")
"""we can observer that more the family members difficulty in paying loan .
## EXT_SOURCE_1 ,EXT_SOURCE_2,EXT_SOURCE_3 :
Normalized score from external data source
"""
missing_data(pd.DataFrame(application_train[["EXT_SOURCE_1","EXT_SOURCE_2","EXT_SOURCE_3"]]))
# #droping EXT_SOURCE_1
# application_train.drop(["EXT_SOURCE_1"],axis=1,inplace=True)
# application_test.drop(["EXT_SOURCE_1"],axis=1,inplace=True)
kde_hist("EXT_SOURCE_2")
kde_hist("EXT_SOURCE_3")
"""## TOTALAREA_MODE"""
missing_data(pd.DataFrame(application_train["TOTALAREA_MODE"]))
application_train["TOTALAREA_MODE"].describe()
application_train["TOTALAREA_MODE"].fillna(0,inplace=True)
application_test["TOTALAREA_MODE"].fillna(0,inplace=True)
kde_hist("TOTALAREA_MODE")
"""## OBS_30_CNT_SOCIAL_CIRCLE ,OBS_60 _CNT_SOCIAL_CIRCLE
How many observation of client's social surroundings with observable 30 DPD (days past due) default
How many observation of client's social surroundings with observable 60 DPD (days past due) default
"""
missing_data(pd.DataFrame(application_train[["OBS_30_CNT_SOCIAL_CIRCLE","OBS_60_CNT_SOCIAL_CIRCLE"]]))
application_train["OBS_30_CNT_SOCIAL_CIRCLE"].fillna(0,inplace=True)
application_test["OBS_30_CNT_SOCIAL_CIRCLE"].fillna(0,inplace=True)
application_train["OBS_60_CNT_SOCIAL_CIRCLE"].fillna(0,inplace=True)
application_test["OBS_60_CNT_SOCIAL_CIRCLE"].fillna(0,inplace=True)
application_train["OBS_30_CNT_SOCIAL_CIRCLE"].value_counts()
sns.boxplot(application_train["OBS_30_CNT_SOCIAL_CIRCLE"])
application_train["OBS_30_CNT_SOCIAL_CIRCLE"] = application_train["OBS_30_CNT_SOCIAL_CIRCLE"].apply(lambda x: 25 if x>25 else x)
application_test["OBS_30_CNT_SOCIAL_CIRCLE"]= application_test["OBS_30_CNT_SOCIAL_CIRCLE"].apply(lambda x: 25 if x>25 else x)
application_train["OBS_60_CNT_SOCIAL_CIRCLE"] = application_train["OBS_60_CNT_SOCIAL_CIRCLE"].apply(lambda x: 25 if x>25 else x)
application_test["OBS_60_CNT_SOCIAL_CIRCLE"]= application_test["OBS_60_CNT_SOCIAL_CIRCLE"].apply(lambda x: 25 if x>25 else x)
sns.boxplot(application_train["OBS_60_CNT_SOCIAL_CIRCLE"])
"""## DEF_30_CNT_SOCIAL_CIRCLE, DEF_60_CNT_SOCIAL_CIRCLE,
How many observation of client's social surroundings defaulted on 30 (days past due) DPD
How many observation of client's social surroundings defaulted on 60 (days past due) DPD
"""
missing_data(pd.DataFrame(application_train[["DEF_30_CNT_SOCIAL_CIRCLE","DEF_60_CNT_SOCIAL_CIRCLE"]]))
application_train["DEF_30_CNT_SOCIAL_CIRCLE"].fillna(0,inplace=True)
application_test["DEF_30_CNT_SOCIAL_CIRCLE"].fillna(0,inplace=True)
application_train["DEF_60_CNT_SOCIAL_CIRCLE"].fillna(0,inplace=True)
application_test["DEF_60_CNT_SOCIAL_CIRCLE"].fillna(0,inplace=True)
application_train["DEF_30_CNT_SOCIAL_CIRCLE"].value_counts()
application_train["DEF_60_CNT_SOCIAL_CIRCLE"].value_counts()
sns.boxplot(application_train["DEF_30_CNT_SOCIAL_CIRCLE"])
application_train["DEF_30_CNT_SOCIAL_CIRCLE"] = application_train["DEF_30_CNT_SOCIAL_CIRCLE"].apply(lambda x: 5 if x>4 else x)
application_test["DEF_30_CNT_SOCIAL_CIRCLE"]= application_test["DEF_30_CNT_SOCIAL_CIRCLE"].apply(lambda x: 5 if x>4 else x)
application_train["DEF_60_CNT_SOCIAL_CIRCLE"] = application_train["DEF_60_CNT_SOCIAL_CIRCLE"].apply(lambda x: 5 if x>4 else x)
application_test["DEF_60_CNT_SOCIAL_CIRCLE"]= application_test["DEF_60_CNT_SOCIAL_CIRCLE"].apply(lambda x: 5 if x>4 else x)
"""## DAYS_LAST_PHONE_CHANGE :
How many days before application did client change phone
"""
missing_data(pd.DataFrame(application_train["DAYS_LAST_PHONE_CHANGE"]))
application_train["DAYS_LAST_PHONE_CHANGE"].describe()
application_train["DAYS_LAST_PHONE_CHANGE"]=application_train["DAYS_LAST_PHONE_CHANGE"].apply(lambda x: x*-1)
application_test["DAYS_LAST_PHONE_CHANGE"]=application_test["DAYS_LAST_PHONE_CHANGE"].apply(lambda x: x*-1)
kde_hist("DAYS_LAST_PHONE_CHANGE")
"""## 'AMT_REQ_CREDIT_BUREAU_HOUR', 'AMT_REQ_CREDIT_BUREAU_DAY', 'AMT_REQ_CREDIT_BUREAU_WEEK', 'AMT_REQ_CREDIT_BUREAU_MON', 'AMT_REQ_CREDIT_BUREAU_QRT', 'AMT_REQ_CREDIT_BUREAU_YEAR'
Number of enquiries to Credit Bureau about the client _____ hour before application
"""
AMT_REQ = ['AMT_REQ_CREDIT_BUREAU_HOUR','AMT_REQ_CREDIT_BUREAU_DAY','AMT_REQ_CREDIT_BUREAU_WEEK','AMT_REQ_CREDIT_BUREAU_MON', 'AMT_REQ_CREDIT_BUREAU_QRT', 'AMT_REQ_CREDIT_BUREAU_YEAR']
missing_data(application_train[AMT_REQ])
application_train[AMT_REQ]=application_train[AMT_REQ].fillna(0)
application_test[AMT_REQ]=application_test[AMT_REQ].fillna(0)
AVG = [ 'APARTMENTS_AVG', 'BASEMENTAREA_AVG', 'YEARS_BEGINEXPLUATATION_AVG','YEARS_BUILD_AVG', 'COMMONAREA_AVG', 'ELEVATORS_AVG', 'ENTRANCES_AVG', 'FLOORSMAX_AVG', 'FLOORSMIN_AVG', 'LANDAREA_AVG',
'LIVINGAPARTMENTS_AVG', 'LIVINGAREA_AVG', 'NONLIVINGAPARTMENTS_AVG','NONLIVINGAREA_AVG']
MODE = ['APARTMENTS_MODE','BASEMENTAREA_MODE', 'YEARS_BEGINEXPLUATATION_MODE', 'YEARS_BUILD_MODE','COMMONAREA_MODE','ELEVATORS_MODE', 'ENTRANCES_MODE', 'FLOORSMAX_MODE', 'FLOORSMIN_MODE',
'LANDAREA_MODE', 'LIVINGAPARTMENTS_MODE', 'LIVINGAREA_MODE', 'NONLIVINGAPARTMENTS_MODE', 'NONLIVINGAREA_MODE']
MEDI = ['APARTMENTS_MEDI','BASEMENTAREA_MEDI','YEARS_BEGINEXPLUATATION_MEDI','YEARS_BUILD_MEDI','COMMONAREA_MEDI','ELEVATORS_MEDI','ENTRANCES_MEDI','FLOORSMAX_MEDI',
'FLOORSMIN_MEDI','LANDAREA_MEDI','LIVINGAPARTMENTS_MEDI','LIVINGAREA_MEDI','NONLIVINGAPARTMENTS_MEDI','NONLIVINGAREA_MEDI']
missing_data(application_train[AVG])
missing_data(application_train[MODE])
missing_data(application_train[MEDI])
obj_type = application_train.dtypes[application_train.dtypes=='object'].index
float_type = application_train.dtypes[application_train.dtypes=='float64'].index
int_type = application_train.dtypes[application_train.dtypes=='int64'].index
missing_data(application_train[float_type]).head(50)
# application_train.to_csv("/content/drive/MyDrive/Home Credit/preprocessed_data/app_train.csv",index=False)
# application_test.to_csv("/content/drive/MyDrive/Home Credit/preprocessed_data/app_test.csv",index=False)
"""## CREDIT_CARD"""
credit_df = pd.read_csv("/content/drive/MyDrive/Home Credit/Data/credit_card_balance.csv");
credit_features_train = application_train[["SK_ID_CURR","TARGET"]]
credit_features_test =pd.DataFrame(application_test["SK_ID_CURR"])
credit_df.dtypes.value_counts()
def plot_kde_2(col):
plt.suptitle(col, fontsize=30)
sns.kdeplot(credit_features_train.loc[credit_features_train['TARGET'] == 0, col],label='TARGET=0')
sns.kdeplot(credit_features_train.loc[credit_features_train['TARGET'] == 1, col],label='TARGET=1')
plt.legend(['TARGET(0)', 'TARGET(1)'])
plt.show()
def plot_hist_2(col):
plt.suptitle(col, fontsize=30)
credit_features_train.loc[credit_features_train['TARGET'] == 0, col].hist( )
credit_features_train.loc[credit_features_train['TARGET'] == 1, col].hist( )
plt.legend(['TARGET(0)', 'TARGET(1)'])
plt.show()
def kde_hist_2(col):
plt.suptitle(col, fontsize=30)
fig, ax = plt.subplots(1, 2, figsize=(16, 8))
sns.kdeplot(credit_features_train.loc[credit_features_train['TARGET'] == 0,col], ax=ax[0], label='TARGET(0)')
sns.kdeplot(credit_features_train.loc[credit_features_train['TARGET'] == 1,col], ax=ax[0], label='TARGET(1)')
ax[0].set_title('KDE plot')
ax[1].set_title('Histogram plot')
credit_features_train.loc[credit_features_train['TARGET'] == 0, col].hist(ax=ax[1])
credit_features_train.loc[credit_features_train["TARGET"] == 1, col].hist(ax=ax[1])
ax[1].legend(['TARGET(0)', 'TARGET(1)'])
plt.show()
def plot_count_2(col):
sns.countplot(x=col, data=credit_features_train)
missing_data(credit_df).head(23)
obj_type = credit_df.dtypes[credit_df.dtypes=='object'].index
float_type = credit_df.dtypes[credit_df.dtypes=='float64'].index
int_type = credit_df.dtypes[credit_df.dtypes=='int64'].index
credit_df[obj_type].head()
"""## NO of previous loans per coutomer"""
NO_LOANS = credit_df.groupby(by = ['SK_ID_CURR'])['SK_ID_PREV'].nunique().reset_index().rename(index = str, columns = {'SK_ID_PREV': 'NO_LOANS'})
NO_LOANS["NO_LOANS"].value_counts()
credit_features_train = credit_features_train.merge(NO_LOANS,on=["SK_ID_CURR"],how="left")
credit_features_test = credit_features_test.merge(NO_LOANS,on=["SK_ID_CURR"],how="left")
print(credit_features_train.shape)
print(credit_features_test.shape)
credit_features_train["NO_LOANS"].value_counts()
credit_features_train["NO_LOANS"].fillna(0,inplace=True)
credit_features_test["NO_LOANS"].fillna(0,inplace=True)
fig = plt.figure(figsize =(15, 3))
plt.subplot(1,2,1)
plt.pie(credit_features_train["NO_LOANS"].value_counts(),colors=['C1','C2','C3','C4','C5'], labels = ["NO_LOANS=0","NO_LOANS=1","NO_LOANS=2","NO_LOANS=3","NO_LOANS=4"])
plt.subplot(1,2,2)
sns.countplot(x="NO_LOANS",hue="TARGET",palette = "Set2",data=credit_features_train)
plt.tight_layout()
plt.show()
credit_df[int_type].head()
missing_data(credit_df[int_type])
AVG_DPD = credit_df.groupby(by= ['SK_ID_CURR'])['SK_DPD'].mean().reset_index().rename(index = str, columns = {'SK_DPD': 'AVG_DPD'})
credit_features_train = credit_features_train.merge(AVG_DPD, on = ['SK_ID_CURR'], how = 'left')
credit_features_test = credit_features_test.merge(AVG_DPD, on = ['SK_ID_CURR'], how = 'left')
print(credit_features_train.shape)
print(credit_features_test.shape)
credit_features_test.head()
credit_features_train.fillna(0,inplace=True)
credit_features_test.fillna(0,inplace=True)
missing_data(credit_df[float_type])
credit_df[float_type].head()
"""## NO OF INSTALMENTS PAID BY CUSTOMER PER LOAN
CNT_INSTALMENT_MATURE_CUM : Gives Number of paid installments on the previous credit.
"""
grp = credit_df.groupby(by = ['SK_ID_CURR', 'SK_ID_PREV'])['CNT_INSTALMENT_MATURE_CUM'].max().reset_index().rename(index = str, columns = {'CNT_INSTALMENT_MATURE_CUM': 'NO_INSTALMENTS'})
grp1 = grp.groupby(by = ['SK_ID_CURR'])['NO_INSTALMENTS'].sum().reset_index().rename(index = str, columns = {'NO_INSTALMENTS': 'TOTAL_INSTALMENTS'})
credit_features_train = credit_features_train.merge(grp1,on = ['SK_ID_CURR'], how = 'left')
credit_features_test = credit_features_test.merge(grp1,on=['SK_ID_CURR'],how='left')
credit_features_train.fillna(0,inplace=True)
credit_features_test.fillna(0,inplace=True)
print(credit_features_train.shape)
print(credit_features_test.shape)
credit_features_train.drop(["SK_ID_CURR","TARGET"],axis=1,inplace=True)
credit_features_test.drop(["SK_ID_CURR"],axis=1,inplace=True)
application_train=pd.concat([application_train, credit_features_train], axis=1)
application_test=pd.concat([application_test, credit_features_test],axis=1)
del credit_df
del credit_features_test
del credit_features_train
"""## installments"""
installments = pd.read_csv("/content/drive/MyDrive/Home Credit/Data/installments_payments.csv")
installment_train = application_train[["SK_ID_CURR","TARGET"]]
installment_test =pd.DataFrame(application_test["SK_ID_CURR"])
installments.shape
missing_data(installments)
installments.fillna(0,inplace=True)
installments['Days_Extra_Taken']=installments['DAYS_INSTALMENT']-installments['DAYS_ENTRY_PAYMENT']
installments['AMT_INSTALMENT_difference']=installments['AMT_INSTALMENT']-installments['AMT_PAYMENT']
installments.drop(["DAYS_INSTALMENT","AMT_INSTALMENT"],axis=1,inplace=True)
installments.drop(["DAYS_ENTRY_PAYMENT","AMT_PAYMENT"],axis=1,inplace=True)
temp = installments.drop(["NUM_INSTALMENT_VERSION","NUM_INSTALMENT_NUMBER"],axis=1)
grp = temp.groupby(["SK_ID_CURR"])["Days_Extra_Taken","AMT_INSTALMENT_difference"].max().reset_index().rename(index = str, columns = {"Days_Extra_Taken": 'MAX_Days_Extra_Taken',"AMT_INSTALMENT_difference":"MAX_AMT_INSTALMENT_difference"})
grp["MAX_Days_Extra_Taken"].describe()
installment_train = installment_train.merge(grp,on=["SK_ID_CURR"],how="left")
installment_test = installment_test.merge(grp,on=["SK_ID_CURR"],how="left")
del temp
del grp
del grp1
print(installment_test.shape)
print(installment_train.shape)
installment_test.fillna(0,inplace=True)
installment_train.fillna(0,inplace=True)
missing_data(installment_train)
installment_train.drop(["SK_ID_CURR","TARGET"],axis=1,inplace=True)
installment_test.drop(["SK_ID_CURR"],axis=1,inplace=True)
application_train=pd.concat([application_train, installment_train],axis=1)
application_test=pd.concat([application_test, installment_test], axis=1)
print(application_train.shape)
print(application_test.shape)
del installment_test
del installment_train
del installments
"""##POS_CASH :"""
pos_cash = pd.read_csv("/content/drive/MyDrive/Home Credit/Data/POS_CASH_balance.csv")
pos_train = application_train[["SK_ID_CURR","TARGET"]]
pos_test =pd.DataFrame(application_test["SK_ID_CURR"])
temp1 = pos_cash.groupby(by= ['SK_ID_CURR'])['SK_DPD'].mean().reset_index().rename(index = str, columns = {'SK_DPD': 'AVG_DPD'})
temp2 = pos_cash.groupby(by= ['SK_ID_CURR'])['SK_DPD_DEF'].mean().reset_index().rename(index = str, columns = {'SK_DPD_DEF': 'AVG_DPD_DEF'})
pos_train = pos_train.merge(temp1, on = ['SK_ID_CURR'], how = 'left')
pos_test = pos_test.merge(temp1, on = ['SK_ID_CURR'], how = 'left')
pos_train = pos_train.merge(temp2, on = ['SK_ID_CURR'], how = 'left')
pos_test = pos_test.merge(temp2, on = ['SK_ID_CURR'], how = 'left')
del temp1
del temp2
"""# CNT_INSTALMENT_MATURE_CUM : Gives Number of paid installments on the previous credit."""
grp = pos_cash.groupby(by = ['SK_ID_CURR', 'SK_ID_PREV'])['CNT_INSTALMENT'].max().reset_index().rename(index = str, columns = {'CNT_INSTALMENT': 'POS_PAID_INSTALMENTS'})
grp1 = grp.groupby(by = ['SK_ID_CURR'])['POS_PAID_INSTALMENTS'].sum().reset_index().rename(index = str, columns = {'POS_PAID_INSTALMENTS': 'POS_TOTAL_PAID_INSTALMENTS'})
grp2 = pos_cash.groupby(by = ['SK_ID_CURR', 'SK_ID_PREV'])['CNT_INSTALMENT_FUTURE'].min().reset_index().rename(index = str, columns = {'CNT_INSTALMENT_FUTURE': 'POS_NOTPAID_INSTALMENTS'})
grp3 = grp2.groupby(by = ['SK_ID_CURR'])['POS_NOTPAID_INSTALMENTS'].sum().reset_index().rename(index = str, columns = {'POS_NOTPAID_INSTALMENTS': 'POS_TOTAL_NOTPAID_INSTALMENTS'})
pos_train = pos_train.merge(grp1, on = ['SK_ID_CURR'], how = 'left')
pos_test = pos_test.merge(grp1, on = ['SK_ID_CURR'], how = 'left')
pos_train = pos_train.merge(grp3, on = ['SK_ID_CURR'], how = 'left')
pos_test = pos_test.merge(grp3, on = ['SK_ID_CURR'], how = 'left')
del grp1
del grp2
del grp3
del grp
POS_NO_LOANS = pos_cash.groupby(by = ['SK_ID_CURR'])['SK_ID_PREV'].nunique().reset_index().rename(index = str, columns = {'SK_ID_PREV': 'NO_LOANS'})
pos_train = pos_train.merge(POS_NO_LOANS, on = ['SK_ID_CURR'], how = 'left')
pos_test = pos_test.merge(POS_NO_LOANS, on = ['SK_ID_CURR'], how = 'left')
del POS_NO_LOANS
print(pos_train.shape)
print(pos_test.shape)
pos_train.fillna(0,inplace=True)
pos_test.fillna(0,inplace=True)
pos_train.drop(["SK_ID_CURR","TARGET"],axis=1,inplace=True)
pos_test.drop(["SK_ID_CURR"],axis=1,inplace=True)
application_train=pd.concat([application_train, pos_train],axis=1)
application_test=pd.concat([application_test, pos_test], axis=1)
del pos_cash
del pos_train
del pos_test
print(application_train.shape)
print(application_test.shape)
"""## bureau"""
b1= | pd.read_csv("/content/drive/MyDrive/Home Credit/Data/bureau.csv") | pandas.read_csv |
import finterstellar as fs
import pandas as pd
import numpy as np
import datetime as dt
class LoadData:
def read_investing_price(self, path, cd):
file_name = path + cd + ' Historical Data.csv'
df = pd.read_csv(file_name, index_col='Date')
return (df)
def create_portfolio_df(self, path, p_name, p_cd):
new_df = self.make_historical_price_df(path, p_cd)
prices_df = self.create_master_file(path, p_name, new_df)
prices_df = self.update_master_file(path, p_name, new_df)
return (prices_df)
def make_historical_price_df(self, path, s_cd):
cds = fs.str_list(s_cd)
dates = pd.Series(dtype=object)
for c in cds:
prices_df = self.read_investing_price(path, c)
prices_df = self.date_formatting(prices_df)
c = prices_df['Price']
dates_new = pd.Series(prices_df.index)
dates = pd.concat([dates, dates_new])
dates = dates.drop_duplicates().sort_values().reset_index()
dates = dates.drop(['index'], axis=1)
universe_df = pd.DataFrame(index=dates[0])
universe_df.index.name = 'Date'
for c in cds:
prices_df = self.read_investing_price(path, c)
prices_df = self.date_formatting(prices_df)
prices_df = self.price_df_trimming(prices_df, c)
universe_df[c] = prices_df[c]
universe_df
universe_df = universe_df.fillna(method='ffill')
return (universe_df)
def create_master_file(self, path, f_name, df):
file_name = path + 'fs ' + f_name + '.csv'
try:
f = open(file_name)
print('Updating master file')
f.close()
except IOError as e:
df.index = pd.to_datetime(df.index)
df.index.name = 'Date'
#df = df.fillna(method='ffill')
#today_date = pd.Timestamp.today().date().strftime('%y%m%d')
df.to_csv(file_name)
return (df)
def update_master_file(self, path, n, new_df):
try:
file_name = 'fs ' + n + '.csv'
master_df = self.read_master_file(path, n)
universe_df = new_df.combine_first(master_df)
universe_df.index.name = 'Date'
#universe_df = universe_df.fillna(method='ffill')
universe_df.to_csv(path + file_name)
except IOError as e:
print('Creating master file')
self.create_master_file(path, n, new_df)
universe_df = new_df
return (universe_df)
def read_master_file(self, path, n):
file_name = path + 'fs ' + n + '.csv'
prices_df = pd.read_csv(file_name, index_col='Date')
dates = []
for i in prices_df.index:
d = pd.to_datetime(i)
dates.append(d)
prices_df['Date'] = dates # replace the Date values with parsed datetimes
prices_df = prices_df.set_index('Date')
return (prices_df)
def get_codes(self, prices_df):
codes = prices_df.columns.values
return (codes)
def read_raw_csv(self, path, n):
file_name = path + n + '.csv'
df = pd.read_csv(file_name, index_col='Date')
dates = []
for i in df.index:
#d = dt.datetime.strptime(i, '%Y-%m-%d')
d = pd.to_datetime(i)
dates.append(d)
df['Date'] = dates # replace the Date values with parsed datetimes
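# (a vectorized alternative to the loop above would be df['Date'] = pd.to_datetime(df.index),
# assuming every index entry parses cleanly)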
df = df.set_index('Date')
df.sort_index(axis=0, inplace=True)
return (df)
def read_raw_excel(self, path, n, sheet=None):
file_name = path + n
df = pd.read_excel(file_name, index_col=0)
dates = []
for i in df.index:
d = | pd.to_datetime(i) | pandas.to_datetime |
import pandas as pd
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
from datetime import datetime, timedelta # used to record output timestamps and time the loops
# matplotlib.use('Agg')
from IPython import display
#%matplotlib inline
import torch
from pprint import pprint
import itertools
from pathlib import Path
from stable_baselines3 import A2C
import sys, os
import hashlib
from finrl.neo_finrl.preprocessor.yahoodownloader import YahooDownloader
from finrl.neo_finrl.preprocessor.preprocessors import FeatureEngineer
#%load_ext autoreload
#%autoreload 2
mpl.rcParams.update({"font.size": 16})
# import sys
# sys.path.append("../FinRL-Library")
# import sys,os
# sys.path.append(os.path.dirname(os.path.realpath(".")))
import yfinance as yf
DATASETS_FULL_PATH = [
"dow_full.csv",
"nas_full.csv",
"sp_full.csv",
]
def data_split(df, start, end):
"""
split the dataset into training or testing using date
:param data: (df) pandas dataframe, start, end
:return: (df) pandas dataframe
"""
data = df[(df.date >= start) & (df.date < end)]
data = data.sort_values(["date", "tic"], ignore_index=True)
data.index = data.date.factorize()[0]
return data
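# Example usage of data_split (hypothetical date boundaries, shown for illustration only):
# train_df = data_split(processed_full, "2009-01-01", "2019-01-01")
# trade_df = data_split(processed_full, "2019-01-01", "2021-01-01")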
def preprocess(
dataset_dir,
market_id,
start_date,
end_date,
ticker_list,
train_start,
train_end,
val_start,
val_end,
test_start,
test_end,
tech_indicators,
cache_dir,
):
ticker_list.sort()
encoder = hashlib.sha256()
encoder.update("_".join(list(ticker_list)).encode())
encoder.update("_".join(list(tech_indicators)).encode())
cache_path = cache_dir/ f"data_{market_id}_{start_date}_{end_date}_{encoder.hexdigest()}.csv"
# cache the raw data
if os.path.exists(cache_path):
processed_full = pd.read_csv(cache_path)
print(f"load data from cahe: {cache_path} .")
else:
"""
df = YahooDownloader(
start_date, #'2000-01-01',
end_date, # 2021-01-01; the plan is to change this date to '2021-06-20' (today's date)
ticker_list=ticker_list,
).fetch_data() # DOW_30_TICKER: the 30 Dow Jones stocks
assert len(df["tic"].unique().tolist()) == len(ticker_list)
"""
# load raw data from cache file
df = pd.read_csv(dataset_dir / DATASETS_FULL_PATH[market_id])
df = df[
(df.date >= start_date) & (df.date < end_date) & df.tic.isin(ticker_list)
]
# Data preprocessing ###############################
df.sort_values(["date", "tic"]).head()
# tech_indicators = ["macd", "rsi_30", "cci_30", "dx_30"]
fe = FeatureEngineer(
use_technical_indicator=True,
tech_indicator_list=tech_indicators,
use_turbulence=False,
user_defined_feature=False,
)
processed = fe.preprocess_data(df)
list_date = list(
pd.date_range(processed["date"].min(), processed["date"].max()).astype(str)
) # build a fixed-frequency date index as strings
combination = list(itertools.product(list_date, ticker_list))
"""
1.pandas.date_range(start=None, end=None, periods=None, freq='D', tz=None, normalize=False, name=None, closed=None, **kwargs)
由于import pandas as pd,所以也可以写成pd.date_range(start=None, end=None)
该函数主要用于生成一个固定频率的时间索引,使用时必须指定start、end、periods中的两个参数值,否则报错。
2.df.astype('str') #改变整个df变成str数据类型
3.itertools.product(*iterables[, repeat]) # 对应有序的重复抽样过程
itertools.product(a,b),将a,b元组中的每个分量依次乘开。
"""
processed_full = | pd.DataFrame(combination, columns=["date", "tic"]) | pandas.DataFrame |
"""
Plot full psychometric functions as a function of choice history,
and separately for 20/80 and 80/20 blocks
"""
import pandas as pd
import numpy as np
import time, os
import matplotlib.pyplot as plt
import seaborn as sns
import datajoint as dj
from IPython import embed as shell # for debugging
## INITIALIZE A FEW THINGS
sns.set(style="darkgrid", context="paper", font='Arial')
sns.set(style="darkgrid", context="paper")
sns.set(style="darkgrid", context="paper", font_scale=1.3)
# import wrappers etc
from ibl_pipeline import reference, subject, action, acquisition, data, behavior
from ibl_pipeline.utils import psychofit as psy
from ibl_pipeline.analyses import behavior as behavioral_analyses
from dj_tools import *
new_criteria = dj.create_virtual_module('analyses', 'user_anneurai_analyses')
figpath = os.path.join(os.path.expanduser('~'), 'Data/Figures_IBL')
# ================================= #
# 1. get training status from original DJ table
# ================================= #
use_subjects = subject.Subject * subject.SubjectProject & 'subject_project = "ibl_neuropixel_brainwide_01"'
use_sessions = use_subjects * subject.SubjectLab * subject.Subject.aggr(behavior.TrialSet,
session_start_time='max(session_start_time)')
# QUICK PIE PLOT
plt.close('all')
sns.set_palette("cubehelix")
fig, ax = plt.subplots(1, 2, figsize=(13, 13))
# ================================= #
# v0
# ================================= #
sess = behavioral_analyses.SessionTrainingStatus() * use_sessions
df1 = pd.DataFrame(sess.fetch(as_dict=True))
df2 = df1.groupby(['training_status'])['subject_uuid'].count().reset_index()
df2.index = df2.training_status
df2 = df2.reindex(['-', 'over40days', 'training in progress', 'trained', '.', 'ready for ephys', '..'])
df2.fillna(0, inplace=True)
original = df2.copy()
print(df2)
ax[0].pie(df2['subject_uuid'], autopct='%1.2f%%', labels=df2.index)
ax[0].set_title('Original criteria (v0), n = %d'%df2['subject_uuid'].sum())
# ================================= #
# v1
# ================================= #
# sns.set_palette("Set2")
sess = new_criteria.SessionTrainingStatus() * use_sessions
df3 = pd.DataFrame(sess.fetch(as_dict=True))
df4 = df3.groupby(['training_status'])['subject_uuid'].count().reset_index()
df4.index = df4.training_status
df4 = df4.reindex(['unbiasable', 'untrainable', 'in_training', 'trained_1a', 'trained_1b',
'ready4ephysrig', 'ready4recording'])
df4.fillna(0, inplace=True)
print(df4)
new = df4.copy()
ax[1].pie(df4['subject_uuid'], autopct='%1.2f%%', labels=df4.index)
ax[1].set_title('Alternative criteria (v1), n = %d'%df4['subject_uuid'].sum())
fig.savefig(os.path.join(figpath, "training_success.pdf"))
fig.savefig(os.path.join(figpath, "training_success.png"), dpi=300)
plt.close('all')
# ================================= #
# COMPARE PER LAB
# ================================= #
# WRITE A SUMMARY DOCUMENT
df5 = | pd.merge(df1, df3, on='subject_nickname') | pandas.merge |
# -*- coding: utf-8 -*-
# pylint: disable=W0612,E1101
from datetime import datetime
import operator
import nose
from functools import wraps
import numpy as np
import pandas as pd
from pandas import Series, DataFrame, Index, isnull, notnull, pivot, MultiIndex
from pandas.core.datetools import bday
from pandas.core.nanops import nanall, nanany
from pandas.core.panel import Panel
from pandas.core.series import remove_na
import pandas.core.common as com
from pandas import compat
from pandas.compat import range, lrange, StringIO, OrderedDict, signature
from pandas import SparsePanel
from pandas.util.testing import (assert_panel_equal, assert_frame_equal,
assert_series_equal, assert_almost_equal,
assert_produces_warning, ensure_clean,
assertRaisesRegexp, makeCustomDataframe as
mkdf, makeMixedDataFrame)
import pandas.core.panel as panelm
import pandas.util.testing as tm
def ignore_sparse_panel_future_warning(func):
"""
decorator to ignore FutureWarning if we have a SparsePanel
can be removed when SparsePanel is fully removed
"""
@wraps(func)
def wrapper(self, *args, **kwargs):
if isinstance(self.panel, SparsePanel):
with assert_produces_warning(FutureWarning,
check_stacklevel=False):
return func(self, *args, **kwargs)
else:
return func(self, *args, **kwargs)
return wrapper
class PanelTests(object):
panel = None
def test_pickle(self):
unpickled = self.round_trip_pickle(self.panel)
assert_frame_equal(unpickled['ItemA'], self.panel['ItemA'])
def test_rank(self):
self.assertRaises(NotImplementedError, lambda: self.panel.rank())
def test_cumsum(self):
cumsum = self.panel.cumsum()
assert_frame_equal(cumsum['ItemA'], self.panel['ItemA'].cumsum())
def not_hashable(self):
c_empty = Panel()
c = Panel(Panel([[[1]]]))
self.assertRaises(TypeError, hash, c_empty)
self.assertRaises(TypeError, hash, c)
class SafeForLongAndSparse(object):
_multiprocess_can_split_ = True
def test_repr(self):
repr(self.panel)
@ignore_sparse_panel_future_warning
def test_copy_names(self):
for attr in ('major_axis', 'minor_axis'):
getattr(self.panel, attr).name = None
cp = self.panel.copy()
getattr(cp, attr).name = 'foo'
self.assertIsNone(getattr(self.panel, attr).name)
def test_iter(self):
tm.equalContents(list(self.panel), self.panel.items)
def test_count(self):
f = lambda s: notnull(s).sum()
self._check_stat_op('count', f, obj=self.panel, has_skipna=False)
def test_sum(self):
self._check_stat_op('sum', np.sum)
def test_mean(self):
self._check_stat_op('mean', np.mean)
def test_prod(self):
self._check_stat_op('prod', np.prod)
def test_median(self):
def wrapper(x):
if isnull(x).any():
return np.nan
return np.median(x)
self._check_stat_op('median', wrapper)
def test_min(self):
self._check_stat_op('min', np.min)
def test_max(self):
self._check_stat_op('max', np.max)
def test_skew(self):
try:
from scipy.stats import skew
except ImportError:
raise nose.SkipTest("no scipy.stats.skew")
def this_skew(x):
if len(x) < 3:
return np.nan
return skew(x, bias=False)
self._check_stat_op('skew', this_skew)
# def test_mad(self):
# f = lambda x: np.abs(x - x.mean()).mean()
# self._check_stat_op('mad', f)
def test_var(self):
def alt(x):
if len(x) < 2:
return np.nan
return np.var(x, ddof=1)
self._check_stat_op('var', alt)
def test_std(self):
def alt(x):
if len(x) < 2:
return np.nan
return np.std(x, ddof=1)
self._check_stat_op('std', alt)
def test_sem(self):
def alt(x):
if len(x) < 2:
return np.nan
return np.std(x, ddof=1) / np.sqrt(len(x))
self._check_stat_op('sem', alt)
# def test_skew(self):
# from scipy.stats import skew
# def alt(x):
# if len(x) < 3:
# return np.nan
# return skew(x, bias=False)
# self._check_stat_op('skew', alt)
def _check_stat_op(self, name, alternative, obj=None, has_skipna=True):
if obj is None:
obj = self.panel
# # set some NAs
# obj.ix[5:10] = np.nan
# obj.ix[15:20, -2:] = np.nan
f = getattr(obj, name)
if has_skipna:
def skipna_wrapper(x):
nona = remove_na(x)
if len(nona) == 0:
return np.nan
return alternative(nona)
def wrapper(x):
return alternative(np.asarray(x))
for i in range(obj.ndim):
result = f(axis=i, skipna=False)
assert_frame_equal(result, obj.apply(wrapper, axis=i))
else:
skipna_wrapper = alternative
wrapper = alternative
for i in range(obj.ndim):
result = f(axis=i)
if not tm._incompat_bottleneck_version(name):
assert_frame_equal(result, obj.apply(skipna_wrapper, axis=i))
self.assertRaises(Exception, f, axis=obj.ndim)
# Unimplemented numeric_only parameter.
if 'numeric_only' in signature(f).args:
self.assertRaisesRegexp(NotImplementedError, name, f,
numeric_only=True)
class SafeForSparse(object):
_multiprocess_can_split_ = True
@classmethod
def assert_panel_equal(cls, x, y):
assert_panel_equal(x, y)
def test_get_axis(self):
assert (self.panel._get_axis(0) is self.panel.items)
assert (self.panel._get_axis(1) is self.panel.major_axis)
assert (self.panel._get_axis(2) is self.panel.minor_axis)
def test_set_axis(self):
new_items = Index(np.arange(len(self.panel.items)))
new_major = Index(np.arange(len(self.panel.major_axis)))
new_minor = Index(np.arange(len(self.panel.minor_axis)))
# ensure propagate to potentially prior-cached items too
item = self.panel['ItemA']
self.panel.items = new_items
if hasattr(self.panel, '_item_cache'):
self.assertNotIn('ItemA', self.panel._item_cache)
self.assertIs(self.panel.items, new_items)
# TODO: unused?
item = self.panel[0] # noqa
self.panel.major_axis = new_major
self.assertIs(self.panel[0].index, new_major)
self.assertIs(self.panel.major_axis, new_major)
# TODO: unused?
item = self.panel[0] # noqa
self.panel.minor_axis = new_minor
self.assertIs(self.panel[0].columns, new_minor)
self.assertIs(self.panel.minor_axis, new_minor)
def test_get_axis_number(self):
self.assertEqual(self.panel._get_axis_number('items'), 0)
self.assertEqual(self.panel._get_axis_number('major'), 1)
self.assertEqual(self.panel._get_axis_number('minor'), 2)
def test_get_axis_name(self):
self.assertEqual(self.panel._get_axis_name(0), 'items')
self.assertEqual(self.panel._get_axis_name(1), 'major_axis')
self.assertEqual(self.panel._get_axis_name(2), 'minor_axis')
def test_get_plane_axes(self):
# what to do here?
index, columns = self.panel._get_plane_axes('items')
index, columns = self.panel._get_plane_axes('major_axis')
index, columns = self.panel._get_plane_axes('minor_axis')
index, columns = self.panel._get_plane_axes(0)
@ignore_sparse_panel_future_warning
def test_truncate(self):
dates = self.panel.major_axis
start, end = dates[1], dates[5]
trunced = self.panel.truncate(start, end, axis='major')
expected = self.panel['ItemA'].truncate(start, end)
assert_frame_equal(trunced['ItemA'], expected)
trunced = self.panel.truncate(before=start, axis='major')
expected = self.panel['ItemA'].truncate(before=start)
assert_frame_equal(trunced['ItemA'], expected)
trunced = self.panel.truncate(after=end, axis='major')
expected = self.panel['ItemA'].truncate(after=end)
assert_frame_equal(trunced['ItemA'], expected)
# XXX test other axes
def test_arith(self):
self._test_op(self.panel, operator.add)
self._test_op(self.panel, operator.sub)
self._test_op(self.panel, operator.mul)
self._test_op(self.panel, operator.truediv)
self._test_op(self.panel, operator.floordiv)
self._test_op(self.panel, operator.pow)
self._test_op(self.panel, lambda x, y: y + x)
self._test_op(self.panel, lambda x, y: y - x)
self._test_op(self.panel, lambda x, y: y * x)
self._test_op(self.panel, lambda x, y: y / x)
self._test_op(self.panel, lambda x, y: y ** x)
self._test_op(self.panel, lambda x, y: x + y) # panel + 1
self._test_op(self.panel, lambda x, y: x - y) # panel - 1
self._test_op(self.panel, lambda x, y: x * y) # panel * 1
self._test_op(self.panel, lambda x, y: x / y) # panel / 1
self._test_op(self.panel, lambda x, y: x ** y) # panel ** 1
self.assertRaises(Exception, self.panel.__add__, self.panel['ItemA'])
@staticmethod
def _test_op(panel, op):
result = op(panel, 1)
assert_frame_equal(result['ItemA'], op(panel['ItemA'], 1))
def test_keys(self):
tm.equalContents(list(self.panel.keys()), self.panel.items)
def test_iteritems(self):
# Test panel.iteritems(), aka panel.iteritems()
# just test that it works
for k, v in self.panel.iteritems():
pass
self.assertEqual(len(list(self.panel.iteritems())),
len(self.panel.items))
@ignore_sparse_panel_future_warning
def test_combineFrame(self):
def check_op(op, name):
# items
df = self.panel['ItemA']
func = getattr(self.panel, name)
result = func(df, axis='items')
assert_frame_equal(result['ItemB'], op(self.panel['ItemB'], df))
# major
xs = self.panel.major_xs(self.panel.major_axis[0])
result = func(xs, axis='major')
idx = self.panel.major_axis[1]
assert_frame_equal(result.major_xs(idx),
op(self.panel.major_xs(idx), xs))
# minor
xs = self.panel.minor_xs(self.panel.minor_axis[0])
result = func(xs, axis='minor')
idx = self.panel.minor_axis[1]
assert_frame_equal(result.minor_xs(idx),
op(self.panel.minor_xs(idx), xs))
ops = ['add', 'sub', 'mul', 'truediv', 'floordiv']
if not compat.PY3:
ops.append('div')
# pow, mod not supported for SparsePanel as flex ops (for now)
if not isinstance(self.panel, SparsePanel):
ops.extend(['pow', 'mod'])
else:
idx = self.panel.minor_axis[1]
with assertRaisesRegexp(ValueError, "Simple arithmetic.*scalar"):
self.panel.pow(self.panel.minor_xs(idx), axis='minor')
with assertRaisesRegexp(ValueError, "Simple arithmetic.*scalar"):
self.panel.mod(self.panel.minor_xs(idx), axis='minor')
for op in ops:
try:
check_op(getattr(operator, op), op)
except:
com.pprint_thing("Failing operation: %r" % op)
raise
if compat.PY3:
try:
check_op(operator.truediv, 'div')
except:
com.pprint_thing("Failing operation: %r" % 'div')
raise
@ignore_sparse_panel_future_warning
def test_combinePanel(self):
result = self.panel.add(self.panel)
self.assert_panel_equal(result, self.panel * 2)
@ignore_sparse_panel_future_warning
def test_neg(self):
self.assert_panel_equal(-self.panel, self.panel * -1)
# issue 7692
def test_raise_when_not_implemented(self):
p = Panel(np.arange(3 * 4 * 5).reshape(3, 4, 5),
items=['ItemA', 'ItemB', 'ItemC'],
major_axis=pd.date_range('20130101', periods=4),
minor_axis=list('ABCDE'))
d = p.sum(axis=1).ix[0]
ops = ['add', 'sub', 'mul', 'truediv', 'floordiv', 'div', 'mod', 'pow']
for op in ops:
with self.assertRaises(NotImplementedError):
getattr(p, op)(d, axis=0)
@ignore_sparse_panel_future_warning
def test_select(self):
p = self.panel
# select items
result = p.select(lambda x: x in ('ItemA', 'ItemC'), axis='items')
expected = p.reindex(items=['ItemA', 'ItemC'])
self.assert_panel_equal(result, expected)
# select major_axis
result = p.select(lambda x: x >= datetime(2000, 1, 15), axis='major')
new_major = p.major_axis[p.major_axis >= datetime(2000, 1, 15)]
expected = p.reindex(major=new_major)
self.assert_panel_equal(result, expected)
# select minor_axis
result = p.select(lambda x: x in ('D', 'A'), axis=2)
expected = p.reindex(minor=['A', 'D'])
self.assert_panel_equal(result, expected)
# corner case, empty thing
result = p.select(lambda x: x in ('foo', ), axis='items')
self.assert_panel_equal(result, p.reindex(items=[]))
def test_get_value(self):
for item in self.panel.items:
for mjr in self.panel.major_axis[::2]:
for mnr in self.panel.minor_axis:
result = self.panel.get_value(item, mjr, mnr)
expected = self.panel[item][mnr][mjr]
assert_almost_equal(result, expected)
@ignore_sparse_panel_future_warning
def test_abs(self):
result = self.panel.abs()
result2 = abs(self.panel)
expected = np.abs(self.panel)
self.assert_panel_equal(result, expected)
self.assert_panel_equal(result2, expected)
df = self.panel['ItemA']
result = df.abs()
result2 = abs(df)
expected = np.abs(df)
assert_frame_equal(result, expected)
assert_frame_equal(result2, expected)
s = df['A']
result = s.abs()
result2 = abs(s)
expected = np.abs(s)
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
self.assertEqual(result.name, 'A')
self.assertEqual(result2.name, 'A')
class CheckIndexing(object):
_multiprocess_can_split_ = True
def test_getitem(self):
self.assertRaises(Exception, self.panel.__getitem__, 'ItemQ')
def test_delitem_and_pop(self):
expected = self.panel['ItemA']
result = self.panel.pop('ItemA')
assert_frame_equal(expected, result)
self.assertNotIn('ItemA', self.panel.items)
del self.panel['ItemB']
self.assertNotIn('ItemB', self.panel.items)
self.assertRaises(Exception, self.panel.__delitem__, 'ItemB')
values = np.empty((3, 3, 3))
values[0] = 0
values[1] = 1
values[2] = 2
panel = Panel(values, lrange(3), lrange(3), lrange(3))
# did we delete the right row?
panelc = panel.copy()
del panelc[0]
assert_frame_equal(panelc[1], panel[1])
assert_frame_equal(panelc[2], panel[2])
panelc = panel.copy()
del panelc[1]
assert_frame_equal(panelc[0], panel[0])
assert_frame_equal(panelc[2], panel[2])
panelc = panel.copy()
del panelc[2]
assert_frame_equal(panelc[1], panel[1])
assert_frame_equal(panelc[0], panel[0])
def test_setitem(self):
# LongPanel with one item
lp = self.panel.filter(['ItemA', 'ItemB']).to_frame()
with tm.assertRaises(ValueError):
self.panel['ItemE'] = lp
# DataFrame
df = self.panel['ItemA'][2:].filter(items=['A', 'B'])
self.panel['ItemF'] = df
self.panel['ItemE'] = df
df2 = self.panel['ItemF']
assert_frame_equal(df, df2.reindex(index=df.index, columns=df.columns))
# scalar
self.panel['ItemG'] = 1
self.panel['ItemE'] = True
self.assertEqual(self.panel['ItemG'].values.dtype, np.int64)
self.assertEqual(self.panel['ItemE'].values.dtype, np.bool_)
# object dtype
self.panel['ItemQ'] = 'foo'
self.assertEqual(self.panel['ItemQ'].values.dtype, np.object_)
# boolean dtype
self.panel['ItemP'] = self.panel['ItemA'] > 0
self.assertEqual(self.panel['ItemP'].values.dtype, np.bool_)
self.assertRaises(TypeError, self.panel.__setitem__, 'foo',
self.panel.ix[['ItemP']])
# bad shape
p = Panel(np.random.randn(4, 3, 2))
with tm.assertRaisesRegexp(ValueError,
"shape of value must be \(3, 2\), "
"shape of given object was \(4, 2\)"):
p[0] = np.random.randn(4, 2)
def test_setitem_ndarray(self):
from pandas import date_range, datetools
timeidx = date_range(start=datetime(2009, 1, 1),
end=datetime(2009, 12, 31),
freq=datetools.MonthEnd())
lons_coarse = np.linspace(-177.5, 177.5, 72)
lats_coarse = np.linspace(-87.5, 87.5, 36)
P = Panel(items=timeidx, major_axis=lons_coarse,
minor_axis=lats_coarse)
data = np.random.randn(72 * 36).reshape((72, 36))
key = datetime(2009, 2, 28)
P[key] = data
assert_almost_equal(P[key].values, data)
def test_set_minor_major(self):
# GH 11014
df1 = DataFrame(['a', 'a', 'a', np.nan, 'a', np.nan])
df2 = DataFrame([1.0, np.nan, 1.0, np.nan, 1.0, 1.0])
panel = Panel({'Item1': df1, 'Item2': df2})
newminor = notnull(panel.iloc[:, :, 0])
panel.loc[:, :, 'NewMinor'] = newminor
assert_frame_equal(panel.loc[:, :, 'NewMinor'],
newminor.astype(object))
newmajor = notnull(panel.iloc[:, 0, :])
panel.loc[:, 'NewMajor', :] = newmajor
assert_frame_equal(panel.loc[:, 'NewMajor', :],
newmajor.astype(object))
def test_major_xs(self):
ref = self.panel['ItemA']
idx = self.panel.major_axis[5]
xs = self.panel.major_xs(idx)
result = xs['ItemA']
assert_series_equal(result, ref.xs(idx), check_names=False)
self.assertEqual(result.name, 'ItemA')
# not contained
idx = self.panel.major_axis[0] - bday
self.assertRaises(Exception, self.panel.major_xs, idx)
def test_major_xs_mixed(self):
self.panel['ItemD'] = 'foo'
xs = self.panel.major_xs(self.panel.major_axis[0])
self.assertEqual(xs['ItemA'].dtype, np.float64)
self.assertEqual(xs['ItemD'].dtype, np.object_)
def test_minor_xs(self):
ref = self.panel['ItemA']
idx = self.panel.minor_axis[1]
xs = self.panel.minor_xs(idx)
assert_series_equal(xs['ItemA'], ref[idx], check_names=False)
# not contained
self.assertRaises(Exception, self.panel.minor_xs, 'E')
def test_minor_xs_mixed(self):
self.panel['ItemD'] = 'foo'
xs = self.panel.minor_xs('D')
self.assertEqual(xs['ItemA'].dtype, np.float64)
self.assertEqual(xs['ItemD'].dtype, np.object_)
def test_xs(self):
itemA = self.panel.xs('ItemA', axis=0)
expected = self.panel['ItemA']
assert_frame_equal(itemA, expected)
# get a view by default
itemA_view = self.panel.xs('ItemA', axis=0)
itemA_view.values[:] = np.nan
self.assertTrue(np.isnan(self.panel['ItemA'].values).all())
# mixed-type yields a copy
self.panel['strings'] = 'foo'
result = self.panel.xs('D', axis=2)
self.assertIsNotNone(result.is_copy)
def test_getitem_fancy_labels(self):
p = self.panel
items = p.items[[1, 0]]
dates = p.major_axis[::2]
cols = ['D', 'C', 'F']
# all 3 specified
assert_panel_equal(p.ix[items, dates, cols],
p.reindex(items=items, major=dates, minor=cols))
# 2 specified
assert_panel_equal(p.ix[:, dates, cols],
p.reindex(major=dates, minor=cols))
assert_panel_equal(p.ix[items, :, cols],
p.reindex(items=items, minor=cols))
assert_panel_equal(p.ix[items, dates, :],
p.reindex(items=items, major=dates))
# only 1
assert_panel_equal(p.ix[items, :, :], p.reindex(items=items))
assert_panel_equal(p.ix[:, dates, :], p.reindex(major=dates))
assert_panel_equal(p.ix[:, :, cols], p.reindex(minor=cols))
def test_getitem_fancy_slice(self):
pass
def test_getitem_fancy_ints(self):
p = self.panel
# #1603
result = p.ix[:, -1, :]
expected = p.ix[:, p.major_axis[-1], :]
assert_frame_equal(result, expected)
def test_getitem_fancy_xs(self):
p = self.panel
item = 'ItemB'
date = p.major_axis[5]
col = 'C'
# get DataFrame
# item
assert_frame_equal(p.ix[item], p[item])
assert_frame_equal(p.ix[item, :], p[item])
assert_frame_equal(p.ix[item, :, :], p[item])
# major axis, axis=1
assert_frame_equal(p.ix[:, date], p.major_xs(date))
assert_frame_equal(p.ix[:, date, :], p.major_xs(date))
# minor axis, axis=2
assert_frame_equal(p.ix[:, :, 'C'], p.minor_xs('C'))
# get Series
assert_series_equal(p.ix[item, date], p[item].ix[date])
assert_series_equal(p.ix[item, date, :], p[item].ix[date])
assert_series_equal(p.ix[item, :, col], p[item][col])
assert_series_equal(p.ix[:, date, col], p.major_xs(date).ix[col])
def test_getitem_fancy_xs_check_view(self):
item = 'ItemB'
date = self.panel.major_axis[5]
# make sure it's always a view
NS = slice(None, None)
# DataFrames
comp = assert_frame_equal
self._check_view(item, comp)
self._check_view((item, NS), comp)
self._check_view((item, NS, NS), comp)
self._check_view((NS, date), comp)
self._check_view((NS, date, NS), comp)
self._check_view((NS, NS, 'C'), comp)
# Series
comp = assert_series_equal
self._check_view((item, date), comp)
self._check_view((item, date, NS), comp)
self._check_view((item, NS, 'C'), comp)
self._check_view((NS, date, 'C'), comp)
def test_ix_setitem_slice_dataframe(self):
a = Panel(items=[1, 2, 3], major_axis=[11, 22, 33],
minor_axis=[111, 222, 333])
b = DataFrame(np.random.randn(2, 3), index=[111, 333],
columns=[1, 2, 3])
a.ix[:, 22, [111, 333]] = b
assert_frame_equal(a.ix[:, 22, [111, 333]], b)
def test_ix_align(self):
from pandas import Series
b = Series(np.random.randn(10), name=0)
b.sort()
df_orig = Panel(np.random.randn(3, 10, 2))
df = df_orig.copy()
df.ix[0, :, 0] = b
assert_series_equal(df.ix[0, :, 0].reindex(b.index), b)
df = df_orig.swapaxes(0, 1)
df.ix[:, 0, 0] = b
assert_series_equal(df.ix[:, 0, 0].reindex(b.index), b)
df = df_orig.swapaxes(1, 2)
df.ix[0, 0, :] = b
assert_series_equal(df.ix[0, 0, :].reindex(b.index), b)
def test_ix_frame_align(self):
p_orig = tm.makePanel()
df = p_orig.ix[0].copy()
assert_frame_equal(p_orig['ItemA'], df)
p = p_orig.copy()
p.ix[0, :, :] = df
assert_panel_equal(p, p_orig)
p = p_orig.copy()
p.ix[0] = df
assert_panel_equal(p, p_orig)
p = p_orig.copy()
p.iloc[0, :, :] = df
assert_panel_equal(p, p_orig)
p = p_orig.copy()
p.iloc[0] = df
assert_panel_equal(p, p_orig)
p = p_orig.copy()
p.loc['ItemA'] = df
assert_panel_equal(p, p_orig)
p = p_orig.copy()
p.loc['ItemA', :, :] = df
assert_panel_equal(p, p_orig)
p = p_orig.copy()
p['ItemA'] = df
assert_panel_equal(p, p_orig)
p = p_orig.copy()
p.ix[0, [0, 1, 3, 5], -2:] = df
out = p.ix[0, [0, 1, 3, 5], -2:]
assert_frame_equal(out, df.iloc[[0, 1, 3, 5], [2, 3]])
# GH3830, panel assignment by values/frame
for dtype in ['float64', 'int64']:
panel = Panel(np.arange(40).reshape((2, 4, 5)),
items=['a1', 'a2'], dtype=dtype)
df1 = panel.iloc[0]
df2 = panel.iloc[1]
tm.assert_frame_equal(panel.loc['a1'], df1)
tm.assert_frame_equal(panel.loc['a2'], df2)
# Assignment by Value Passes for 'a2'
panel.loc['a2'] = df1.values
tm.assert_frame_equal(panel.loc['a1'], df1)
tm.assert_frame_equal(panel.loc['a2'], df1)
# Assignment by DataFrame Ok w/o loc 'a2'
panel['a2'] = df2
tm.assert_frame_equal(panel.loc['a1'], df1)
tm.assert_frame_equal(panel.loc['a2'], df2)
# Assignment by DataFrame Fails for 'a2'
panel.loc['a2'] = df2
tm.assert_frame_equal(panel.loc['a1'], df1)
tm.assert_frame_equal(panel.loc['a2'], df2)
def _check_view(self, indexer, comp):
cp = self.panel.copy()
obj = cp.ix[indexer]
obj.values[:] = 0
self.assertTrue((obj.values == 0).all())
comp(cp.ix[indexer].reindex_like(obj), obj)
def test_logical_with_nas(self):
d = Panel({'ItemA': {'a': [np.nan, False]},
'ItemB': {'a': [True, True]}})
result = d['ItemA'] | d['ItemB']
expected = DataFrame({'a': [np.nan, True]})
assert_frame_equal(result, expected)
# this is autodowncasted here
result = d['ItemA'].fillna(False) | d['ItemB']
expected = DataFrame({'a': [True, True]})
assert_frame_equal(result, expected)
def test_neg(self):
# what to do?
assert_panel_equal(-self.panel, -1 * self.panel)
def test_invert(self):
assert_panel_equal(-(self.panel < 0), ~(self.panel < 0))
def test_comparisons(self):
p1 = tm.makePanel()
p2 = tm.makePanel()
tp = p1.reindex(items=p1.items + ['foo'])
df = p1[p1.items[0]]
def test_comp(func):
# versus same index
result = func(p1, p2)
self.assert_numpy_array_equal(result.values,
func(p1.values, p2.values))
# versus non-indexed same objs
self.assertRaises(Exception, func, p1, tp)
# versus different objs
self.assertRaises(Exception, func, p1, df)
# versus scalar
result3 = func(self.panel, 0)
self.assert_numpy_array_equal(result3.values,
func(self.panel.values, 0))
test_comp(operator.eq)
test_comp(operator.ne)
test_comp(operator.lt)
test_comp(operator.gt)
test_comp(operator.ge)
test_comp(operator.le)
def test_get_value(self):
for item in self.panel.items:
for mjr in self.panel.major_axis[::2]:
for mnr in self.panel.minor_axis:
result = self.panel.get_value(item, mjr, mnr)
expected = self.panel[item][mnr][mjr]
assert_almost_equal(result, expected)
with tm.assertRaisesRegexp(TypeError,
"There must be an argument for each axis"):
self.panel.get_value('a')
def test_set_value(self):
for item in self.panel.items:
for mjr in self.panel.major_axis[::2]:
for mnr in self.panel.minor_axis:
self.panel.set_value(item, mjr, mnr, 1.)
assert_almost_equal(self.panel[item][mnr][mjr], 1.)
# resize
res = self.panel.set_value('ItemE', 'foo', 'bar', 1.5)
tm.assertIsInstance(res, Panel)
self.assertIsNot(res, self.panel)
self.assertEqual(res.get_value('ItemE', 'foo', 'bar'), 1.5)
res3 = self.panel.set_value('ItemE', 'foobar', 'baz', 5)
self.assertTrue(com.is_float_dtype(res3['ItemE'].values))
with tm.assertRaisesRegexp(TypeError,
"There must be an argument for each axis"
" plus the value provided"):
self.panel.set_value('a')
_panel = tm.makePanel()
tm.add_nans(_panel)
class TestPanel(tm.TestCase, PanelTests, CheckIndexing, SafeForLongAndSparse,
SafeForSparse):
_multiprocess_can_split_ = True
@classmethod
def assert_panel_equal(cls, x, y):
assert_panel_equal(x, y)
def setUp(self):
self.panel = _panel.copy()
self.panel.major_axis.name = None
self.panel.minor_axis.name = None
self.panel.items.name = None
def test_panel_warnings(self):
with tm.assert_produces_warning(FutureWarning):
shifted1 = self.panel.shift(lags=1)
with tm.assert_produces_warning(False):
shifted2 = self.panel.shift(periods=1)
tm.assert_panel_equal(shifted1, shifted2)
with tm.assert_produces_warning(False):
shifted3 = self.panel.shift()
tm.assert_panel_equal(shifted1, shifted3)
def test_constructor(self):
# with BlockManager
wp = Panel(self.panel._data)
self.assertIs(wp._data, self.panel._data)
wp = Panel(self.panel._data, copy=True)
self.assertIsNot(wp._data, self.panel._data)
assert_panel_equal(wp, self.panel)
# strings handled properly
wp = Panel([[['foo', 'foo', 'foo', ], ['foo', 'foo', 'foo']]])
self.assertEqual(wp.values.dtype, np.object_)
vals = self.panel.values
# no copy
wp = Panel(vals)
self.assertIs(wp.values, vals)
# copy
wp = Panel(vals, copy=True)
self.assertIsNot(wp.values, vals)
# GH #8285, test when scalar data is used to construct a Panel
# if dtype is not passed, it should be inferred
value_and_dtype = [(1, 'int64'), (3.14, 'float64'),
('foo', np.object_)]
for (val, dtype) in value_and_dtype:
wp = Panel(val, items=range(2), major_axis=range(3),
minor_axis=range(4))
vals = np.empty((2, 3, 4), dtype=dtype)
vals.fill(val)
assert_panel_equal(wp, Panel(vals, dtype=dtype))
# test the case when dtype is passed
wp = Panel(1, items=range(2), major_axis=range(3), minor_axis=range(4),
dtype='float32')
vals = np.empty((2, 3, 4), dtype='float32')
vals.fill(1)
assert_panel_equal(wp, Panel(vals, dtype='float32'))
def test_constructor_cast(self):
zero_filled = self.panel.fillna(0)
casted = Panel(zero_filled._data, dtype=int)
casted2 = Panel(zero_filled.values, dtype=int)
exp_values = zero_filled.values.astype(int)
assert_almost_equal(casted.values, exp_values)
assert_almost_equal(casted2.values, exp_values)
casted = Panel(zero_filled._data, dtype=np.int32)
casted2 = Panel(zero_filled.values, dtype=np.int32)
exp_values = zero_filled.values.astype(np.int32)
assert_almost_equal(casted.values, exp_values)
assert_almost_equal(casted2.values, exp_values)
# can't cast
data = [[['foo', 'bar', 'baz']]]
self.assertRaises(ValueError, Panel, data, dtype=float)
def test_constructor_empty_panel(self):
empty = Panel()
self.assertEqual(len(empty.items), 0)
self.assertEqual(len(empty.major_axis), 0)
self.assertEqual(len(empty.minor_axis), 0)
def test_constructor_observe_dtype(self):
# GH #411
panel = Panel(items=lrange(3), major_axis=lrange(3),
minor_axis=lrange(3), dtype='O')
self.assertEqual(panel.values.dtype, np.object_)
def test_constructor_dtypes(self):
# GH #797
def _check_dtype(panel, dtype):
for i in panel.items:
self.assertEqual(panel[i].values.dtype.name, dtype)
# only nan holding types allowed here
for dtype in ['float64', 'float32', 'object']:
panel = Panel(items=lrange(2), major_axis=lrange(10),
minor_axis=lrange(5), dtype=dtype)
_check_dtype(panel, dtype)
for dtype in ['float64', 'float32', 'int64', 'int32', 'object']:
panel = Panel(np.array(np.random.randn(2, 10, 5), dtype=dtype),
items=lrange(2),
major_axis=lrange(10),
minor_axis=lrange(5), dtype=dtype)
_check_dtype(panel, dtype)
for dtype in ['float64', 'float32', 'int64', 'int32', 'object']:
panel = Panel(np.array(np.random.randn(2, 10, 5), dtype='O'),
items=lrange(2),
major_axis=lrange(10),
minor_axis=lrange(5), dtype=dtype)
_check_dtype(panel, dtype)
for dtype in ['float64', 'float32', 'int64', 'int32', 'object']:
panel = Panel(np.random.randn(2, 10, 5), items=lrange(
2), major_axis=lrange(10), minor_axis=lrange(5), dtype=dtype)
_check_dtype(panel, dtype)
for dtype in ['float64', 'float32', 'int64', 'int32', 'object']:
df1 = DataFrame(np.random.randn(2, 5),
index=lrange(2), columns=lrange(5))
df2 = DataFrame(np.random.randn(2, 5),
index=lrange(2), columns=lrange(5))
panel = Panel.from_dict({'a': df1, 'b': df2}, dtype=dtype)
_check_dtype(panel, dtype)
def test_constructor_fails_with_not_3d_input(self):
with tm.assertRaisesRegexp(ValueError,
"The number of dimensions required is 3"):
Panel(np.random.randn(10, 2))
def test_consolidate(self):
self.assertTrue(self.panel._data.is_consolidated())
self.panel['foo'] = 1.
self.assertFalse(self.panel._data.is_consolidated())
panel = self.panel.consolidate()
self.assertTrue(panel._data.is_consolidated())
def test_ctor_dict(self):
itema = self.panel['ItemA']
itemb = self.panel['ItemB']
d = {'A': itema, 'B': itemb[5:]}
d2 = {'A': itema._series, 'B': itemb[5:]._series}
d3 = {'A': None,
'B': DataFrame(itemb[5:]._series),
'C': DataFrame(itema._series)}
wp = Panel.from_dict(d)
wp2 = Panel.from_dict(d2) # nested Dict
# TODO: unused?
wp3 = Panel.from_dict(d3) # noqa
self.assertTrue(wp.major_axis.equals(self.panel.major_axis))
assert_panel_equal(wp, wp2)
# intersect
wp = Panel.from_dict(d, intersect=True)
self.assertTrue(wp.major_axis.equals(itemb.index[5:]))
# use constructor
assert_panel_equal(Panel(d), Panel.from_dict(d))
assert_panel_equal(Panel(d2), Panel.from_dict(d2))
assert_panel_equal(Panel(d3), Panel.from_dict(d3))
# a pathological case
d4 = {'A': None, 'B': None}
# TODO: unused?
wp4 = Panel.from_dict(d4) # noqa
assert_panel_equal(Panel(d4), Panel(items=['A', 'B']))
# cast
dcasted = dict((k, v.reindex(wp.major_axis).fillna(0))
for k, v in compat.iteritems(d))
result = Panel(dcasted, dtype=int)
expected = Panel(dict((k, v.astype(int))
for k, v in compat.iteritems(dcasted)))
assert_panel_equal(result, expected)
result = Panel(dcasted, dtype=np.int32)
expected = Panel(dict((k, v.astype(np.int32))
for k, v in compat.iteritems(dcasted)))
assert_panel_equal(result, expected)
def test_constructor_dict_mixed(self):
data = dict((k, v.values) for k, v in self.panel.iteritems())
result = Panel(data)
exp_major = Index(np.arange(len(self.panel.major_axis)))
self.assertTrue(result.major_axis.equals(exp_major))
result = Panel(data, items=self.panel.items,
major_axis=self.panel.major_axis,
minor_axis=self.panel.minor_axis)
assert_panel_equal(result, self.panel)
data['ItemC'] = self.panel['ItemC']
result = Panel(data)
assert_panel_equal(result, self.panel)
# corner, blow up
data['ItemB'] = data['ItemB'][:-1]
self.assertRaises(Exception, Panel, data)
data['ItemB'] = self.panel['ItemB'].values[:, :-1]
self.assertRaises(Exception, Panel, data)
def test_ctor_orderedDict(self):
keys = list(set(np.random.randint(0, 5000, 100)))[
:50] # unique random int keys
d = OrderedDict([(k, mkdf(10, 5)) for k in keys])
p = Panel(d)
self.assertTrue(list(p.items) == keys)
p = Panel.from_dict(d)
self.assertTrue(list(p.items) == keys)
def test_constructor_resize(self):
data = self.panel._data
items = self.panel.items[:-1]
major = self.panel.major_axis[:-1]
minor = self.panel.minor_axis[:-1]
result = Panel(data, items=items, major_axis=major, minor_axis=minor)
expected = self.panel.reindex(items=items, major=major, minor=minor)
assert_panel_equal(result, expected)
result = Panel(data, items=items, major_axis=major)
expected = self.panel.reindex(items=items, major=major)
assert_panel_equal(result, expected)
result = Panel(data, items=items)
expected = self.panel.reindex(items=items)
assert_panel_equal(result, expected)
result = Panel(data, minor_axis=minor)
expected = self.panel.reindex(minor=minor)
assert_panel_equal(result, expected)
def test_from_dict_mixed_orient(self):
df = tm.makeDataFrame()
df['foo'] = 'bar'
data = {'k1': df, 'k2': df}
panel = Panel.from_dict(data, orient='minor')
self.assertEqual(panel['foo'].values.dtype, np.object_)
self.assertEqual(panel['A'].values.dtype, np.float64)
def test_constructor_error_msgs(self):
def testit():
Panel(np.random.randn(3, 4, 5), lrange(4), lrange(5), lrange(5))
assertRaisesRegexp(ValueError,
"Shape of passed values is \(3, 4, 5\), "
"indices imply \(4, 5, 5\)",
testit)
def testit():
Panel(np.random.randn(3, 4, 5), lrange(5), lrange(4), lrange(5))
assertRaisesRegexp(ValueError,
"Shape of passed values is \(3, 4, 5\), "
"indices imply \(5, 4, 5\)",
testit)
def testit():
Panel(np.random.randn(3, 4, 5), lrange(5), lrange(5), lrange(4))
assertRaisesRegexp(ValueError,
"Shape of passed values is \(3, 4, 5\), "
"indices imply \(5, 5, 4\)",
testit)
def test_conform(self):
df = self.panel['ItemA'][:-5].filter(items=['A', 'B'])
conformed = self.panel.conform(df)
assert (conformed.index.equals(self.panel.major_axis))
assert (conformed.columns.equals(self.panel.minor_axis))
def test_convert_objects(self):
# GH 4937
p = Panel(dict(A=dict(a=['1', '1.0'])))
expected = Panel(dict(A=dict(a=[1, 1.0])))
result = p._convert(numeric=True, coerce=True)
assert_panel_equal(result, expected)
def test_dtypes(self):
result = self.panel.dtypes
expected = Series(np.dtype('float64'), index=self.panel.items)
assert_series_equal(result, expected)
def test_apply(self):
# GH1148
# ufunc
applied = self.panel.apply(np.sqrt)
self.assertTrue(assert_almost_equal(applied.values, np.sqrt(
self.panel.values)))
# ufunc same shape
result = self.panel.apply(lambda x: x * 2, axis='items')
expected = self.panel * 2
assert_panel_equal(result, expected)
result = self.panel.apply(lambda x: x * 2, axis='major_axis')
expected = self.panel * 2
assert_panel_equal(result, expected)
result = self.panel.apply(lambda x: x * 2, axis='minor_axis')
expected = self.panel * 2
| assert_panel_equal(result, expected) | pandas.util.testing.assert_panel_equal |
# -*- coding: utf-8 -*-
# Original Code by <NAME> for VOST Portugal
# 18 MAR 2022
# -----------------------------------------------
# LIBRARIES
# -----------------------------------------------
# Import Dash and Dash Bootstrap Components
import dash
import dash_bootstrap_components as dbc
from dash import Input, Output, dcc, html
# Import Core Libraries
import pandas as pd
import plotly.express as px
# -----------------------------------------------
# APP STARTS HERE
# -----------------------------------------------
app = dash.Dash(__name__, external_stylesheets=[dbc.themes.BOOTSTRAP], title='CONFIRM - BAJATT 2022', update_title=None,
meta_tags=[{"name": "viewport", "content": "width=device-width, initial-scale=0.7, minimum-scale=0.4"}],
)
server = app.server
CONFIRM_LOGO = app.get_asset_url('CONFIRM_Logotype.png')
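# Colour assigned to each incident category; the keys are assumed to match the category
# strings coming from the incidents spreadsheet.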
color_map = {
"WARNING":"#C81D25",
"ACIDENTE":"#4F5D75",
"AVARIA MECÂNICA":"#DE6E4B",
"DESISTÊNCIA CONFIRMADA":"#2D3142",
"DESISTÊNCIA NÃO CONFIRMADA":"#242424"
}
app.layout = dbc.Container(
[
dbc.Row(
[
# AUTOMATIC UPDATER
dcc.Interval(
id='interval-component',
interval=20*1000, # in milliseconds
n_intervals=0
),
dbc.Col(
[
dbc.Row(
[
dbc.Row(html.Hr()),
dbc.Col(width=2,xs=12, sm=12,md=1,lg=1,xl=1),
dbc.Col(html.H3("BAJA TT 2022"),width=4,xs=12, sm=12,md=4,lg=4,xl=4),
dbc.Col(width=4,xs=12, sm=12,md=1,lg=4,xl=4),
dbc.Col(html.Img(src=CONFIRM_LOGO, height="37px"),width=2,xs=12, sm=12,md=1,lg=1,xl=1), # CONFIRM LOGO - DO NOT REMOVE
],
),
],
),
dbc.Row(
[
dbc.Col(width=2,xs=12, sm=12,md=1,lg=2,xl=1),
dbc.Col(
html.P("CONFIRM by VOST PORTUGAL ")
),
],
),
],
style={"height": "20%", "background-color": "#1D1E2C"},
),
dbc.Row(
[
dbc.Col(
dcc.Graph(id='map'), width=2,xs=12, sm=12,md=12,lg=12,xl=4,
),
dbc.Col(
dbc.Row(
[
dbc.Card(
[
dbc.CardHeader("TOTAL INCIDENTS", style={"background": "#FF495C","color":"white"}),
dbc.CardBody(
[
html.H6("TOTAL INCIDENTES", style={"color":"#FF495C"}, className="card-title"),
html.H4(id="totals"),
],
),
],
),
dbc.Card(
[
dbc.CardHeader("TOTAL WARNINGS", style={"background": "#C81D25","color":"white"}),
dbc.CardBody(
[
html.H6("RACE DIRECTOR", style={"color":"#C81D25"}, className="card-title"),
html.H4(id="total_warnings"),
],
),
],
),
dbc.Card(
[
dbc.CardHeader("BREAKDOWNS", style={"background": "#DE6E4B","color":"white"}),
dbc.CardBody(
[
html.H6("AVARIAS", style={"color":"#DE6E4B"}, className="card-title"),
html.H4(id="total_breakdowns"),
],
),
],
),
],
),
width=2,xs=12, sm=12,md=12,lg=6,xl=2,
),
dbc.Col(
dbc.Row(
[
dbc.Card(
[
dbc.CardHeader("ACCIDENTS", style={"background": "#4F5D75","color":"white"}),
dbc.CardBody(
[
html.H6("ACIDENTES", style={"color":"#4F5D75"}, className="card-title"),
html.H4(id="total_accidents"),
],
),
],
),
dbc.Card(
[
dbc.CardHeader("CONFIRMED OUT OF RACE", style={"background": "#2D3142","color":"white"}),
dbc.CardBody(
[
html.H6("DESISTÊNCIA", style={"color":"#2D3142"}, className="card-title"),
html.H4(id="total_gaveup_confirmed"),
],
),
],
),
dbc.Card(
[
dbc.CardHeader("NON-CONFIRMED OUT OF RACE", style={"background": "#242424","color":"white"}),
dbc.CardBody(
[
html.H6("DESISTÊNCIA NC", style={"color":"#242424"}, className="card-title"),
html.H4(id="total_gaveup_nconfirmed"),
],
),
],
),
],
),
width=2,xs=12, sm=12,md=12,lg=6,xl=2,
),
dbc.Col(
dbc.Row(dcc.Graph(id='pie')),
width=3,xs=12, sm=12,md=12,lg=12,xl=3,
),
],
),
dbc.Row(
[
dbc.Col(
[
dbc.Row(dcc.Graph(id='timeline'))
],
),
],
style={"height": "10%", "background-color": "#242424"},
),
dbc.Row(
[
dbc.Col(
[
dbc.Row(
[
dbc.Col(width=4,xs=12, sm=12,md=4,lg=4,xl=4),
dbc.Col(
dbc.Row(
[
dbc.Row(dbc.Col(width=12),),
dbc.Row(html.H6("POWERED BY VOST PORTUGAL",style={"align":"center"}),),
dbc.Row(html.H6("VOST PORTUGAL for ACP MOTORSPORTS",style={"align":"center"}),),
dbc.Row(html.H6("CC BY-NC-SA 2022",style={"align":"center"}),),
],
),
),
],
style={"height": "20%", "background-color": "#242424"},
),
],
),
],
style={"height": "30%", "background-color": "#242424"},
),
],
style={"width":"100vw","height": "97vh"},
)
# DEFINE CALL BACKS
@app.callback(
Output(component_id="map",component_property="figure"),
Output(component_id="totals",component_property="children"),
Output(component_id="total_warnings",component_property="children"), # returns variable
Output(component_id="total_breakdowns",component_property="children"),
Output(component_id="total_accidents",component_property="children"),
Output(component_id="total_gaveup_confirmed",component_property="children"), # returns variable
Output(component_id="total_gaveup_nconfirmed",component_property="children"), # returns table # returns table
Output(component_id="pie",component_property="figure"),
Output(component_id="timeline",component_property="figure"),
Input(component_id="interval-component", component_property="n_intervals"), # Triggers Call Back based on time update
)
# WHAT HAPPENS WHEN CALL BACK IS TRIGGERED
def confirm_update(value):
# DATA TREATMENT
df_ss1_cc = pd.read_csv('ss1_cc.csv')
df_live_incidents = pd.read_csv('https://docs.google.com/spreadsheets/d/e/2PACX-1vT_L10XsTy6OEUN6OOOdEbLDeMzAW000x2bmgXF5acnOY6v8lJpooMiOg4uFQ3e3CI2MfFdDB07I5X_/pub?gid=812677681&single=true&output=csv')
df_live_cc = | pd.read_csv('https://docs.google.com/spreadsheets/d/e/2PACX-1vT_L10XsTy6OEUN6OOOdEbLDeMzAW000x2bmgXF5acnOY6v8lJpooMiOg4uFQ3e3CI2MfFdDB07I5X_/pub?gid=1268287201&single=true&output=csv') | pandas.read_csv |
import unittest
import pandas as pd
import numpy as np
from pandas.testing import assert_frame_equal
from msticpy.analysis.anomalous_sequence import sessionize
class TestSessionize(unittest.TestCase):
def setUp(self):
self.df1 = pd.DataFrame({"UserId": [], "time": [], "operation": []})
self.df1_with_ses_col = pd.DataFrame(
{"UserId": [], "time": [], "operation": [], "session_ind": []}
)
self.df1_sessionized = pd.DataFrame(
{
"UserId": [],
"time_min": [],
"time_max": [],
"operation_list": [],
"duration": [],
"number_events": [],
}
)
self.df2 = pd.DataFrame(
{
"UserId": [1, 1, 2, 3, 1, 2, 2],
"time": [
pd.to_datetime("2020-01-03 00:00:00"),
pd.to_datetime("2020-01-03 00:01:00"),
pd.to_datetime("2020-01-05 00:00:00"),
pd.to_datetime("2020-01-06 11:06:00"),
pd.to_datetime("2020-01-03 01:00:00"),
pd.to_datetime("2020-01-05 00:21:00"),
pd.to_datetime("2020-01-05 00:25:00"),
],
"operation": ["A", "B", "C", "A", "A", "B", "C"],
}
)
self.df2_with_ses_col_1 = pd.DataFrame(
{
"UserId": [1, 1, 1, 2, 2, 2, 3],
"time": [
pd.to_datetime("2020-01-03 00:00:00"),
pd.to_datetime("2020-01-03 00:01:00"),
| pd.to_datetime("2020-01-03 01:00:00") | pandas.to_datetime |
# pylint: disable=E1101
from pandas.util.py3compat import StringIO, BytesIO, PY3
from datetime import datetime
from os.path import split as psplit
import csv
import os
import sys
import re
import unittest
import nose
from numpy import nan
import numpy as np
from pandas import DataFrame, Series, Index, MultiIndex, DatetimeIndex
import pandas.io.parsers as parsers
from pandas.io.parsers import (read_csv, read_table, read_fwf,
ExcelFile, TextFileReader, TextParser)
from pandas.util.testing import (assert_almost_equal,
assert_series_equal,
network,
ensure_clean)
import pandas.util.testing as tm
import pandas as pd
import pandas.lib as lib
from pandas.util import py3compat
from pandas.lib import Timestamp
from pandas.tseries.index import date_range
import pandas.tseries.tools as tools
from numpy.testing.decorators import slow
from pandas._parser import OverflowError
from pandas.io.parsers import (ExcelFile, ExcelWriter, read_csv)
def _skip_if_no_xlrd():
try:
import xlrd
ver = tuple(map(int, xlrd.__VERSION__.split(".")[:2]))
if ver < (0, 9):
            raise nose.SkipTest('xlrd version too old (< 0.9), skipping')
except ImportError:
raise nose.SkipTest('xlrd not installed, skipping')
def _skip_if_no_xlwt():
try:
import xlwt
except ImportError:
raise nose.SkipTest('xlwt not installed, skipping')
def _skip_if_no_openpyxl():
try:
import openpyxl
except ImportError:
raise nose.SkipTest('openpyxl not installed, skipping')
def _skip_if_no_excelsuite():
_skip_if_no_xlrd()
_skip_if_no_xlwt()
_skip_if_no_openpyxl()
_seriesd = tm.getSeriesData()
_tsd = tm.getTimeSeriesData()
_frame = DataFrame(_seriesd)[:10]
_frame2 = DataFrame(_seriesd, columns=['D', 'C', 'B', 'A'])[:10]
_tsframe = tm.makeTimeDataFrame()[:5]
_mixed_frame = _frame.copy()
_mixed_frame['foo'] = 'bar'
class ExcelTests(unittest.TestCase):
def setUp(self):
self.dirpath = tm.get_data_path()
self.csv1 = os.path.join(self.dirpath, 'test1.csv')
self.csv2 = os.path.join(self.dirpath, 'test2.csv')
self.xls1 = os.path.join(self.dirpath, 'test.xls')
self.frame = _frame.copy()
self.frame2 = _frame2.copy()
self.tsframe = _tsframe.copy()
self.mixed_frame = _mixed_frame.copy()
def test_parse_cols_int(self):
_skip_if_no_openpyxl()
_skip_if_no_xlrd()
suffix = ['', 'x']
for s in suffix:
pth = os.path.join(self.dirpath, 'test.xls%s' % s)
xls = ExcelFile(pth)
df = xls.parse('Sheet1', index_col=0, parse_dates=True,
parse_cols=3)
            df2 = read_csv(self.csv1, index_col=0, parse_dates=True)
df2 = df2.reindex(columns=['A', 'B', 'C'])
df3 = xls.parse('Sheet2', skiprows=[1], index_col=0,
parse_dates=True, parse_cols=3)
            tm.assert_frame_equal(df, df2, check_names=False)  # TODO add index to xls file
tm.assert_frame_equal(df3, df2, check_names=False)
def test_parse_cols_list(self):
_skip_if_no_openpyxl()
_skip_if_no_xlrd()
suffix = ['', 'x']
for s in suffix:
pth = os.path.join(self.dirpath, 'test.xls%s' % s)
xls = ExcelFile(pth)
df = xls.parse('Sheet1', index_col=0, parse_dates=True,
parse_cols=[0, 2, 3])
            df2 = read_csv(self.csv1, index_col=0, parse_dates=True)
df2 = df2.reindex(columns=['B', 'C'])
df3 = xls.parse('Sheet2', skiprows=[1], index_col=0,
parse_dates=True,
parse_cols=[0, 2, 3])
tm.assert_frame_equal(df, df2, check_names=False) # TODO add index to xls file
tm.assert_frame_equal(df3, df2, check_names=False)
def test_parse_cols_str(self):
_skip_if_no_openpyxl()
_skip_if_no_xlrd()
suffix = ['', 'x']
for s in suffix:
pth = os.path.join(self.dirpath, 'test.xls%s' % s)
xls = ExcelFile(pth)
df = xls.parse('Sheet1', index_col=0, parse_dates=True,
parse_cols='A:D')
df2 = read_csv(self.csv1, index_col=0, parse_dates=True)
df2 = df2.reindex(columns=['A', 'B', 'C'])
df3 = xls.parse('Sheet2', skiprows=[1], index_col=0,
parse_dates=True, parse_cols='A:D')
tm.assert_frame_equal(df, df2, check_names=False) # TODO add index to xls, read xls ignores index name ?
tm.assert_frame_equal(df3, df2, check_names=False)
del df, df2, df3
df = xls.parse('Sheet1', index_col=0, parse_dates=True,
parse_cols='A,C,D')
df2 = read_csv(self.csv1, index_col=0, parse_dates=True)
df2 = df2.reindex(columns=['B', 'C'])
df3 = xls.parse('Sheet2', skiprows=[1], index_col=0,
parse_dates=True,
parse_cols='A,C,D')
tm.assert_frame_equal(df, df2, check_names=False) # TODO add index to xls file
tm.assert_frame_equal(df3, df2, check_names=False)
del df, df2, df3
df = xls.parse('Sheet1', index_col=0, parse_dates=True,
parse_cols='A,C:D')
df2 = read_csv(self.csv1, index_col=0, parse_dates=True)
df2 = df2.reindex(columns=['B', 'C'])
df3 = xls.parse('Sheet2', skiprows=[1], index_col=0,
parse_dates=True,
parse_cols='A,C:D')
tm.assert_frame_equal(df, df2, check_names=False)
tm.assert_frame_equal(df3, df2, check_names=False)
def test_excel_stop_iterator(self):
_skip_if_no_xlrd()
excel_data = ExcelFile(os.path.join(self.dirpath, 'test2.xls'))
parsed = excel_data.parse('Sheet1')
expected = DataFrame([['aaaa', 'bbbbb']], columns=['Test', 'Test1'])
tm.assert_frame_equal(parsed, expected)
def test_excel_cell_error_na(self):
_skip_if_no_xlrd()
excel_data = ExcelFile(os.path.join(self.dirpath, 'test3.xls'))
parsed = excel_data.parse('Sheet1')
expected = | DataFrame([[np.nan]], columns=['Test']) | pandas.DataFrame |
#!/usr/bin/python3
# -*- coding: utf-8 -*-
# *****************************************************************************/
# * Authors: <NAME>
# *****************************************************************************/
"""transformCSV.py
This module contains the basic functions for creating the content of a configuration file from CSV.
Args:
--inFile: Path for the configuration file where the time series data values CSV
--outFile: Path for the configuration file where the time series data values INI
--debug: Boolean flag to activate verbose printing for debug use
Example:
Default usage:
$ python transformCSV.py
Specific usage:
$ python transformCSV.py
--inFile C:\raad\src\software\time-series.csv
--outFile C:\raad\src\software\time-series.ini
--debug True
"""
import sys
import datetime
import optparse
import traceback
import pandas
import numpy
import os
import pprint
import csv
if sys.version_info.major > 2:
import configparser as cF
else:
import ConfigParser as cF
class TransformMetaData(object):
debug = False
fileName = None
fileLocation = None
columnsList = None
analysisFrameFormat = None
uniqueLists = None
analysisFrame = None
def __init__(self, inputFileName=None, debug=False, transform=False, sectionName=None, outFolder=None,
outFile='time-series-madness.ini'):
if isinstance(debug, bool):
self.debug = debug
if inputFileName is None:
return
elif os.path.exists(os.path.abspath(inputFileName)):
self.fileName = inputFileName
self.fileLocation = os.path.exists(os.path.abspath(inputFileName))
(analysisFrame, analysisFrameFormat, uniqueLists, columnNamesList) = self.CSVtoFrame(
inputFileName=self.fileName)
self.analysisFrame = analysisFrame
self.columnsList = columnNamesList
self.analysisFrameFormat = analysisFrameFormat
self.uniqueLists = uniqueLists
if transform:
passWrite = self.frameToINI(analysisFrame=analysisFrame, sectionName=sectionName, outFolder=outFolder,
outFile=outFile)
print(f"Pass Status is : {passWrite}")
return
def getColumnList(self):
return self.columnsList
def getAnalysisFrameFormat(self):
return self.analysisFrameFormat
def getuniqueLists(self):
return self.uniqueLists
def getAnalysisFrame(self):
return self.analysisFrame
@staticmethod
def getDateParser(formatString="%Y-%m-%d %H:%M:%S.%f"):
        return (lambda x: datetime.datetime.strptime(x, formatString))  # e.g. 2020-06-09 19:14:00.000
def getHeaderFromFile(self, headerFilePath=None, method=1):
if headerFilePath is None:
return (None, None)
if method == 1:
fieldnames = pandas.read_csv(headerFilePath, index_col=0, nrows=0).columns.tolist()
elif method == 2:
with open(headerFilePath, 'r') as infile:
reader = csv.DictReader(infile)
fieldnames = list(reader.fieldnames)
elif method == 3:
fieldnames = list(pandas.read_csv(headerFilePath, nrows=1).columns)
else:
fieldnames = None
fieldDict = {}
for indexName, valueName in enumerate(fieldnames):
fieldDict[valueName] = pandas.StringDtype()
return (fieldnames, fieldDict)
def CSVtoFrame(self, inputFileName=None):
if inputFileName is None:
return (None, None)
# Load File
print("Processing File: {0}...\n".format(inputFileName))
self.fileLocation = inputFileName
# Create data frame
analysisFrame = pandas.DataFrame()
analysisFrameFormat = self._getDataFormat()
inputDataFrame = pandas.read_csv(filepath_or_buffer=inputFileName,
sep='\t',
names=self._getDataFormat(),
# dtype=self._getDataFormat()
# header=None
# float_precision='round_trip'
# engine='c',
# parse_dates=['date_column'],
# date_parser=True,
# na_values=['NULL']
)
if self.debug: # Preview data.
print(inputDataFrame.head(5))
# analysisFrame.astype(dtype=analysisFrameFormat)
# Cleanup data
analysisFrame = inputDataFrame.copy(deep=True)
analysisFrame.apply(pandas.to_numeric, errors='coerce') # Fill in bad data with Not-a-Number (NaN)
# Create lists of unique strings
uniqueLists = []
columnNamesList = []
for columnName in analysisFrame.columns:
if self.debug:
print('Column Name : ', columnName)
print('Column Contents : ', analysisFrame[columnName].values)
            if pandas.api.types.is_string_dtype(analysisFrame[columnName]):
columnUniqueList = analysisFrame[columnName].unique().tolist()
else:
columnUniqueList = None
columnNamesList.append(columnName)
uniqueLists.append([columnName, columnUniqueList])
if self.debug: # Preview data.
print(analysisFrame.head(5))
return (analysisFrame, analysisFrameFormat, uniqueLists, columnNamesList)
def frameToINI(self, analysisFrame=None, sectionName='Unknown', outFolder=None, outFile='nil.ini'):
if analysisFrame is None:
return False
try:
if outFolder is None:
outFolder = os.getcwd()
configFilePath = os.path.join(outFolder, outFile)
configINI = cF.ConfigParser()
configINI.add_section(sectionName)
            for (columnName, columnData) in analysisFrame.items():
if self.debug:
print('Column Name : ', columnName)
print('Column Contents : ', columnData.values)
print("Column Contents Length:", len(columnData.values))
print("Column Contents Type", type(columnData.values))
writeList = "["
for colIndex, colValue in enumerate(columnData):
writeList = f"{writeList}'{colValue}'"
if colIndex < len(columnData) - 1:
writeList = f"{writeList}, "
writeList = f"{writeList}]"
configINI.set(sectionName, columnName, writeList)
if not os.path.exists(configFilePath) or os.stat(configFilePath).st_size == 0:
with open(configFilePath, 'w') as configWritingFile:
configINI.write(configWritingFile)
noErrors = True
except ValueError as e:
errorString = ("ERROR in {__file__} @{framePrintNo} with {ErrorFound}".format(__file__=str(__file__),
framePrintNo=str(
sys._getframe().f_lineno),
ErrorFound=e))
print(errorString)
noErrors = False
return noErrors
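    # Hypothetical programmatic use of this class, mirroring the CLI described in the
    # module docstring (the file names and section name below are placeholders only):
    #   meta = TransformMetaData(inputFileName='time-series.csv', debug=True,
    #                            transform=True, sectionName='time-series',
    #                            outFolder=os.getcwd(), outFile='time-series.ini')
    #   print(meta.getColumnList())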
@staticmethod
def _validNumericalFloat(inValue):
"""
Determines if the value is a valid numerical object.
Args:
inValue: floating-point value
Returns: Value in floating-point or Not-A-Number.
"""
try:
return numpy.float128(inValue)
except ValueError:
return numpy.nan
@staticmethod
def _calculateMean(x):
"""
Calculates the mean in a multiplication method since division produces an infinity or NaN
Args:
x: Input data set. We use a data frame.
Returns: Calculated mean for a vector data frame.
"""
try:
mean = numpy.float128(numpy.average(x, weights=numpy.ones_like(numpy.float128(x)) / numpy.float128(x.size)))
except ValueError:
mean = 0
pass
return mean
def _calculateStd(self, data):
"""
        Calculates the standard deviation in a multiplication method since division produces an infinity or NaN
Args:
data: Input data set. We use a data frame.
Returns: Calculated standard deviation for a vector data frame.
"""
sd = 0
try:
n = numpy.float128(data.size)
if n <= 1:
return numpy.float128(0.0)
# Use multiplication version of mean since numpy bug causes infinity.
mean = self._calculateMean(data)
sd = numpy.float128(mean)
# Calculate standard deviation
for el in data:
diff = numpy.float128(el) - numpy.float128(mean)
sd += (diff) ** 2
points = numpy.float128(n - 1)
sd = numpy.float128(numpy.sqrt(numpy.float128(sd) / numpy.float128(points)))
except ValueError:
pass
return sd
def _determineQuickStats(self, dataAnalysisFrame, columnName=None, multiplierSigma=3.0):
"""
Determines stats based on a vector to get the data shape.
Args:
dataAnalysisFrame: Dataframe to do analysis on.
columnName: Column name of the data frame.
multiplierSigma: Sigma range for the stats.
Returns: Set of stats.
"""
meanValue = 0
sigmaValue = 0
sigmaRangeValue = 0
topValue = 0
try:
            # Clean out anomalies due to random invalid inputs.
if (columnName is not None):
meanValue = self._calculateMean(dataAnalysisFrame[columnName])
                if numpy.isnan(meanValue):
meanValue = numpy.float128(1)
sigmaValue = self._calculateStd(dataAnalysisFrame[columnName])
                if numpy.isnan(float(sigmaValue)):
sigmaValue = numpy.float128(1)
multiplier = numpy.float128(multiplierSigma) # Stats: 1 sigma = 68%, 2 sigma = 95%, 3 sigma = 99.7
sigmaRangeValue = (sigmaValue * multiplier)
                if numpy.isnan(float(sigmaRangeValue)):
sigmaRangeValue = numpy.float128(1)
topValue = numpy.float128(meanValue + sigmaRangeValue)
print("Name:{} Mean= {}, Sigma= {}, {}*Sigma= {}".format(columnName,
meanValue,
sigmaValue,
multiplier,
sigmaRangeValue))
except ValueError:
pass
return (meanValue, sigmaValue, sigmaRangeValue, topValue)
def _cleanZerosForColumnInFrame(self, dataAnalysisFrame, columnName='cycles'):
"""
Cleans the data frame with data values that are invalid. I.E. inf, NaN
Args:
dataAnalysisFrame: Dataframe to do analysis on.
columnName: Column name of the data frame.
Returns: Cleaned dataframe.
"""
dataAnalysisCleaned = None
try:
            # Clean out anomalies due to random invalid inputs.
(meanValue, sigmaValue, sigmaRangeValue, topValue) = self._determineQuickStats(
dataAnalysisFrame=dataAnalysisFrame, columnName=columnName)
# dataAnalysisCleaned = dataAnalysisFrame[dataAnalysisFrame[columnName] != 0]
# When the cycles are negative or zero we missed cleaning up a row.
# logicVector = (dataAnalysisFrame[columnName] != 0)
# dataAnalysisCleaned = dataAnalysisFrame[logicVector]
            logicVector = (dataAnalysisFrame[columnName] >= 1)
            dataAnalysisCleaned = dataAnalysisFrame[logicVector]
# These timed out mean + 2 * sd
logicVector = (dataAnalysisCleaned[columnName] < topValue) # Data range
dataAnalysisCleaned = dataAnalysisCleaned[logicVector]
except ValueError:
pass
return dataAnalysisCleaned
def _cleanFrame(self, dataAnalysisTemp, cleanColumn=False, columnName='cycles'):
"""
Args:
dataAnalysisTemp: Dataframe to do analysis on.
cleanColumn: Flag to clean the data frame.
columnName: Column name of the data frame.
Returns: cleaned dataframe
"""
try:
            replacementList = [pandas.NaT, numpy.inf, -numpy.inf, 'NaN', 'inf', '-inf', 'NULL']
if cleanColumn is True:
dataAnalysisTemp = self._cleanZerosForColumnInFrame(dataAnalysisTemp, columnName=columnName)
dataAnalysisTemp = dataAnalysisTemp.replace(to_replace=replacementList,
value=numpy.nan)
dataAnalysisTemp = dataAnalysisTemp.dropna()
except ValueError:
pass
return dataAnalysisTemp
@staticmethod
def _getDataFormat():
"""
Return the dataframe setup for the CSV file generated from server.
Returns: dictionary data format for pandas.
"""
dataFormat = {
"Serial_Number": pandas.StringDtype(),
"LogTime0": pandas.StringDtype(), # @todo force rename
"Id0": pandas.StringDtype(), # @todo force rename
"DriveId": pandas.StringDtype(),
"JobRunId": pandas.StringDtype(),
"LogTime1": pandas.StringDtype(), # @todo force rename
"Comment0": pandas.StringDtype(), # @todo force rename
"CriticalWarning": pandas.StringDtype(),
"Temperature": pandas.StringDtype(),
"AvailableSpare": pandas.StringDtype(),
"AvailableSpareThreshold": pandas.StringDtype(),
"PercentageUsed": pandas.StringDtype(),
"DataUnitsReadL": pandas.StringDtype(),
"DataUnitsReadU": pandas.StringDtype(),
"DataUnitsWrittenL": pandas.StringDtype(),
"DataUnitsWrittenU": pandas.StringDtype(),
"HostReadCommandsL": pandas.StringDtype(),
"HostReadCommandsU": pandas.StringDtype(),
"HostWriteCommandsL": pandas.StringDtype(),
"HostWriteCommandsU": pandas.StringDtype(),
"ControllerBusyTimeL": pandas.StringDtype(),
"ControllerBusyTimeU": pandas.StringDtype(),
"PowerCyclesL": pandas.StringDtype(),
"PowerCyclesU": pandas.StringDtype(),
"PowerOnHoursL": pandas.StringDtype(),
"PowerOnHoursU": pandas.StringDtype(),
"UnsafeShutdownsL": pandas.StringDtype(),
"UnsafeShutdownsU": pandas.StringDtype(),
"MediaErrorsL": pandas.StringDtype(),
"MediaErrorsU": pandas.StringDtype(),
"NumErrorInfoLogsL": pandas.StringDtype(),
"NumErrorInfoLogsU": pandas.StringDtype(),
"ProgramFailCountN": pandas.StringDtype(),
"ProgramFailCountR": pandas.StringDtype(),
"EraseFailCountN": pandas.StringDtype(),
"EraseFailCountR": pandas.StringDtype(),
"WearLevelingCountN": pandas.StringDtype(),
"WearLevelingCountR": pandas.StringDtype(),
"E2EErrorDetectCountN": pandas.StringDtype(),
"E2EErrorDetectCountR": pandas.StringDtype(),
"CRCErrorCountN": pandas.StringDtype(),
"CRCErrorCountR": pandas.StringDtype(),
"MediaWearPercentageN": pandas.StringDtype(),
"MediaWearPercentageR": pandas.StringDtype(),
"HostReadsN": pandas.StringDtype(),
"HostReadsR": pandas.StringDtype(),
"TimedWorkloadN": pandas.StringDtype(),
"TimedWorkloadR": pandas.StringDtype(),
"ThermalThrottleStatusN": pandas.StringDtype(),
"ThermalThrottleStatusR": pandas.StringDtype(),
"RetryBuffOverflowCountN": pandas.StringDtype(),
"RetryBuffOverflowCountR": pandas.StringDtype(),
"PLLLockLossCounterN": pandas.StringDtype(),
"PLLLockLossCounterR": pandas.StringDtype(),
"NandBytesWrittenN": pandas.StringDtype(),
"NandBytesWrittenR": pandas.StringDtype(),
"HostBytesWrittenN": pandas.StringDtype(),
"HostBytesWrittenR": pandas.StringDtype(),
"SystemAreaLifeRemainingN": pandas.StringDtype(),
"SystemAreaLifeRemainingR": pandas.StringDtype(),
"RelocatableSectorCountN": pandas.StringDtype(),
"RelocatableSectorCountR": pandas.StringDtype(),
"SoftECCErrorRateN": pandas.StringDtype(),
"SoftECCErrorRateR": pandas.StringDtype(),
"UnexpectedPowerLossN": pandas.StringDtype(),
"UnexpectedPowerLossR": pandas.StringDtype(),
"MediaErrorCountN": pandas.StringDtype(),
"MediaErrorCountR": pandas.StringDtype(),
"NandBytesReadN": pandas.StringDtype(),
"NandBytesReadR": pandas.StringDtype(),
"WarningCompTempTime": pandas.StringDtype(),
"CriticalCompTempTime": pandas.StringDtype(),
"TempSensor1": pandas.StringDtype(),
"TempSensor2": pandas.StringDtype(),
"TempSensor3": pandas.StringDtype(),
"TempSensor4": pandas.StringDtype(),
"TempSensor5": pandas.StringDtype(),
"TempSensor6": pandas.StringDtype(),
"TempSensor7": pandas.StringDtype(),
"TempSensor8": pandas.StringDtype(),
"ThermalManagementTemp1TransitionCount": pandas.StringDtype(),
"ThermalManagementTemp2TransitionCount": pandas.StringDtype(),
"TotalTimeForThermalManagementTemp1": pandas.StringDtype(),
"TotalTimeForThermalManagementTemp2": pandas.StringDtype(),
"Core_Num": pandas.StringDtype(),
"Id1": pandas.StringDtype(), # @todo force rename
"Job_Run_Id": pandas.StringDtype(),
"Stats_Time": pandas.StringDtype(),
"HostReads": pandas.StringDtype(),
"HostWrites": pandas.StringDtype(),
"NandReads": pandas.StringDtype(),
"NandWrites": pandas.StringDtype(),
"ProgramErrors": pandas.StringDtype(),
"EraseErrors": pandas.StringDtype(),
"ErrorCount": pandas.StringDtype(),
"BitErrorsHost1": pandas.StringDtype(),
"BitErrorsHost2": pandas.StringDtype(),
"BitErrorsHost3": pandas.StringDtype(),
"BitErrorsHost4": pandas.StringDtype(),
"BitErrorsHost5": pandas.StringDtype(),
"BitErrorsHost6": pandas.StringDtype(),
"BitErrorsHost7": pandas.StringDtype(),
"BitErrorsHost8": pandas.StringDtype(),
"BitErrorsHost9": pandas.StringDtype(),
"BitErrorsHost10": pandas.StringDtype(),
"BitErrorsHost11": pandas.StringDtype(),
"BitErrorsHost12": pandas.StringDtype(),
"BitErrorsHost13": pandas.StringDtype(),
"BitErrorsHost14": pandas.StringDtype(),
"BitErrorsHost15": pandas.StringDtype(),
"ECCFail": pandas.StringDtype(),
"GrownDefects": pandas.StringDtype(),
"FreeMemory": pandas.StringDtype(),
"WriteAllowance": pandas.StringDtype(),
"ModelString": pandas.StringDtype(),
"ValidBlocks": pandas.StringDtype(),
"TokenBlocks": pandas.StringDtype(),
"SpuriousPFCount": pandas.StringDtype(),
"SpuriousPFLocations1": pandas.StringDtype(),
"SpuriousPFLocations2": pandas.StringDtype(),
"SpuriousPFLocations3": pandas.StringDtype(),
"SpuriousPFLocations4": pandas.StringDtype(),
"SpuriousPFLocations5": pandas.StringDtype(),
"SpuriousPFLocations6": pandas.StringDtype(),
"SpuriousPFLocations7": pandas.StringDtype(),
"SpuriousPFLocations8": pandas.StringDtype(),
"BitErrorsNonHost1": pandas.StringDtype(),
"BitErrorsNonHost2": pandas.StringDtype(),
"BitErrorsNonHost3": pandas.StringDtype(),
"BitErrorsNonHost4": pandas.StringDtype(),
"BitErrorsNonHost5": pandas.StringDtype(),
"BitErrorsNonHost6": pandas.StringDtype(),
"BitErrorsNonHost7": pandas.StringDtype(),
"BitErrorsNonHost8": pandas.StringDtype(),
"BitErrorsNonHost9": pandas.StringDtype(),
"BitErrorsNonHost10": pandas.StringDtype(),
"BitErrorsNonHost11": pandas.StringDtype(),
"BitErrorsNonHost12": pandas.StringDtype(),
"BitErrorsNonHost13": pandas.StringDtype(),
"BitErrorsNonHost14": pandas.StringDtype(),
"BitErrorsNonHost15": pandas.StringDtype(),
"ECCFailNonHost": pandas.StringDtype(),
"NSversion": pandas.StringDtype(),
"numBands": pandas.StringDtype(),
"minErase": pandas.StringDtype(),
"maxErase": pandas.StringDtype(),
"avgErase": pandas.StringDtype(),
"minMVolt": pandas.StringDtype(),
"maxMVolt": pandas.StringDtype(),
"avgMVolt": pandas.StringDtype(),
"minMAmp": pandas.StringDtype(),
"maxMAmp": pandas.StringDtype(),
"avgMAmp": pandas.StringDtype(),
"comment1": pandas.StringDtype(), # @todo force rename
"minMVolt12v": pandas.StringDtype(),
"maxMVolt12v": pandas.StringDtype(),
"avgMVolt12v": pandas.StringDtype(),
"minMAmp12v": pandas.StringDtype(),
"maxMAmp12v": pandas.StringDtype(),
"avgMAmp12v": pandas.StringDtype(),
"nearMissSector": pandas.StringDtype(),
"nearMissDefect": pandas.StringDtype(),
"nearMissOverflow": pandas.StringDtype(),
"replayUNC": pandas.StringDtype(),
"Drive_Id": pandas.StringDtype(),
"indirectionMisses": pandas.StringDtype(),
"BitErrorsHost16": pandas.StringDtype(),
"BitErrorsHost17": pandas.StringDtype(),
"BitErrorsHost18": pandas.StringDtype(),
"BitErrorsHost19": pandas.StringDtype(),
"BitErrorsHost20": pandas.StringDtype(),
"BitErrorsHost21": pandas.StringDtype(),
"BitErrorsHost22": pandas.StringDtype(),
"BitErrorsHost23": pandas.StringDtype(),
"BitErrorsHost24": pandas.StringDtype(),
"BitErrorsHost25": pandas.StringDtype(),
"BitErrorsHost26": pandas.StringDtype(),
"BitErrorsHost27": pandas.StringDtype(),
"BitErrorsHost28": pandas.StringDtype(),
"BitErrorsHost29": pandas.StringDtype(),
"BitErrorsHost30": pandas.StringDtype(),
"BitErrorsHost31": pandas.StringDtype(),
"BitErrorsHost32": pandas.StringDtype(),
"BitErrorsHost33": pandas.StringDtype(),
"BitErrorsHost34": pandas.StringDtype(),
"BitErrorsHost35": pandas.StringDtype(),
"BitErrorsHost36": pandas.StringDtype(),
"BitErrorsHost37": pandas.StringDtype(),
"BitErrorsHost38": pandas.StringDtype(),
"BitErrorsHost39": pandas.StringDtype(),
"BitErrorsHost40": pandas.StringDtype(),
"XORRebuildSuccess": pandas.StringDtype(),
"XORRebuildFail": pandas.StringDtype(),
"BandReloForError": pandas.StringDtype(),
"mrrSuccess": pandas.StringDtype(),
"mrrFail": pandas.StringDtype(),
"mrrNudgeSuccess": pandas.StringDtype(),
"mrrNudgeHarmless": pandas.StringDtype(),
"mrrNudgeFail": pandas.StringDtype(),
"totalErases": pandas.StringDtype(),
"dieOfflineCount": pandas.StringDtype(),
"curtemp": pandas.StringDtype(),
"mintemp": pandas.StringDtype(),
"maxtemp": pandas.StringDtype(),
"oventemp": pandas.StringDtype(),
"allZeroSectors": pandas.StringDtype(),
"ctxRecoveryEvents": pandas.StringDtype(),
"ctxRecoveryErases": pandas.StringDtype(),
"NSversionMinor": pandas.StringDtype(),
"lifeMinTemp": pandas.StringDtype(),
"lifeMaxTemp": pandas.StringDtype(),
"powerCycles": pandas.StringDtype(),
"systemReads": pandas.StringDtype(),
"systemWrites": pandas.StringDtype(),
"readRetryOverflow": pandas.StringDtype(),
"unplannedPowerCycles": pandas.StringDtype(),
"unsafeShutdowns": pandas.StringDtype(),
"defragForcedReloCount": pandas.StringDtype(),
"bandReloForBDR": pandas.StringDtype(),
"bandReloForDieOffline": pandas.StringDtype(),
"bandReloForPFail": pandas.StringDtype(),
"bandReloForWL": pandas.StringDtype(),
"provisionalDefects": pandas.StringDtype(),
"uncorrectableProgErrors": pandas.StringDtype(),
"powerOnSeconds": pandas.StringDtype(),
"bandReloForChannelTimeout": pandas.StringDtype(),
"fwDowngradeCount": pandas.StringDtype(),
"dramCorrectablesTotal": pandas.StringDtype(),
"hb_id": pandas.StringDtype(),
"dramCorrectables1to1": pandas.StringDtype(),
"dramCorrectables4to1": pandas.StringDtype(),
"dramCorrectablesSram": pandas.StringDtype(),
"dramCorrectablesUnknown": pandas.StringDtype(),
"pliCapTestInterval": pandas.StringDtype(),
"pliCapTestCount": pandas.StringDtype(),
"pliCapTestResult": pandas.StringDtype(),
"pliCapTestTimeStamp": pandas.StringDtype(),
"channelHangSuccess": pandas.StringDtype(),
"channelHangFail": pandas.StringDtype(),
"BitErrorsHost41": pandas.StringDtype(),
"BitErrorsHost42": pandas.StringDtype(),
"BitErrorsHost43": pandas.StringDtype(),
"BitErrorsHost44": pandas.StringDtype(),
"BitErrorsHost45": pandas.StringDtype(),
"BitErrorsHost46": pandas.StringDtype(),
"BitErrorsHost47": pandas.StringDtype(),
"BitErrorsHost48": pandas.StringDtype(),
"BitErrorsHost49": pandas.StringDtype(),
"BitErrorsHost50": pandas.StringDtype(),
"BitErrorsHost51": pandas.StringDtype(),
"BitErrorsHost52": pandas.StringDtype(),
"BitErrorsHost53": pandas.StringDtype(),
"BitErrorsHost54": pandas.StringDtype(),
"BitErrorsHost55": pandas.StringDtype(),
"BitErrorsHost56": pandas.StringDtype(),
"mrrNearMiss": pandas.StringDtype(),
"mrrRereadAvg": pandas.StringDtype(),
"readDisturbEvictions": pandas.StringDtype(),
"L1L2ParityError": pandas.StringDtype(),
"pageDefects": pandas.StringDtype(),
"pageProvisionalTotal": pandas.StringDtype(),
"ASICTemp": pandas.StringDtype(),
"PMICTemp": pandas.StringDtype(),
"size": pandas.StringDtype(),
"lastWrite": pandas.StringDtype(),
"timesWritten": pandas.StringDtype(),
"maxNumContextBands": pandas.StringDtype(),
"blankCount": pandas.StringDtype(),
"cleanBands": pandas.StringDtype(),
"avgTprog": pandas.StringDtype(),
"avgEraseCount": pandas.StringDtype(),
"edtcHandledBandCnt": pandas.StringDtype(),
"bandReloForNLBA": pandas.StringDtype(),
"bandCrossingDuringPliCount": pandas.StringDtype(),
"bitErrBucketNum": pandas.StringDtype(),
"sramCorrectablesTotal": pandas.StringDtype(),
"l1SramCorrErrCnt": pandas.StringDtype(),
"l2SramCorrErrCnt": pandas.StringDtype(),
"parityErrorValue": pandas.StringDtype(),
"parityErrorType": pandas.StringDtype(),
"mrr_LutValidDataSize": pandas.StringDtype(),
"pageProvisionalDefects": pandas.StringDtype(),
"plisWithErasesInProgress": pandas.StringDtype(),
"lastReplayDebug": pandas.StringDtype(),
"externalPreReadFatals": pandas.StringDtype(),
"hostReadCmd": pandas.StringDtype(),
"hostWriteCmd": pandas.StringDtype(),
"trimmedSectors": pandas.StringDtype(),
"trimTokens": pandas.StringDtype(),
"mrrEventsInCodewords": pandas.StringDtype(),
"mrrEventsInSectors": pandas.StringDtype(),
"powerOnMicroseconds": pandas.StringDtype(),
"mrrInXorRecEvents": pandas.StringDtype(),
"mrrFailInXorRecEvents": pandas.StringDtype(),
"mrrUpperpageEvents": pandas.StringDtype(),
"mrrLowerpageEvents": pandas.StringDtype(),
"mrrSlcpageEvents": pandas.StringDtype(),
"mrrReReadTotal": pandas.StringDtype(),
"powerOnResets": pandas.StringDtype(),
"powerOnMinutes": pandas.StringDtype(),
"throttleOnMilliseconds": pandas.StringDtype(),
"ctxTailMagic": pandas.StringDtype(),
"contextDropCount": pandas.StringDtype(),
"lastCtxSequenceId": pandas.StringDtype(),
"currCtxSequenceId": pandas.StringDtype(),
"mbliEraseCount": pandas.StringDtype(),
"pageAverageProgramCount": pandas.StringDtype(),
"bandAverageEraseCount": pandas.StringDtype(),
"bandTotalEraseCount": pandas.StringDtype(),
"bandReloForXorRebuildFail": pandas.StringDtype(),
"defragSpeculativeMiss": pandas.StringDtype(),
"uncorrectableBackgroundScan": pandas.StringDtype(),
"BitErrorsHost57": pandas.StringDtype(),
"BitErrorsHost58": pandas.StringDtype(),
"BitErrorsHost59": pandas.StringDtype(),
"BitErrorsHost60": pandas.StringDtype(),
"BitErrorsHost61": pandas.StringDtype(),
"BitErrorsHost62": pandas.StringDtype(),
"BitErrorsHost63": pandas.StringDtype(),
"BitErrorsHost64": pandas.StringDtype(),
"BitErrorsHost65": pandas.StringDtype(),
"BitErrorsHost66": pandas.StringDtype(),
"BitErrorsHost67": pandas.StringDtype(),
"BitErrorsHost68": pandas.StringDtype(),
"BitErrorsHost69": pandas.StringDtype(),
"BitErrorsHost70": pandas.StringDtype(),
"BitErrorsHost71": pandas.StringDtype(),
"BitErrorsHost72": pandas.StringDtype(),
"BitErrorsHost73": pandas.StringDtype(),
"BitErrorsHost74": pandas.StringDtype(),
"BitErrorsHost75": pandas.StringDtype(),
"BitErrorsHost76": pandas.StringDtype(),
"BitErrorsHost77": pandas.StringDtype(),
"BitErrorsHost78": pandas.StringDtype(),
"BitErrorsHost79": pandas.StringDtype(),
"BitErrorsHost80": pandas.StringDtype(),
"bitErrBucketArray1": pandas.StringDtype(),
"bitErrBucketArray2": pandas.StringDtype(),
"bitErrBucketArray3": pandas.StringDtype(),
"bitErrBucketArray4": pandas.StringDtype(),
"bitErrBucketArray5": pandas.StringDtype(),
"bitErrBucketArray6": pandas.StringDtype(),
"bitErrBucketArray7": pandas.StringDtype(),
"bitErrBucketArray8": pandas.StringDtype(),
"bitErrBucketArray9": pandas.StringDtype(),
"bitErrBucketArray10": pandas.StringDtype(),
"bitErrBucketArray11": pandas.StringDtype(),
"bitErrBucketArray12": pandas.StringDtype(),
"bitErrBucketArray13": pandas.StringDtype(),
"bitErrBucketArray14": pandas.StringDtype(),
"bitErrBucketArray15": pandas.StringDtype(),
"bitErrBucketArray16": pandas.StringDtype(),
"bitErrBucketArray17": pandas.StringDtype(),
"bitErrBucketArray18": pandas.StringDtype(),
"bitErrBucketArray19": pandas.StringDtype(),
"bitErrBucketArray20": pandas.StringDtype(),
"bitErrBucketArray21": pandas.StringDtype(),
"bitErrBucketArray22": pandas.StringDtype(),
"bitErrBucketArray23": pandas.StringDtype(),
"bitErrBucketArray24": pandas.StringDtype(),
"bitErrBucketArray25": pandas.StringDtype(),
"bitErrBucketArray26": pandas.StringDtype(),
"bitErrBucketArray27": pandas.StringDtype(),
"bitErrBucketArray28": pandas.StringDtype(),
"bitErrBucketArray29": pandas.StringDtype(),
"bitErrBucketArray30": pandas.StringDtype(),
"bitErrBucketArray31": pandas.StringDtype(),
"bitErrBucketArray32": pandas.StringDtype(),
"bitErrBucketArray33": | pandas.StringDtype() | pandas.StringDtype |
# Evaluation of each forecast and comparison between the forecasts
import pandas as pd
import numpy as np
from numpy.random import rand
from numpy import ix_
from itertools import product
import chart_studio.plotly as py
import chart_studio
import plotly.graph_objs as go
import statsmodels.api as sm
chart_studio.tools.set_credentials_file(username='Emborg', api_key='<KEY>')
np.random.seed(1337)
# Predictions from each forecast
data = pd.read_csv('Data/All_Merged.csv') # , parse_dates=[0], date_parser=dateparse
data.isna().sum()
data.fillna(0, inplace=True)
data = data.set_index('date')
data = data.loc[~data.index.duplicated(keep='first')]
data = data.drop('2018-10-29')
# Forecasts
LSTM = pd.read_csv('Data/LSTM_Pred.csv', index_col=0)
LSTM = LSTM.loc[~LSTM.index.duplicated(keep='first')]
LSTM = LSTM.iloc[:-11, :]
LSTM = LSTM.drop('2018-10-29')
LSTM_NS = pd.read_csv('Data/LSTM_Pred_NoSent.csv', index_col=0)
LSTM_NS = LSTM_NS.loc[~LSTM_NS.index.duplicated(keep='first')]
LSTM_NS = LSTM_NS.iloc[:-11, :]
LSTM_NS = LSTM_NS.drop('2018-10-29')
ARIMA = pd.read_csv('Data/ARIMA_Pred.csv', index_col=0)
ARIMA = ARIMA.iloc[:-11, :]
ARIMA_NS = pd.read_csv('Data/ARIMA_Pred_NoSent.csv', index_col=0)
ARIMA_NS = ARIMA_NS.iloc[:-11, :]
XGB = pd.read_csv('Data/XGB_Pred.csv', index_col=0)
XGB = XGB.loc[~XGB.index.duplicated(keep='first')]
XGB = XGB.iloc[1:, :]
XGB = XGB.drop('2018-10-29')
XGB_NS = pd.read_csv('Data/XGB_Pred_nosenti.csv', index_col=0)
XGB_NS = XGB_NS.loc[~XGB_NS.index.duplicated(keep='first')]
XGB_NS = XGB_NS.iloc[1:, :]
XGB_NS = XGB_NS.drop('2018-10-29')
AR1 = pd.read_csv('Data/AR1.csv', index_col=0)
AR1 = AR1.iloc[:-11, :]
VAR = pd.read_csv('Data/VAR_pred.csv', index_col=0)
VAR = VAR.loc[~VAR.index.duplicated(keep='first')]
VAR = VAR[VAR.index.isin(LSTM.index)]['price']
VAR_NS = pd.read_csv('Data/VAR_pred_nosenti.csv', index_col=0)
VAR_NS = VAR_NS.loc[~VAR_NS.index.duplicated(keep='first')]
VAR_NS = VAR_NS[VAR_NS.index.isin(LSTM.index)]['price']
# Price for the forecasting period
price = data[data.index.isin(LSTM.index)]
price = price[['price']]
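# Align the remaining forecasts to the price index so the merges below join on the same dates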
ARIMA.index = price.index
ARIMA_NS.index = price.index
XGB.index = price.index
XGB_NS.index = price.index
colors = [
'#1f77b4', # muted blue
'#ff7f0e', # safety orange
'#2ca02c', # cooked asparagus green
'#d62728', # brick red
'#9467bd', # muted purple
'#8c564b', # chestnut brown
'#e377c2', # raspberry yogurt pink
'#7f7f7f', # middle gray
'#bcbd22', # curry yellow-green
'#17becf' # blue-teal
]
# Combined Forecast DataFrame
fc = pd.DataFrame()
fc = price
fc = fc.merge(AR1[['forecast']], how='left', left_index=True, right_index=True)
fc = fc.merge(ARIMA[['forecast']], how='left', left_index=True, right_index=True)
fc = fc.merge(ARIMA_NS[['forecast']], how='left', left_index=True, right_index=True)
fc = fc.merge(VAR, how='left', left_index=True, right_index=True)
fc = fc.merge(VAR_NS, how='left', left_index=True, right_index=True)
fc = fc.merge(XGB, how='left', left_index=True, right_index=True)
fc = fc.merge(XGB_NS, how='left', left_index=True, right_index=True)
fc = fc.merge(LSTM[['LSTM']], how='left', left_index=True, right_index=True)
fc = fc.merge(LSTM_NS[['LSTM']], how='left', left_index=True, right_index=True)
# fc = fc.merge(XGB_NS, how='left', left_index=True, right_index=True)
fc.columns = ['Price', 'AR1', 'ARIMAX', 'ARIMAX_NS', 'VAR', 'VAR_NS', 'XGB', 'XGB_NS', 'LSTM', 'LSTM_NS']
# fc.to_csv(r'Data\All_Forecasts.csv')
fig = go.Figure()
n = 0
for key in fc.columns:
fig.add_trace(go.Scatter(x=fc.index,
y=fc[key],
mode='lines',
name=key,
line=dict(color=colors[n % len(colors)])))
n = n + 1
fig.update_layout(yaxis=dict(title='USD'),
xaxis=dict(title='date'))
py.plot(fig, filename='price_all_fc')
# Actual price
actual = fc[['Price']]
fc = fc.iloc[:, 1:]
# Error metrics
def RMSE(fc, actual):
actual = actual.values
fc = fc.values
losses = fc - actual
RMSE = np.sqrt(np.mean(losses ** 2, axis=0))
return (RMSE)
def MAE(fc, actual):
actual = actual.values
fc = fc.values
losses = fc - actual
MAE = np.mean(np.abs(losses), axis=0)
return (MAE)
def residual_bar_plot(fc_1, fc_2, actuals, name1, name2):
df = pd.DataFrame(fc_1.values - actuals.values)
df[name2] = fc_2.values - actuals.values
df.columns = [name1,name2]
df.hist()
print(name1)
print(round(sm.tsa.stattools.adfuller(df[name1])[1],4))
print(round(sm.stats.stattools.jarque_bera(df[name1])[1],4))
print(name2)
print(round(sm.tsa.stattools.adfuller(df[name2])[1],4))
print(round(sm.stats.stattools.jarque_bera(df[name2])[1],4))
residual_bar_plot(fc[['ARIMAX']], fc[['ARIMAX_NS']], actual, 'ARIMA', 'ARIMA_NS')
residual_bar_plot(fc[['LSTM']], fc[['LSTM_NS']], actual, 'LSTM', 'LSTM_NS')
residual_bar_plot(fc[['VAR']], fc[['VAR_NS']], actual, 'VAR', 'VAR_NS')
residual_bar_plot(fc[['XGB']], fc[['XGB_NS']], actual, 'XGB', 'XGB_NS')
name1 = 'ARIMAX'
fc_1 = fc[['ARIMAX']]
# split_date = '2019-05-01'
# fc = fc.loc[fc.index >= split_date]
# actual = actual.loc[actual.index >= split_date]
rmse = RMSE(fc, actual)
mae = MAE(fc, actual)
print(pd.DataFrame(rmse).to_latex())
# Diebold-Mariano testing
dm_result = list()
done_models = list()
models_list = fc.columns
for model1 in models_list:
for model2 in models_list:
if model1 != model2:
dm_result.append(dm_test(fc[[model1]], fc[[model2]], actual))
dm_result = pd.DataFrame(dm_result)
# dm_result['t-stat'] = np.abs(dm_result['t-stat'])
dm_result = dm_result.loc[~np.abs(dm_result['t-stat']).duplicated(keep='first')]
dm_result['t-stat'] = round(dm_result['t-stat'],2)
dm_result['p-value'] = round(dm_result['p-value'],4)
print(dm_result.to_latex())
# Clark-West testing
cw1 = cw_test(ARIMA, ARIMA_NS, actual)
print(cw1)
cw2 = cw_test(LSTM[['LSTM']], LSTM_NS[['LSTM']], actual)
print(cw2)
cw3 = cw_test(XGB[['est']], XGB_NS[['est']], actual)
print(cw3)
cspe_plot(fc[['XGB_NS']], fc[['XGB']], actual)
# Model Confidence Set
# https://michael-gong.com/blogs/model-confidence-set/?fbclid=IwAR38oo302TSJ4BFqTpluh5aeivkyM6A1cc0tnZ_JUX08PNwRzQkIi4WPlps
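# NOTE: ModelConfidenceSet is not imported or defined in this script; it is assumed to come
# from the implementation linked above (or an equivalent exposing .run(), .included and .pvalues).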
# Wrap data and compute the Mean Absolute Error
MCS_data = pd.DataFrame(np.c_[fc.AR1, fc.ARIMAX, fc.ARIMAX_NS, fc.LSTM, fc.LSTM_NS, fc.VAR, fc.VAR_NS, fc.XGB, fc.XGB_NS, actual.Price],
columns=['AR1','ARIMAX', 'ARIMAX_NS', 'LSTM', 'LSTM_NS','VAR','VAR_NS','XGB','XGB_NS', 'Actual'])
losses = pd.DataFrame()
for model in MCS_data.columns: #['ARIMA', 'ARIMA_NS', 'LSTM', 'LSTM_NS']:
losses[model] = np.abs(MCS_data[model] - MCS_data['Actual'])
losses=losses.iloc[:,:-1]
mcs = ModelConfidenceSet(losses, 0.1, 3, 1000).run()
mcs.included
mcs.pvalues
# Forecast combinations
fc.columns[1:]
l1 = fc.columns[1:].values
l2 = ['ARIMAX', 'VAR', 'XGB','LSTM']
l3 = ['ARIMAX_NS', 'VAR_NS', 'XGB_NS','LSTM_NS']
comb_results = pd.DataFrame([[0,0,0,0,0],[0,0,0,0,0],[0,0,0,0,0]])
comb_results.index = ['All','S','NS']
comb_results.columns = ['Equal', 'MSE', 'Rank', 'Time(1)','Time(7)']
l_list = [l1,l2,l3]
i = 0
for l in l_list:
print(l)
pred = fc[l]
# Combinations
eq = fc_comb(actual=actual, fc=pred, weights="equal")
#bgw = fc_comb(actual=actual, fc=fc[fc.columns[1:]], weights="BGW")
mse = fc_comb(actual=actual, fc=pred, weights="MSE")
rank = fc_comb(actual=actual, fc=pred, weights="rank")
time = fc_comb(actual=actual, fc=pred, weights="time")
time7 = fc_comb(actual=actual, fc=pred, weights="time", window=7)
time14 = fc_comb(actual=actual, fc=pred, weights="time", window=14)
time30 = fc_comb(actual=actual, fc=pred, weights="time", window=30)
time60 = fc_comb(actual=actual, fc=pred, weights="time", window=60)
comb_results.iloc[i,0] = MAE(eq, actual)
comb_results.iloc[i,1] = MAE(mse, actual)
comb_results.iloc[i,2] = MAE(rank, actual)
comb_results.iloc[i,3] = MAE(time, actual)
comb_results.iloc[i,4] = MAE(time7, actual)
i = i + 1
print(round(comb_results,2).to_latex())
rank = pd.DataFrame(rank)
rank.columns = ['Rank']
eq = pd.DataFrame(eq)
eq.columns = ['Eq']
dm_test(rank[['Rank']], eq[['Eq']], actual)
# Functions
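# fc_comb and cspe_plot are used above but are not defined in this excerpt. Purely as an
# illustration (an assumption, not the original implementation), a minimal combiner consistent
# with how fc_comb is called might look like the sketch below; only the equal and inverse-MSE
# weighting schemes are shown, and the "rank"/"time" schemes are omitted.
def fc_comb_sketch(actual, fc, weights="equal"):
    """Combine the forecasts in `fc` (one column per model) into a single series."""
    errors = fc.sub(actual.iloc[:, 0].values, axis=0)   # per-model forecast errors
    if weights == "equal":
        w = np.repeat(1.0 / fc.shape[1], fc.shape[1])   # simple average
    elif weights == "MSE":
        inv_mse = 1.0 / (errors ** 2).mean(axis=0)      # weight proportional to 1 / MSE
        w = (inv_mse / inv_mse.sum()).values
    else:
        raise ValueError("only 'equal' and 'MSE' are sketched here")
    return fc.values @ w                                # combined forecast per date
# e.g. combined = fc_comb_sketch(actual, fc[l2], weights="MSE")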
# Diebold-Mariano test function
def dm_test(fc, fc_nested, actual):
fc_name = fc.columns[0]
fc_nested_name = fc_nested.columns[0]
import statsmodels.formula.api as smf
from sklearn.metrics import mean_squared_error
fc = fc.values
fc_nested = fc_nested.values
    actual = actual.values
e_fc = actual - fc
e_nested = actual - fc_nested
f_dm = e_nested ** 2 - e_fc ** 2
f_dm = pd.DataFrame(f_dm, columns=['f_dm'])
nwResult = smf.ols('f_dm ~ 1', data=f_dm).fit(cov_type='HAC', cov_kwds={'maxlags': 1})
dm_out = dict()
dm_out['t-stat'] = nwResult.tvalues[0]
dm_out['p-value'] = round(nwResult.pvalues[0], 4)
if dm_out['p-value'] < 0.05:
if mean_squared_error(actual, fc) < mean_squared_error(actual, fc_nested):
dm_out['conclusion'] = 'First forecast is best ' + fc_name + ' better then ' + fc_nested_name
else:
dm_out['conclusion'] = 'Second forecast is best' + fc_nested_name + ' better then ' + fc_name
else:
dm_out['conclusion'] = 'Forecasts have equal predictive power between ' + fc_nested_name + ' and ' + fc_name
return dm_out
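# Note on the statistic above: with e_fc and e_nested the two forecast-error series, the loss
# differential is d_t = e_nested_t**2 - e_fc_t**2, and the reported t-stat is the HAC
# (Newey-West, 1 lag) t-statistic on the constant in the regression d_t ~ 1, i.e. a
# Diebold-Mariano-style test of equal squared-error loss.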
# Clark West test function
def cw_test(fc, fc_nested, actual):
import statsmodels.formula.api as smf
from sklearn.metrics import mean_squared_error
fc = fc.values
fc_nested = fc_nested.values
    actual = actual.values
e_fc = actual - fc
e_nested = actual - fc_nested
e_diff = fc - fc_nested
f_CW = e_nested ** 2 - e_fc ** 2 + e_diff ** 2
f_CW = | pd.DataFrame(f_CW, columns=['f_CW']) | pandas.DataFrame |
'''
Copyright <NAME> and <NAME>
2015, 2016, 2017, 2018
'''
from __future__ import print_function # Python 2.7 and 3 compatibility
import os
import sys
import time
import shutil
#import warnings
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# Standard imports
from numpy import pi
from numpy.linalg import inv
from stat import S_ISREG, ST_CTIME, ST_MODE
from pandas import HDFStore, Series, DataFrame
from collections import OrderedDict
from pathlib import Path
# pyEPR custom imports
from . import hfss
from . import logger
from . import config
from . import AttrDict
from .hfss import ureg, CalcObject, ConstantVecCalcObject, set_property
from .toolbox import print_NoNewLine, print_color, deprecated, fact, epsilon_0, hbar, Planck, fluxQ, nck, \
divide_diagonal_by_2, print_matrix, DataFrame_col_diff, get_instance_vars,\
sort_df_col, sort_Series_idx
from .toolbox_circuits import Calcs_basic
from .toolbox_plotting import cmap_discrete, legend_translucent
from .numeric_diag import bbq_hmt, make_dispersive
import matplotlib as mpl
from .toolbox_report import plot_convergence_f_vspass, plot_convergence_max_df, plot_convergence_solved_elem, plot_convergence_maxdf_vs_sol
class Project_Info(object):
"""
Class containing options and information about the manipulation and analysis in HFSS.
Junction info:
-----------------------
self.junctions : OrderedDict()
A Josephson tunnel junction has to have its parameters specified here for the analysis.
Each junction is given a name and is specified by a dictionary.
It has the following properties:
1. `Lj_variable` : Name of HFSS variable that specifies junction inductance Lj defined on the boundary condition in HFSS. DO NOT USE Global names that start with $.
2. `rect` : Name of HFSS rectangle on which lumped boundary condition is specified.
            3. `line`        : Name of HFSS polyline which spans the length of the rectangle. Used to define the voltage across the junction, the current orientation for each junction, and the sign of the ZPF.
4. `length` : Length in HFSS of the junction rectangle and line (specified in meters).
Example definition:
        .. code-block:: python
# Define a single junction
pinfo = Project_Info('')
pinfo.junctions['j1'] = {'Lj_variable' : 'Lj1',
'rect' : 'JJrect1',
'line' : 'JJline1',
'length' : parse_units('50um')} # Length is in meters
# Specify multiple junctions in HFSS model
n_junctions = 5
for i in range(1, 1+n_junctions):
pinfo.junctions[f'j{i}'] = {'Lj_variable' : f'Lj{i}',
'rect' : f'JJrect{i}',
'line' : f'JJline{i}',
'length' : parse_units('50um')}
HFSS app connection settings
-----------------------
project_path : str
Directory path to the hfss project file. Should be the directory, not the file.
default = None: Assumes the project is open, and thus gets the project based on `project_name`
project_name : str, None
Name of the project within the project_path. "None" will get the current active one.
design_name : str, None
Name of the design within the project. "None" will get the current active one.
setup_name : str, None
Name of the setup within the design. "None" will get the current active one.
Additional init setting:
-----------------------
do_connect : True by default. Connect to HFSS
    HFSS design settings
-----------------------
describe junction parameters
junc_rects = None
Name of junction rectangles in HFSS
junc_lines = None
Name of lines in HFSS used to define the current orientation for each junction
junc_LJ_names = None
Name of junction inductance variables in HFSS.
Note, DO NOT USE Global names that start with $.
junc_lens = None
            Junction rect. length, measured in meters.
"""
class _Dissipative:
#TODO: remove and turn to dict
def __init__(self):
self.dielectrics_bulk = None
self.dielectric_surfaces = None
self.resistive_surfaces = None
self.seams = None
def __init__(self, project_path=None, project_name=None, design_name=None,
do_connect = True):
self.project_path = str(Path(project_path)) if not (project_path is None) else None # Path: format path correctly to system convention
self.project_name = project_name
self.design_name = design_name
self.setup_name = None
## HFSS desgin: describe junction parameters
# TODO: introduce modal labels
self.junctions = OrderedDict() # See above for help
self.ports = OrderedDict()
## Dissipative HFSS volumes and surfaces
self.dissipative = self._Dissipative()
self.options = config.options_hfss
        # Variables connected to HFSS
self.app = None
self.desktop = None
self.project = None
self.design = None
self.setup = None
if do_connect:
self.connect()
_Forbidden = ['app', 'design', 'desktop', 'project',
'dissipative', 'setup', '_Forbidden', 'junctions']
def save(self, hdf):
'''
hdf : pd.HDFStore
'''
hdf['project_info'] = pd.Series(get_instance_vars(self, self._Forbidden))
hdf['project_info_dissip'] = pd.Series(get_instance_vars(self.dissipative))
hdf['project_info_options'] = pd.Series(get_instance_vars(self.options))
hdf['project_info_junctions'] = pd.DataFrame(self.junctions)
hdf['project_info_ports'] = pd.DataFrame(self.ports)
@deprecated
def connect_to_project(self):
return self.connect()
def connect(self):
'''
Connect to HFSS design.
'''
#logger.info('Connecting to HFSS ...')
self.app, self.desktop, self.project = hfss.load_ansys_project(
self.project_name, self.project_path)
self.project_name = self.project.name
self.project_path = self.project.get_path()
# Design
if self.design_name is None:
self.design = self.project.get_active_design()
self.design_name = self.design.name
logger.info(f'\tOpened active design\n\tDesign: {self.design_name} [Solution type: {self.design.solution_type}]')
else:
try:
self.design = self.project.get_design(self.design_name)
except Exception as e:
tb = sys.exc_info()[2]
logger.error(f"Original error: {e}\n")
raise(Exception(' Did you provide the correct design name? Failed to pull up design.').with_traceback(tb))
#if not ('Eigenmode' == self.design.solution_type):
# logger.warning('\tWarning: The design tpye is not Eigenmode. Are you sure you dont want eigenmode?')
# Setup
try:
n_setups = len(self.design.get_setup_names())
if n_setups == 0:
logger.warning('\tNo design setup detected.')
if self.design.solution_type == 'Eigenmode':
logger.warning('\tCreating eigenmode default setup one.')
self.design.create_em_setup()
self.setup_name = 'Setup'
self.setup = self.design.get_setup(name=self.setup_name)
self.setup_name = self.setup.name
logger.info(f'\tOpened setup: {self.setup_name} [{type(self.setup)}]')
except Exception as e:
tb = sys.exc_info()[2]
logger.error(f"Original error: {e}\n")
raise(Exception(' Did you provide the correct setup name? Failed to pull up setup.').with_traceback(tb))
# Finalize
self.project_name = self.project.name
self.design_name = self.design.name
logger.info('\tConnected successfully.\t :)\t :)\t :)\t\n')
return self
def check_connected(self):
"""Checks if fully connected including setup
"""
return\
(self.setup is not None) and\
(self.design is not None) and\
(self.project is not None) and\
(self.desktop is not None) and\
(self.app is not None)
def disconnect(self):
'''
Disconnect from existing HFSS design.
'''
assert self.check_connected(
) is True, "it does not appear that you have connected to HFSS yet. use connect()"
self.project.release()
self.desktop.release()
self.app.release()
hfss.release()
### UTILITY FUNCTIONS
def get_dm(self):
'''
Get the design and modeler
.. code-block:: python
            oDesign, oModeler = pinfo.get_dm()
'''
oDesign = self.design
oModeler = oDesign.modeler
return oDesign, oModeler
def get_all_variables_names(self):
"""Returns array of all project and local design names."""
return self.project.get_variable_names() + self.design.get_variable_names()
def get_all_object_names(self):
"""Returns array of strings"""
oObjects = []
for s in ["Non Model", "Solids", "Unclassified", "Sheets", "Lines"]:
oObjects += self.design.modeler.get_objects_in_group(s)
return oObjects
def validate_junction_info(self):
""" Validate that the user has put in the junction info correctly.
Do no also forget to check the length of the rectangles/line of
the junction if you change it.
"""
all_variables_names = self.get_all_variables_names()
all_object_names = self.get_all_object_names()
for jjnm, jj in self.junctions.items():
assert jj['Lj_variable'] in all_variables_names, "pyEPR project_info user error found: Seems like for junction `%s` you specified a design or project variable for `Lj_variable` that does not exist in HFSS by the name: `%s` " % (
jjnm, jj['Lj_variable'])
for name in ['rect', 'line']:
assert jj[name] in all_object_names, "pyEPR project_info user error found: Seems like for junction `%s` you specified a %s that does not exist in HFSS by the name: `%s` " % (
jjnm, name, jj[name])
        #TODO: Check the length of the rectangle
#==============================================================================
#%% Main computation class & interface with HFSS
#==============================================================================
class pyEPR_HFSS(object):
"""
This class defines a pyEPR_HFSS object which calculates and saves
Hamiltonian parameters from an HFSS simulation.
Further, it allows one to calcualte dissipation, etc
"""
def __init__(self, *args, **kwargs):
'''
Parameters:
-------------------
project_info : Project_Info
            Supply the project info or the parameters to create pinfo
Example use:
-------------------
'''
        if (len(args) == 1) and (args[0].__class__.__name__ == 'Project_Info'): #isinstance(args[0], Project_Info): # fails on module reload with changes
project_info = args[0]
else:
            assert len(args) == 0, 'Since you did not pass a Project_Info object as an argument, we now assume you are trying to create a project info object here by passing its arguments. See Project_Info. It does not take any arguments, only kwargs.'
project_info = Project_Info(*args, **kwargs)
# Input
self.pinfo = project_info
if self.pinfo.check_connected() is False:
self.pinfo.connect()
self.verbose = True #TODO: change verbose to logger. remove verbose flags
self.append_analysis = False #TODO
# hfss connect module
self.fields = self.setup.get_fields()
self.solutions = self.setup.get_solutions()
# Variations - the following get updated in update_variation_information
self.nmodes = int(1)
self.listvariations = ("",)
self.nominalvariation = '0'
self.nvariations = 0
self.update_variation_information()
        self.hfss_variables = OrderedDict()  # container for eBBQ list of variables
if self.verbose:
print('Design \"%s\" info:'%self.design.name)
print('\t%-15s %d\n\t%-15s %d' %('# eigenmodes', self.nmodes, '# variations', self.nvariations))
# Setup data saving
self.setup_data()
self.latest_h5_path = None # #self.get_latest_h5()
''' #TODO: to be implemented to use old files
if self.latest_h5_path is not None and self.append_analysis:
latest_bbq_analysis = pyEPR_Analysis(self.latest_h5_path)
if self.verbose:
print( 'Varied variables and values : ', latest_bbq_analysis.get_swept_variables(), \
'Variations : ', latest_bbq_analysis.variations)
'''
@property
def setup(self):
return self.pinfo.setup
@property
def design(self):
return self.pinfo.design
@property
def project(self):
return self.pinfo.project
@property
def desktop(self):
return self.pinfo.desktop
@property
def app(self):
return self.pinfo.app
@property
def junctions(self):
return self.pinfo.junctions
@property
def ports(self):
return self.pinfo.ports
@property
def options(self):
return self.pinfo.options
def get_latest_h5(self):
'''
No longer used. Could be added back in.
'''
dirpath = self.data_dir
entries1 = (os.path.join(dirpath, fn) for fn in os.listdir(dirpath)) # get all entries in the directory w/ stats
entries2 = ((os.stat(path), path) for path in entries1)
entries3 = ((stat[ST_CTIME], path) # leave only regular files, insert creation date
for stat, path in entries2 if S_ISREG(stat[ST_MODE]) and path[-4:]=='hdf5')
#NOTE: on Windows `ST_CTIME` is a creation date but on Unix it could be something else
#NOTE: use `ST_MTIME` to sort by a modification date
paths_sorted = []
for cdate, path in sorted(entries3):
paths_sorted.append(path)
#print time.ctime(cdate), os.path.basename(path)
if len(paths_sorted) > 0:
self.latest_h5_path = paths_sorted[-1]
if self.verbose:
print('This simulations has been analyzed, latest data in ' + self.latest_h5_path)
else:
self.latest_h5_path = None
if self.verbose:
print('This simulation has never been analyzed')
def setup_data(self):
'''
Set up folder paths for saving data to.
'''
data_dir = Path(config.root_dir) / \
Path(self.project.name)/Path(self.design.name)
#if self.verbose:
# print("\nResults will be saved to:\n" +'- '*20+'\n\t'+ str(data_dir)+'\n'+'- '*20+'\n')
if len(self.design.name) > 50:
            print_color('WARNING! DESIGN FILENAME MAY BE TOO LONG!')
if not data_dir.is_dir():
data_dir.mkdir(parents=True, exist_ok=True)
self.data_dir = str(data_dir)
self.data_filename = str(
data_dir / (time.strftime('%Y-%m-%d %H-%M-%S', time.localtime()) + '.hdf5'))
"""
@deprecated
def calc_p_j(self, modes=None, variation=None):
'''
Calculates the p_j for all the modes.
Requires a calculator expression called P_J.
'''
lv = self.get_lv(variation)
if modes is None:
modes = range(self.nmodes)
pjs = OrderedDict()
for ii, m in enumerate(modes):
print('Calculating p_j for mode ' + str(m) + ' (' + str(ii) + '/' + str(np.size(modes)-1) + ')')
self.solutions.set_mode(m+1, 0)
self.fields = self.setup.get_fields()
P_J = self.fields.P_J
pjs['pj_'+str(m)] = P_J.evaluate(lv=lv)
self.pjs = pjs
if self.verbose:
print(pjs)
return pjs
"""
def calc_p_junction_single(self, mode):
'''
This function is used in the case of a single junction only.
For multiple junctions, see `calc_p_junction`.
Assumes no lumped capacitive elements.
'''
pj = OrderedDict()
pj_val = (self.U_E-self.U_H)/self.U_E
pj['pj_'+str(mode)] = np.abs(pj_val)
print(' p_j_' + str(mode) + ' = ' + str(pj_val))
return pj
#TODO: replace this method with the one below; still here because some funcs use it
def get_freqs_bare(self, variation):
#str(self.get_lv(variation))
freqs_bare_vals = []
freqs_bare_dict = OrderedDict()
freqs, kappa_over_2pis = self.solutions.eigenmodes(
self.get_lv_EM(variation))
for m in range(self.nmodes):
freqs_bare_dict['freq_bare_'+str(m)] = 1e9*freqs[m]
freqs_bare_vals.append(1e9*freqs[m])
if kappa_over_2pis is not None:
freqs_bare_dict['Q_'+str(m)] = freqs[m]/kappa_over_2pis[m]
else:
freqs_bare_dict['Q_'+str(m)] = 0
self.freqs_bare = freqs_bare_dict
self.freqs_bare_vals = freqs_bare_vals
return freqs_bare_dict, freqs_bare_vals
def get_freqs_bare_pd(self, variation):
'''
Return pd.Series of modal frequencies and Qs for the given variation
'''
freqs, kappa_over_2pis = self.solutions.eigenmodes(
self.get_lv_EM(variation))
if kappa_over_2pis is None:
kappa_over_2pis = np.zeros(len(freqs))
freqs = pd.Series(freqs, index=range(len(freqs))) # GHz
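# Quality factor per mode, computed as Q_m = f_m / (kappa_m / 2*pi)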
Qs = freqs / pd.Series(kappa_over_2pis, index=range(len(freqs)))
return freqs, Qs
def get_lv(self, variation=None):
'''
List of variation variables.
Returns list of var names and var values.
Such as ['Lj1:=','13nH', 'QubitGap:=','100um']
Parameters
-----------
variation : string number such as '0' or '1' or ...
'''
if variation is None:
lv = self.nominalvariation
lv = self.parse_listvariations(lv)
else:
lv = self.listvariations[ureg(variation)]
lv = self.parse_listvariations(lv)
return lv
def get_lv_EM(self, variation):
if variation is None:
lv = self.nominalvariation
#lv = self.parse_listvariations_EM(lv)
else:
lv = self.listvariations[ureg(variation)]
#lv = self.parse_listvariations_EM(lv)
return str(lv)
def parse_listvariations_EM(self, lv):
lv = str(lv)
lv = lv.replace("=", ":=,")
lv = lv.replace(' ', ',')
lv = lv.replace("'", "")
lv = lv.split(",")
return lv
def parse_listvariations(self, lv):
lv = str(lv)
lv = lv.replace("=", ":=,")
lv = lv.replace(' ', ',')
lv = lv.replace("'", "")
lv = lv.split(",")
return lv
def get_variables(self, variation=None):
lv = self.get_lv(variation)
variables = OrderedDict()
for ii in range(int(len(lv)/2)):
variables['_'+lv[2*ii][:-2]] = lv[2*ii+1]
self.variables = variables
return variables
def calc_energy_electric(self,
variation=None,
volume='AllObjects',
smooth=False):
r'''
Calculates two times the peak electric energy, or 4 times the RMS, :math:`4*\mathcal{E}_{\mathrm{elec}}`
(since we do not divide by 2 and use the peak phasors).
.. math::
\mathcal{E}_{\mathrm{elec}}=\frac{1}{4}\mathrm{Re}\int_{V}\mathrm{d}v\vec{E}_{\text{max}}^{*}\overleftrightarrow{\epsilon}\vec{E}_{\text{max}}
volume : string | 'AllObjects'
smooth : bool | False
Smooth the electric field or not when performing calculation
Example use to calculate the energy participation of a substrate
.. code-block:: python
ℰ_total = epr_hfss.calc_energy_electric(volume='AllObjects')
ℰ_substr = epr_hfss.calc_energy_electric(volume='Box1')
print(f'Energy in substrate = {100*ℰ_substr/ℰ_total:.1f}%')
'''
calcobject = CalcObject([], self.setup)
vecE = calcobject.getQty("E")
if smooth:
vecE = vecE.smooth()
A = vecE.times_eps()
B = vecE.conj()
A = A.dot(B)
A = A.real()
A = A.integrate_vol(name=volume)
lv = self.get_lv(variation)
return A.evaluate(lv=lv)
def calc_energy_magnetic(self,
variation=None,
volume='AllObjects',
smooth=True):
'''
See calc_energy_electric
'''
calcobject = CalcObject([], self.setup)
vecH = calcobject.getQty("H")
if smooth:
vecH = vecH.smooth()
A = vecH.times_mu()
B = vecH.conj()
A = A.dot(B)
A = A.real()
A = A.integrate_vol(name=volume)
lv = self.get_lv(variation)
return A.evaluate(lv=lv)
def calc_p_electric_volume(self,
name_dielectric3D,
relative_to='AllObjects',
E_total=None
):
r'''
Calculate the dielectric energy-participation ratio
of a 3D object (one that has volume) relative to the dielectric energy of
a list of objects.
This is computed relative to another object or to all objects.
When all objects are specified, this does not include any energy
that might be stored in any lumped elements or lumped capacitors.
Returns:
---------
ℰ_object/ℰ_total, (ℰ_object, ℰ_total)
'''
if E_total is None:
logger.debug('Calculating ℰ_total')
ℰ_total = self.calc_energy_electric(volume=relative_to)
else:
ℰ_total = E_total
logger.debug('Calculating ℰ_object')
ℰ_object = self.calc_energy_electric(volume=name_dielectric3D)
return ℰ_object/ℰ_total, (ℰ_object, ℰ_total)
def calc_current(self, fields, line):
'''
Function to calculate Current based on line. Not in use
line : integration line between plates - name
'''
self.design.Clear_Field_Clac_Stack()
comp = fields.Vector_H
exp = comp.integrate_line_tangent(line)
I = exp.evaluate(phase = 90)
self.design.Clear_Field_Clac_Stack()
return I
def calc_avg_current_J_surf_mag(self, variation, junc_rect, junc_line):
''' Peak current I_max for the mode in junction `junc_rect`.
The average is over the surface of the junction, i.e., spatial. '''
lv = self.get_lv(variation)
jl, uj = self.get_junc_len_dir(variation, junc_line)
uj = ConstantVecCalcObject(uj, self.setup)
calc = CalcObject([], self.setup)
#calc = calc.getQty("Jsurf").mag().integrate_surf(name = junc_rect)
calc = (((calc.getQty("Jsurf")).dot(uj)).imag()
).integrate_surf(name=junc_rect)
I = calc.evaluate(lv=lv) / jl # phase = 90
#self.design.Clear_Field_Clac_Stack()
return I
def calc_current_line_voltage(self, variation, junc_line_name, junc_L_Henries):
'''
Peak current I_max for the pre-specified mode, calculated from the line voltage across the junction.
Parameters:
------------------------------------------------
variation: variation number
junc_line_name: name of the HFSS line spanning the junction
junc_L_Henries: junction inductance in henries
TODO: Smooth?
'''
lv = self.get_lv(variation)
v_calc_real = CalcObject([], self.setup).getQty(
"E").real().integrate_line_tangent(name=junc_line_name)
v_calc_imag = CalcObject([], self.setup).getQty(
"E").imag().integrate_line_tangent(name=junc_line_name)
V = np.sqrt(v_calc_real.evaluate(lv=lv)**2 +
v_calc_imag.evaluate(lv=lv)**2)
freq = CalcObject(
[('EnterOutputVar', ('Freq', "Complex"))], self.setup).real().evaluate()
return V/(2*np.pi*freq*junc_L_Henries) # I = V/(omega*L)
def calc_line_current(self, variation, junc_line_name):
lv = self.get_lv(variation)
calc = CalcObject([], self.setup)
calc = calc.getQty("H").imag().integrate_line_tangent(
name=junc_line_name)
#self.design.Clear_Field_Clac_Stack()
return calc.evaluate(lv=lv)
def get_junc_len_dir(self, variation, junc_line):
'''
Return the length and direction of a junction defined by a line
Inputs: variation: simulation variation
junc_line: polyline object
Outputs: jl (float) junction length
uj (list of 3 floats) x,y,z coordinates of the unit vector
tangent to the junction line
'''
#
lv = self.get_lv(variation)
u = []
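# Each line-tangent integral below returns one Cartesian component of the (unnormalized)
# tangent vector; its norm is the junction line length jl and its normalized form is the
# unit tangent uj returned to the caller.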
for coor in ['X', 'Y', 'Z']:
calc = CalcObject([], self.setup)
calc = calc.line_tangent_coor(junc_line, coor)
u.append(calc.evaluate(lv=lv))
jl = float(np.sqrt(u[0]**2+u[1]**2+u[2]**2))
uj = [float(u[0]/jl), float(u[1]/jl), float(u[2]/jl)]
return jl, uj
def get_Qseam(self, seam, mode, variation):
r'''
Calculate the contribution to Q of a seam, by integrating the current in
the seam with finite conductance: set in the config file
ref: http://arxiv.org/pdf/1509.01119.pdf
'''
lv = self.get_lv(variation)
Qseam = OrderedDict()
print('Calculating Qseam_' + seam + ' for mode ' + str(mode) +
' (' + str(mode) + '/' + str(self.nmodes-1) + ')')
# overestimating the loss by taking norm2 of j, rather than jperp**2
j_2_norm = self.fields.Vector_Jsurf.norm_2()
int_j_2 = j_2_norm.integrate_line(seam)
int_j_2_val = int_j_2.evaluate(lv=lv, phase=90)
yseam = int_j_2_val/self.U_H/self.omega
Qseam['Qseam_'+seam+'_' +
str(mode)] = config.Dissipation_params.gseam/yseam
print('Qseam_' + seam + '_' + str(mode) + str(' = ') +
str(config.Dissipation_params.gseam/yseam))
return Series(Qseam)
def get_Qseam_sweep(self, seam, mode, variation, variable, values, unit, pltresult=True):
# values = ['5mm','6mm','7mm']
# ref: http://arxiv.org/pdf/1509.01119.pdf
self.solutions.set_mode(mode+1, 0)
self.fields = self.setup.get_fields()
freqs_bare_dict, freqs_bare_vals = self.get_freqs_bare(variation)
self.omega = 2*np.pi*freqs_bare_vals[mode]
print(variation)
print(type(variation))
print(ureg(variation))
self.U_H = self.calc_energy_magnetic(variation)
lv = self.get_lv(variation)
Qseamsweep = []
print('Calculating Qseam_' + seam + ' for mode ' + str(mode) +
' (' + str(mode) + '/' + str(self.nmodes-1) + ')')
for value in values:
self.design.set_variable(variable, str(value)+unit)
# overestimating the loss by taking norm2 of j, rather than jperp**2
j_2_norm = self.fields.Vector_Jsurf.norm_2()
int_j_2 = j_2_norm.integrate_line(seam)
int_j_2_val = int_j_2.evaluate(lv=lv, phase=90)
yseam = int_j_2_val/self.U_H/self.omega
Qseamsweep.append(config.Dissipation_params.gseam/yseam)
# Qseamsweep['Qseam_sweep_'+seam+'_'+str(mode)] = gseam/yseam
#Cprint 'Qseam_' + seam + '_' + str(mode) + str(' = ') + str(gseam/yseam)
if pltresult:
_, ax = plt.subplots()
ax.plot(values, Qseamsweep)
ax.set_yscale('log')
ax.set_xlabel(variable+' ('+unit+')')
ax.set_ylabel('Q'+'_'+seam)
return Qseamsweep
def get_Qdielectric(self, dielectric, mode, variation):
Qdielectric = OrderedDict()
print('Calculating Qdielectric_' + dielectric + ' for mode ' +
str(mode) + ' (' + str(mode) + '/' + str(self.nmodes-1) + ')')
U_dielectric = self.calc_energy_electric(variation, volume=dielectric)
p_dielectric = U_dielectric/self.U_E
#TODO: Update make p saved sep. and get Q for diff materials, indep. specify in pinfo
Qdielectric['Qdielectric_'+dielectric+'_' +
str(mode)] = 1/(p_dielectric*config.Dissipation_params.tan_delta_sapp)
print('p_dielectric'+'_'+dielectric+'_' +
str(mode)+' = ' + str(p_dielectric))
return Series(Qdielectric)
def get_Qsurface_all(self, mode, variation):
'''
Calculate the contribution to Q of a dielectric layer of dirt on all surfaces.
set the dirt thickness and loss tangent in the config file
ref: http://arxiv.org/pdf/1509.01854.pdf
'''
lv = self.get_lv(variation)
Qsurf = OrderedDict()
print('Calculating Qsurface for mode ' + str(mode) +
' (' + str(mode) + '/' + str(self.nmodes-1) + ')')
# A = self.fields.Mag_E**2
# A = A.integrate_vol(name='AllObjects')
# U_surf = A.evaluate(lv=lv)
calcobject = CalcObject([], self.setup)
vecE = calcobject.getQty("E")
A = vecE
B = vecE.conj()
A = A.dot(B)
A = A.real()
A = A.integrate_surf(name='AllObjects')
U_surf = A.evaluate(lv=lv)
U_surf *= config.Dissipation_params.th*epsilon_0*config.Dissipation_params.eps_r
p_surf = U_surf/self.U_E
Qsurf['Qsurf_'+str(mode)] = 1 / \
(p_surf*config.Dissipation_params.tan_delta_surf)
print('p_surf'+'_'+str(mode)+' = ' + str(p_surf))
return Series(Qsurf)
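# --- Hedged usage sketch (not part of the original file) ---
# Minimal illustration of how the field-calculator helpers above can be combined to report
# modal frequencies and the dielectric participation of a substrate. `epr_hfss` is assumed
# to be an already-constructed instance of this analysis class, and 'substrate' is assumed
# to be the name of a 3D object in the HFSS design; both names are illustrative.
def report_substrate_participation(epr_hfss, variation='0', substrate='substrate'):
    # Modal frequencies and quality factors for the requested variation
    freqs, Qs = epr_hfss.get_freqs_bare_pd(variation)
    # Fraction of the total electric energy stored in the substrate volume
    p_sub, (U_sub, U_tot) = epr_hfss.calc_p_electric_volume(substrate, relative_to='AllObjects')
    print('Substrate participation: %.1f%%' % (100 * p_sub))
    return freqs, Qs, p_sub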
import unittest
import numpy as np
import pandas as pd
from pyalink.alink import *
class TestPinyi(unittest.TestCase):
def test_pca(self):
data = np.array([
["1.0 2.0 4.0", "a"],
["-1.0 -3.0 4.0", "a"],
["4.0 2.0 3.0", "b"],
["3.4 5.1 5.0", "b"]
])
df = pd.DataFrame({"vec": data[:, 0], "lable": data[:, 1]})
source = dataframeToOperator(df, schemaStr='vec string, label string', op_type='batch')
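# The PCA below reduces the 3-component string vectors to K=2 principal components,
# using the correlation matrix ("CORR") rather than the covariance matrix.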
pca = PCA() \
.setK(2) \
.setCalculationType("CORR") \
.setPredictionCol("pred") \
.setReservedCols(["label"]) \
.setVectorCol("vec")
model = pca.fit(source)
model.transform(source).print()
def test_pca2(self):
data = np.array([
[0.0, 0.0, 0.0],
[0.1, 0.2, 0.1],
[0.2, 0.2, 0.8],
[9.0, 9.5, 9.7],
[9.1, 9.1, 9.6],
[9.2, 9.3, 9.9]
])
df = pd.DataFrame({"x1": data[:, 0], "x2": data[:, 1], "x3": data[:, 2]})
import os
import gc
import sys
import time
import copy
import joblib
import numpy as np
import pandas as pd
import warnings
# Suppress warnings
if not sys.warnoptions:
warnings.simplefilter("ignore")
from pathlib import Path
from sklearn import preprocessing
from sklearn.base import TransformerMixin
from sklearn.pipeline import Pipeline
from sklearn.feature_extraction import FeatureHasher
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.utils.metaestimators import if_delegate_has_method
# Workaround for Keras issue #1406
# "Using X backend." always printed to stdout #1406
# https://github.com/keras-team/keras/issues/1406
stderr = sys.stderr
sys.stderr = open(os.devnull, 'w')
import keras
from keras import backend as kerasbackend
from keras.wrappers.scikit_learn import KerasClassifier
from keras.wrappers.scikit_learn import KerasRegressor
sys.stderr = stderr
import _utils as utils
class PersistentModel:
"""
A general class to manage persistent models
"""
def __init__(self):
"""
Basic constructor
"""
self.name = None
self.state = None
self.state_timestamp = None
self.using_keras = False
def save(self, name, path, overwrite=True, compress=3, locked_timeout=2):
"""
Save the model to disk at the specified path.
If the model already exists and overwrite=False, throw an exception.
If overwrite=True, replace any existing file with the same name at the path.
If the model is found to be locked, wait 'locked_timeout' seconds and try again before quitting.
"""
# Create string for path and file name
f = path + name + '.joblib'
# Create a path for the lock file
f_lock = f + '.lock'
# Create the directory if required
try:
Path(path).mkdir(parents=True, exist_ok=False)
except FileExistsError:
pass
# If the file exists and overwriting is not allowed, raise an exception
if Path(f).exists() and not overwrite:
raise FileExistsError("The specified model name already exists: {0}.".format(name + '.joblib')\
+"\nPass overwrite=True if it is ok to overwrite.")
# Check if the file is currently locked
elif Path(f_lock).exists():
# Wait a few seconds and check again
time.sleep(locked_timeout)
# If the file is still locked raise an exception
if Path(f_lock).exists():
raise TimeoutError("The specified model is locked. If you believe this to be wrong, please delete file {0}".format(f_lock))
else:
# Update properties
self.name = name
self.state = 'saved'
self.state_timestamp = time.time()
# Keras models are excluded from the joblib file as they are saved to a special HDF5 file in _sklearn.py
try:
if self.using_keras:
# Get the trained keras model from the pipeline's estimator
keras_model = self.pipe.named_steps['estimator'].model
# Save the keras model architecture and weights to disk
keras_model.save(path + name + '.h5', overwrite=overwrite)
# The Keras estimator is excluded from the model saved to the joblib file
self.pipe.named_steps['estimator'].model = None
except AttributeError:
pass
# Create the lock file
joblib.dump(f_lock, filename=Path(f_lock), compress=compress)
try:
# Store this instance to file
joblib.dump(self, filename=Path(f), compress=compress)
finally:
# Delete the lock file
Path(f_lock).unlink()
return self
def load(self, name, path):
"""
Check if the model exists at the specified path and return it to the caller.
If the model is not found throw an exception.
"""
with open(Path(path + name + '.joblib'), 'rb') as f:
self = joblib.load(f)
# If using Keras we need to load the HDF5 file as well
# The model will only be available if the fit method has been called previously
if self.using_keras and hasattr(self, 'pipe'):
# Avoid tensorflow error for keras models
# https://github.com/tensorflow/tensorflow/issues/14356
# https://stackoverflow.com/questions/40785224/tensorflow-cannot-interpret-feed-dict-key-as-tensor
kerasbackend.clear_session()
# Load the keras model architecture and weights from disk
keras_model = keras.models.load_model(path + name + '.h5')
keras_model._make_predict_function()
# Point the estimator in the sklearn pipeline to the keras model architecture and weights
self.pipe.named_steps['estimator'].model = keras_model
return self
class Preprocessor(TransformerMixin):
"""
A class that preprocesses a given dataset based on feature definitions passed as a dataframe.
This class automates One Hot Encoding, Hashing, Text Vectorizing and Scaling.
"""
def __init__(self, features, return_type='np', scale_hashed=True, scale_vectors=True, missing="zeros", scaler="StandardScaler", logfile=None, **kwargs):
"""
Initialize the Preprocessor object based on the features dataframe.
**kwargs are keyword arguments passed to the sklearn scaler instance.
The features dataframe must include these columns: name, variable_type, feature_strategy.
If feature_strategy includes hashing or text vectorizing, the strategy_args column must also be included.
The dataframe must be indexed by name.
For further information on the columns refer to the project documentation:
https://github.com/nabeel-oz/qlik-py-tools
"""
self.features = features
self.return_type = return_type
self.scale_hashed = scale_hashed
self.scale_vectors = scale_vectors
self.missing = missing
self.scaler = scaler
self.kwargs = kwargs
self.ohe = False
self.hash = False
self.cv = False
self.tfidf = False
self.text = False
self.scale = False
self.no_prep = False
self.log = logfile
# Collect features for one hot encoding
self.ohe_meta = features.loc[features["feature_strategy"] == "one hot encoding"].copy()
# Set a flag if one hot encoding will be required
if len(self.ohe_meta) > 0:
self.ohe = True
# Collect features for hashing
self.hash_meta = features.loc[features["feature_strategy"] == "hashing"].copy()
# Set a flag if feature hashing will be required
if len(self.hash_meta) > 0:
self.hash = True
# Convert strategy_args column to integers
self.hash_meta.loc[:,"strategy_args"] = self.hash_meta.loc[:,"strategy_args"].astype(np.int64, errors="ignore")
# Collect features for count vectorizing
self.cv_meta = features.loc[features["feature_strategy"] == "count_vectorizing"].copy()
# Set a flag if count vectorizing will be required
if len(self.cv_meta) > 0:
self.cv = True
# Convert strategy_args column to key word arguments for the sklearn CountVectorizer class
self.cv_meta.loc[:,"strategy_args"] = self.cv_meta.loc[:,"strategy_args"].apply(utils.get_kwargs).\
apply(utils.get_kwargs_by_type)
# Collect features for term frequency inverse document frequency (TF-IDF) vectorizing
self.tfidf_meta = features.loc[features["feature_strategy"] == "tf_idf"].copy()
# Set a flag if tfidf vectorizing will be required
if len(self.tfidf_meta) > 0:
self.tfidf = True
# Convert strategy_args column to key word arguments for the sklearn TfidfVectorizer class
self.tfidf_meta.loc[:,"strategy_args"] = self.tfidf_meta.loc[:,"strategy_args"].apply(utils.get_kwargs).\
apply(utils.get_kwargs_by_type)
# Collect features for text similarity one hot encoding
self.text_meta = features.loc[features["feature_strategy"] == "text_similarity"].copy()
# Set a flag if text similarity OHE will be required
if len(self.text_meta) > 0:
self.text = True
# Collect features for scaling
self.scale_meta = features.loc[features["feature_strategy"] == "scaling"].copy()
# Set a flag if scaling will be required
if len(self.scale_meta) > 0:
self.scale = True
# Collect other features
self.none_meta = features.loc[features["feature_strategy"] == "none"].copy()
# Set a flag if there are features that don't require preprocessing
if len(self.none_meta) > 0:
self.no_prep = True
# Output information to the terminal and log file if required
if self.log is not None:
self._print_log(1)
def fit(self, X, y=None, features=None, retrain=False):
"""
Fit to the training dataset, storing information that will be needed for the transform dataset.
Return the Preprocessor object.
Optionally re-initialise the object by passing retrain=True and re-sending the features dataframe.
"""
# Reinitialize this Preprocessor instance if required
if retrain:
if features is None:
features = self.features
self.__init__(features)
# Set up an empty data frame for data to be scaled
scale_df = pd.DataFrame()
ohe_df = None
hash_df = None
cv_df = None
tfidf_df = None
text_df = None
if self.ohe:
# Get a subset of the data that requires one hot encoding
ohe_df = X[self.ohe_meta.index.tolist()]
# Apply one hot encoding to relevant columns
ohe_df = pd.get_dummies(ohe_df, columns=ohe_df.columns)
# Keep a copy of the OHE dataframe structure so we can align the transform dataset
self.ohe_df_structure = pd.DataFrame().reindex_like(ohe_df)
# Scaling needs to be fit exclusively on the training data so as not to influence the results
if self.scale:
# Get a subset of the data that requires scaling
scale_df = X[self.scale_meta.index.tolist()]
if self.hash:
# Get a subset of the data that requires feature hashing
hash_df = X[self.hash_meta.index.tolist()]
hash_cols = hash_df.columns
# Hash unique values for each relevant column and then join to a dataframe for hashed data
for c in hash_cols:
unique = self.hasher(hash_df, c, self.hash_meta["strategy_args"].loc[c])
hash_df = hash_df.join(unique, on=c)
hash_df = hash_df.drop(c, axis=1)
# If hashed columns need to be scaled, these need to be considered when setting up the scaler as well
if self.scale_hashed:
if self.scale:
scale_df = scale_df.join(hash_df)
else:
scale_df = hash_df
if self.cv:
# Get a subset of the data that requires count vectorizing
cv_df = X[self.cv_meta.index.tolist()]
cv_cols = cv_df.columns
# Get count vectors for each relevant column and then join to a dataframe for count vectorized data
for c in cv_cols:
unique = self.text_vectorizer(cv_df, c, type="count", **self.cv_meta["strategy_args"].loc[c])
cv_df = cv_df.join(unique, on=c)
cv_df = cv_df.drop(c, axis=1)
# Keep a copy of the count vectorized dataframe structure so we can align the transform dataset
self.cv_df_structure = pd.DataFrame().reindex_like(cv_df)
# If text vector columns need to be scaled, these need to be considered when setting up the scaler as well
if self.scale_vectors:
if self.scale or (self.scale_hashed and self.hash):
scale_df = scale_df.join(cv_df)
else:
scale_df = cv_df
if self.tfidf:
# Get a subset of the data that requires tfidf vectorizing
tfidf_df = X[self.tfidf_meta.index.tolist()]
tfidf_cols = tfidf_df.columns
# Get tfidf vectors for each relevant column and then join to a dataframe for tfidf vectorized data
for c in tfidf_cols:
unique = self.text_vectorizer(tfidf_df, c, type="tfidf", **self.tfidf_meta["strategy_args"].loc[c])
tfidf_df = tfidf_df.join(unique, on=c)
tfidf_df = tfidf_df.drop(c, axis=1)
# Keep a copy of the tfidf vectorized dataframe structure so we can align the transform dataset
self.tfidf_df_structure = pd.DataFrame().reindex_like(tfidf_df)
# If text vector columns need to be scaled, these need to be considered when setting up the scaler as well
if self.scale_vectors:
if self.scale or (self.scale_hashed and self.hash) or self.cv:
scale_df = scale_df.join(tfidf_df)
else:
scale_df = tfidf_df
if self.text:
# Get a subset of the data that requires text similarity OHE
text_df = X[self.text_meta.index.tolist()]
text_cols = text_df.columns
# Get text similarity OHE for each relevant column and then join to a dataframe for text similarity OHE data
for c in text_cols:
unique = self.text_similarity(text_df, c)
text_df = text_df.join(unique, on=c)
text_df = text_df.drop(c, axis=1)
# Keep a copy of the text similarity OHE dataframe structure so we can align the transform dataset
self.text_df_structure = pd.DataFrame().reindex_like(text_df)
try:
if len(scale_df) > 0:
# Get an instance of the sklearn scaler fit to X
self.scaler_instance = utils.get_scaler(scale_df, missing=self.missing, scaler=self.scaler, **self.kwargs)
# Keep a copy of the scaling dataframe structure so we can align the transform dataset
self.scale_df_structure = pd.DataFrame().reindex_like(scale_df)
except AttributeError:
pass
# Output information to the terminal and log file if required
if self.log is not None:
self._print_log(2, ohe_df=ohe_df, scale_df=scale_df, hash_df=hash_df, cv_df=cv_df, tfidf_df=tfidf_df, text_df=text_df)
return self
def transform(self, X, y=None):
"""
Transform X with the encoding and scaling requirements set by fit().
This function will perform One Hot Encoding, Feature Hashing and Scaling on X.
Returns X_transform as a numpy array or a pandas dataframe based on return_type set in constructor.
"""
X_transform = None
scale_df = pd.DataFrame() # Initialize as empty Data Frame for convenience of concat operations below
ohe_df = None
hash_df = None
cv_df = None
tfidf_df = None
text_df = None
if self.ohe:
# Get a subset of the data that requires one hot encoding
ohe_df = X[self.ohe_meta.index.tolist()]
# Apply one hot encoding to relevant columns
ohe_df = pd.get_dummies(ohe_df, columns=ohe_df.columns)
# Align the columns with the original dataset.
# This is to prevent different number or order of features between training and test datasets.
ohe_df = ohe_df.align(self.ohe_df_structure, join='right', axis=1)[0]
# Fill missing values in the OHE dataframe, that may appear after alignment, with zeros.
ohe_df = utils.fillna(ohe_df, method="zeros")
# Add the encoded columns to the result dataset
X_transform = ohe_df
if self.hash:
# Get a subset of the data that requires feature hashing
hash_df = X[self.hash_meta.index.tolist()]
hash_cols = hash_df.columns
# Hash unique values for each relevant column and then join to a dataframe for hashed data
for c in hash_cols:
unique = self.hasher(hash_df, c, self.hash_meta["strategy_args"].loc[c])
hash_df = hash_df.join(unique, on=c)
hash_df = hash_df.drop(c, axis=1)
# Fill any missing values in the hash dataframe
hash_df = utils.fillna(hash_df, method="zeros")
if self.cv:
# Get a subset of the data that requires count vectorizing
cv_df = X[self.cv_meta.index.tolist()]
cv_cols = cv_df.columns
# Get count vectors for each relevant column and then join to a dataframe for count vectorized data
for c in cv_cols:
unique = self.text_vectorizer(cv_df, c, type="count", **self.cv_meta["strategy_args"].loc[c])
cv_df = cv_df.join(unique, on=c)
cv_df = cv_df.drop(c, axis=1)
# Align the columns with the original dataset.
# This is to prevent different number or order of features between training and test datasets.
cv_df = cv_df.align(self.cv_df_structure, join='right', axis=1)[0]
# Fill missing values in the dataframe that may appear after alignment with zeros.
cv_df = utils.fillna(cv_df, method="zeros")
if self.tfidf:
# Get a subset of the data that requires tfidf vectorizing
tfidf_df = X[self.tfidf_meta.index.tolist()]
tfidf_cols = tfidf_df.columns
# Get tfidf vectors for each relevant column and then join to a dataframe for tfidf vectorized data
for c in tfidf_cols:
unique = self.text_vectorizer(tfidf_df, c, type="tfidf", **self.tfidf_meta["strategy_args"].loc[c])
tfidf_df = tfidf_df.join(unique, on=c)
tfidf_df = tfidf_df.drop(c, axis=1)
# Align the columns with the original dataset.
# This is to prevent different number or order of features between training and test datasets.
tfidf_df = tfidf_df.align(self.tfidf_df_structure, join='right', axis=1)[0]
# Fill missing values in the dataframe that may appear after alignment with zeros.
tfidf_df = utils.fillna(tfidf_df, method="zeros")
if self.text:
# Get a subset of the data that requires text similarity OHE
text_df = X[self.text_meta.index.tolist()]
text_cols = text_df.columns
# Get text similarity OHE for each relevant column and then join to a dataframe for text similarity OHE data
for c in text_cols:
unique = self.text_similarity(text_df, c)
text_df = text_df.join(unique, on=c)
text_df = text_df.drop(c, axis=1)
# Align the columns with the original dataset.
# This is to prevent different number or order of features between training and test datasets.
text_df = text_df.align(self.text_df_structure, join='right', axis=1)[0]
# Fill missing values in the dataframe that may appear after alignment with zeros.
text_df = utils.fillna(text_df, method="zeros")
# Add the text similary OHE data to the result dataset
if X_transform is None:
X_transform = text_df
else:
X_transform = pd.concat([X_transform, text_df], join='outer', axis=1, sort=False)
if self.scale:
# Get a subset of the data that requires scaling
scale_df = X[self.scale_meta.index.tolist()]
# If scale_hashed = True join the hashed columns to the scaling dataframe
if self.hash and self.scale_hashed:
if self.scale:
scale_df = pd.concat([scale_df, hash_df], join='outer', axis=1, sort=False)
else:
scale_df = hash_df
# If only hashed columns are being scaled, the scaler needs to be instantiated
self.scaler_instance = utils.get_scaler(scale_df, missing=self.missing, scaler=self.scaler, **self.kwargs)
elif self.hash:
# Add the hashed columns to the result dataset
if X_transform is None:
X_transform = hash_df
else:
X_transform = pd.concat([X_transform, hash_df], join='outer', axis=1, sort=False)
# If scale_vectors = True join the count vectorized columns to the scaling dataframe
if self.cv and self.scale_vectors:
if self.scale or (self.hash and self.scale_hashed):
scale_df = pd.concat([scale_df, cv_df], join='outer', axis=1, sort=False)
else:
scale_df = cv_df
# If only count vectorized columns are being scaled, the scaler needs to be instantiated
self.scaler_instance = utils.get_scaler(scale_df, missing=self.missing, scaler=self.scaler, **self.kwargs)
elif self.cv:
# Add the count vectorized columns to the result dataset
if X_transform is None:
X_transform = cv_df
else:
X_transform = pd.concat([X_transform, cv_df], join='outer', axis=1, sort=False)
# If scale_vectors = True join the tfidf vectorized columns to the scaling dataframe
if self.tfidf and self.scale_vectors:
if self.scale or (self.hash and self.scale_hashed) or self.cv:
scale_df = pd.concat([scale_df, tfidf_df], join='outer', axis=1, sort=False)
else:
scale_df = tfidf_df
# If only tfidf vectorized columns are being scaled, the scaler needs to be instantiated
self.scaler_instance = utils.get_scaler(scale_df, missing=self.missing, scaler=self.scaler, **self.kwargs)
elif self.tfidf:
# Add the count vectorized columns to the result dataset
if X_transform is None:
X_transform = tfidf_df
else:
X_transform = pd.concat([X_transform, tfidf_df], join='outer', axis=1, sort=False)
try:
# Perform scaling on the relevant data
if len(scale_df) > 0:
# Align the columns with the original dataset.
# This is to prevent different number or order of features between training and test datasets.
scale_df = scale_df.align(self.scale_df_structure, join='right', axis=1)[0]
scale_df = utils.fillna(scale_df, method=self.missing)
scale_df = pd.DataFrame(self.scaler_instance.transform(scale_df), index=scale_df.index, columns=scale_df.columns)
# Add the scaled columns to the result dataset
if X_transform is None:
X_transform = scale_df
else:
X_transform = pd.concat([X_transform, scale_df], join='outer', axis=1, sort=False)
except AttributeError:
pass
if self.no_prep:
# Get a subset of the data that doesn't require preprocessing
no_prep_df = X[self.none_meta.index.tolist()]
# Fill any missing values in the no prep dataframe
no_prep_df = utils.fillna(no_prep_df, method="zeros")
# Finally join the columns that do not require preprocessing to the result dataset
if X_transform is None:
X_transform = no_prep_df
else:
X_transform = pd.concat([X_transform, no_prep_df], join='outer', axis=1, sort=False)
# Output information to the terminal and log file if required
if self.log is not None:
self._print_log(3, ohe_df=ohe_df, scale_df=scale_df, hash_df=hash_df, cv_df=cv_df, tfidf_df=tfidf_df, text_df=text_df, X_transform=X_transform)
if self.return_type == 'np':
return X_transform.values
return X_transform
def fit_transform(self, X, y=None, features=None, retrain=False):
"""
Apply fit() then transform()
"""
if features is None:
features = self.features
return self.fit(X, y, features, retrain).transform(X, y)
def _print_log(self, step, **kwargs):
"""
Output useful information to stdout and the log file if debugging is required.
step: Print the corresponding step in the log
kwargs: dictionary of dataframes to be used in the log
"""
if step == 1:
if self.ohe:
sys.stdout.write("Features for one hot encoding: \n{0}\n\n".format(self.ohe_meta))
with open(self.log,'a', encoding='utf-8') as f:
f.write("Features for one hot encoding: \n{0}\n\n".format(self.ohe_meta))
if self.hash:
sys.stdout.write("Features for hashing: \n{0}\n\n".format(self.hash_meta))
with open(self.log,'a', encoding='utf-8') as f:
f.write("Features for hashing: \n{0}\n\n".format(self.hash_meta))
if self.cv:
sys.stdout.write("Features for count vectorization: \n{0}\n\n".format(self.cv_meta))
with open(self.log,'a', encoding='utf-8') as f:
f.write("Features for count vectorization: \n{0}\n\n".format(self.cv_meta))
if self.tfidf:
sys.stdout.write("Features for tfidf vectorization: \n{0}\n\n".format(self.tfidf_meta))
with open(self.log,'a', encoding='utf-8') as f:
f.write("Features for tfidf vectorization: \n{0}\n\n".format(self.tfidf_meta))
if self.scale:
sys.stdout.write("Features for scaling: \n{0}\n\n".format(self.scale_meta))
with open(self.log,'a', encoding='utf-8') as f:
f.write("Features for scaling: \n{0}\n\n".format(self.scale_meta))
elif step == 2:
if self.ohe:
sys.stdout.write("Fit ohe_df shape:{0}\nSample Data:\n{1}\n\n".format(kwargs['ohe_df'].shape, kwargs['ohe_df'].head()))
with open(self.log,'a', encoding='utf-8') as f:
f.write("Fit ohe_df shape:{0}\nSample Data:\n{1}\n\n".format(kwargs['ohe_df'].shape, kwargs['ohe_df'].head()))
if self.hash:
sys.stdout.write("Fit hash_df shape:{0}\nSample Data:\n{1}\n\n".format(kwargs['hash_df'].shape, kwargs['hash_df'].head()))
with open(self.log,'a', encoding='utf-8') as f:
f.write("Fit hash_df shape:{0}\nSample Data:\n{1}\n\n".format(kwargs['hash_df'].shape, kwargs['hash_df'].head()))
if self.cv:
sys.stdout.write("Fit cv_df shape:{0}\nSample Data:\n{1}\n\n".format(kwargs['cv_df'].shape, kwargs['cv_df'].head()))
with open(self.log,'a', encoding='utf-8') as f:
f.write("Fit cv_df shape:{0}\nSample Data:\n{1}\n\n".format(kwargs['cv_df'].shape, kwargs['cv_df'].head()))
if self.tfidf:
sys.stdout.write("Fit tfidf_df shape:{0}\nSample Data:\n{1}\n\n".format(kwargs['tfidf_df'].shape, kwargs['tfidf_df'].head()))
with open(self.log,'a', encoding='utf-8') as f:
f.write("Fit tfidf_df shape:{0}\nSample Data:\n{1}\n\n".format(kwargs['tfidf_df'].shape, kwargs['tfidf_df'].head()))
try:
if len(kwargs['scale_df']) > 0:
sys.stdout.write("Fit scale_df shape:{0}\nSample Data:\n{1}\n\n".format(kwargs['scale_df'].shape, kwargs['scale_df'].head()))
with open(self.log,'a', encoding='utf-8') as f:
f.write("Fit scale_df shape:{0}\nSample Data:\n{1}\n\n".format(kwargs['scale_df'].shape, kwargs['scale_df'].head()))
except AttributeError:
pass
elif step == 3:
if self.ohe:
sys.stdout.write("Transform ohe_df shape:{0}\nSample Data:\n{1}\n\n".format(kwargs['ohe_df'].shape, kwargs['ohe_df'].head()))
with open(self.log,'a', encoding='utf-8') as f:
f.write("Transform ohe_df shape:{0}\nSample Data:\n{1}\n\n".format(kwargs['ohe_df'].shape, kwargs['ohe_df'].head()))
if self.hash:
sys.stdout.write("Transform hash_df shape:{0}\nSample Data:\n{1}\n\n".format(kwargs['hash_df'].shape, kwargs['hash_df'].head()))
with open(self.log,'a', encoding='utf-8') as f:
f.write("Transform hash_df shape:{0}\nSample Data:\n{1}\n\n".format(kwargs['hash_df'].shape, kwargs['hash_df'].head()))
if self.cv:
sys.stdout.write("Transform cv_df shape:{0}\nSample Data:\n{1}\n\n".format(kwargs['cv_df'].shape, kwargs['cv_df'].head()))
with open(self.log,'a', encoding='utf-8') as f:
f.write("Transform cv_df shape:{0}\nSample Data:\n{1}\n\n".format(kwargs['cv_df'].shape, kwargs['cv_df'].head()))
if self.tfidf:
sys.stdout.write("Transform tfidf_df shape:{0}\nSample Data:\n{1}\n\n".format(kwargs['tfidf_df'].shape, kwargs['tfidf_df'].head()))
with open(self.log,'a', encoding='utf-8') as f:
f.write("Transform tfidf_df shape:{0}\nSample Data:\n{1}\n\n".format(kwargs['tfidf_df'].shape, kwargs['tfidf_df'].head()))
try:
if len(kwargs['scale_df']) > 0:
sys.stdout.write("Transform scale_df shape:{0}\nSample Data:\n{1}\n\n".format(kwargs['scale_df'].shape, kwargs['scale_df'].head()))
with open(self.log,'a', encoding='utf-8') as f:
f.write("Transform scale_df shape:{0}\nSample Data:\n{1}\n\n".format(kwargs['scale_df'].shape, kwargs['scale_df'].head()))
except AttributeError:
pass
try:
sys.stdout.write("X_transform shape:{0}\nSample Data:\n{1}\n\n".format(kwargs['X_transform'].shape, kwargs['X_transform'].head()))
with open(self.log,'a', encoding='utf-8') as f:
f.write("X_transform shape:{0}\nSample Data:\n{1}\n\n".format(kwargs['X_transform'].shape, kwargs['X_transform'].head()))
except AttributeError:
pass
@staticmethod
def hasher(df, col, n_features):
"""
Hash the unique values in the specified column in the given dataframe, creating n_features
"""
unique = pd.DataFrame(df[col].unique(), columns=[col])
fh = FeatureHasher(n_features=n_features, input_type="string")
hashed = fh.fit_transform(unique.loc[:, col])
unique = unique.join(pd.DataFrame(hashed.toarray()).add_prefix(col))
return unique.set_index(col)
@staticmethod
def text_vectorizer(df, col, type="count", **kwargs):
"""
Create count vectors using the sklearn TfidfVectorizer or CountVectorizer for the specified column in the given dataframe.
The type argument can be "tfidf" referring to TfidfVectorizer, anything else defaults to CountVectorizer.
"""
unique = pd.DataFrame(df[col].unique(), columns=[col])
if type == "tfidf":
v = TfidfVectorizer(**kwargs)
else:
v = CountVectorizer(**kwargs)
vectorized = v.fit_transform(unique.loc[:, col])
feature_names = v.get_feature_names()
col_names = []
for i,j in enumerate(feature_names):
col_names.append("{}_{}".format(i,j))
unique = unique.join(pd.DataFrame(vectorized.toarray(), columns=col_names).add_prefix(col+"_"))
return unique.set_index(col)
@staticmethod
def text_similarity(df, col):
"""
Convert strings to their unicode representation and then apply one hot encoding, creating one feature for each unique character in the column.
This can be useful when similarity between strings is significant.
"""
unique = pd.DataFrame(df[col].unique(), columns=[col])
encoded = pd.DataFrame(unique.loc[:,col].apply(lambda s: [ord(a) for a in s]), index=unique.index)
mlb = preprocessing.MultiLabelBinarizer()
encoded = pd.DataFrame(mlb.fit_transform(encoded[col]),columns=mlb.classes_, index=encoded.index).add_prefix(col+"_")
unique = unique.join(encoded)
return unique.set_index(col)
class TargetTransformer:
"""
A class to transform the target variable.
This class can scale the target using the specified sklearn scaler.
It can also make the series stationary by differencing the values. Note that this is only valid when predictions will include multiple samples.
An inverse transform method allows for reversing the transformations.
"""
def __init__(self, scale=True, make_stationary=None, missing="zeros", scaler="StandardScaler", logfile=None, **kwargs):
"""
Initialize the TargetTransformer instance.
scale is a boolean parameter to determine if the target will be scaled.
make_stationary is a parameter to determine if the target will be made stationary. This should only be used for sequential data.
Passing make_stationary='log' will apply a logarithm to the target and use an exponential for the inverse transform.
Passing make_stationary='difference' will difference the values to make the target series stationary.
By default the difference will be done with lag = 1. Alternate lags can be provided by passing a list of lags as a kwarg.
e.g. lags=[1, 12]
missing determines how missing values are dealt with before the scaling is applied.
Valid options specified through the missing parameter are: zeros, mean, median, mode
Valid options for scaler are the scaler classes in sklearn.preprocessing
Other kwargs are keyword arguments passed to the sklearn scaler instance.
"""
self.scale = scale
self.make_stationary = make_stationary
self.missing = missing
self.scaler = scaler
self.logfile = logfile
if make_stationary and 'lags' in kwargs:
self.lags = kwargs.pop('lags')
else:
self.lags = [1]
self.kwargs = kwargs
def fit(self, y):
"""
Fit the scaler to target values from the training set.
"""
if self.scale:
# Get an instance of the sklearn scaler fit to y
self.scaler_instance = utils.get_scaler(y, missing=self.missing, scaler=self.scaler, **self.kwargs)
return self
def transform(self, y, array_like=True):
"""
Transform new targets using the previously fit scaler.
Also apply a logarithm or differencing if required for making the series stationary.
array_like determines if y is expected to be multiple values or a single value.
Note that the differencing won't be done if array_like=False.
"""
y_transform = y
# Scale the targets using the previously fit scaler
if self.scale:
y_transform = self.scaler_instance.transform(y)
if isinstance(y, pd.DataFrame):
# The scaler returns a numpy array which needs to be converted back to a data frame
y_transform = pd.DataFrame(y_transform, columns=y.columns, index=y.index)
# Apply a logarithm to make the array stationary
if self.make_stationary == 'log':
y_transform = np.log(y)
# Apply stationarity lags by differencing the array
elif self.make_stationary == 'difference' and array_like:
y_diff = y_transform.copy()
len_y = len(y_diff)
for i in range(max(self.lags), len_y):
for lag in self.lags:
if isinstance(y_diff, (pd.Series, pd.DataFrame)):
y_diff.iloc[i] = y_diff.iloc[i] - y_transform.iloc[i - lag]
else:
y_diff[i] = y_diff[i] - y_transform[i - lag]
# Remove targets with insufficient lag periods
# NOTE: The corresponding samples will need to be dropped at this function call's origin
if isinstance(y_diff, (pd.Series, pd.DataFrame)):
y_transform = y_diff.iloc[max(self.lags):]
else:
y_transform = y_diff[max(self.lags):]
if self.logfile is not None:
self._print_log(1, data=y_transform, array_like=array_like)
return y_transform
def fit_transform(self, y):
"""
Apply fit then transform
"""
return self.fit(y).transform(y)
def inverse_transform(self, y_transform, array_like=True):
"""
Reverse the transformations and return the target in its original form.
array_like determines if y_transform is expected to be multiple values or a single value.
Note that the differencing won't be done if array_like=False.
"""
if self.scale:
if isinstance(y_transform, pd.DataFrame):
y = self.scaler_instance.inverse_transform(np.reshape(y_transform.values, (-1, 1)))
# The scaler returns a numpy array which needs to be converted back to a data frame
y = pd.DataFrame(y, columns=y_transform.columns, index=y_transform.index)
else:
y = self.scaler_instance.inverse_transform(np.reshape(y_transform, (-1, 1)))
# Apply an exponential to reverse the logarithm applied during transform
if self.make_stationary == 'log':
y = np.exp(y_transform)
# Reverse the differencing applied during transform
# NOTE: y_transform will need to include actual values preceding the lags
elif self.make_stationary == 'difference' and array_like:
y = y_transform.copy()
len_y = len(y_transform)
for i in range(max(self.lags), len_y):
for lag in self.lags:
if isinstance(y, (pd.Series, pd.DataFrame)):
y.iloc[i] = y.iloc[i] + y.iloc[i - lag]
else:
y[i] = y[i] + y[i - lag]
if self.logfile is not None:
self._print_log(2, data=y, array_like=array_like)
return y
def _print_log(self, step, data=None, array_like=True):
"""
Print debug info to the log
"""
# Set mode to append to log file
mode = 'a'
output = ''
if step == 1:
# Output the transformed targets
output = "Targets transformed"
elif step == 2:
# Output sample data after adding lag observations
output = "Targets inverse transformed"
if array_like:
output += " {0}:\nSample Data:\n{1}\n\n".format(data.shape, data.head())
else:
output += " {0}".format(data)
sys.stdout.write(output)
with open(self.logfile, mode, encoding='utf-8') as f:
f.write(output)
class Reshaper(TransformerMixin):
"""
A class that reshapes the feature matrix based on the input_shape.
This class is built for Keras estimators where recurrent and convolutional layers can require 3D or 4D inputs.
It is meant to be used after preprocessing and before fitting the estimator.
"""
def __init__(self, first_layer_kwargs=None, logfile=None, **kwargs):
"""
Initialize the Reshaper with the Keras model first layer's kwargs.
The 'input_shape' entry of these kwargs determines how the feature matrix will be reshaped.
first_layer_kwargs should be a reference to the first layer kwargs of the Keras architecture being used to build the model.
Optional arguments are a logfile to output debug info.
"""
self.first_layer_kwargs = first_layer_kwargs
self.logfile = logfile
def fit(self, X, y=None):
"""
Update the input shape based on the number of samples in X.
Return this Reshaper object.
"""
# Create the input_shape property as a list
self.input_shape = list(self.first_layer_kwargs['input_shape'])
# Debug information is printed to the terminal and logs if required
if self.logfile:
self._print_log(1)
return self
def transform(self, X, y=None):
"""
Apply the new shape to the data provided in X.
X is expected to be a 2D DataFrame of samples and features.
The data will be reshaped according to self.input_shape.
"""
# Add the number of samples to the input_shape
input_shape = self.input_shape.copy()
input_shape.insert(0, X.shape[0])
# Debug information is printed to the terminal and logs if required
if self.logfile:
self._print_log(2, data=input_shape)
# If the final shape is n_samples by n_features we have nothing to do here
if (len(input_shape) == 2):
return X
# 2D, 3D and 4D data is valid.
# e.g. The input_shape can be a tuple of (samples, subsequences, timesteps, features), with subsequences and timesteps as optional.
# A 5D shape may be valid for e.g. a ConvLSTM with (samples, timesteps, rows, columns, features)
if len(input_shape) > 5:
err = "Unsupported input_shape: {}".format(input_shape)
raise Exception(err)
# Reshape the data
elif len(input_shape) > 2:
# Reshape input data using numpy
X_transform = X.values.reshape(input_shape)
# Debug information is printed to the terminal and logs if required
if self.logfile:
self._print_log(3, data=X_transform)
return X_transform
def fit_transform(self, X, y=None):
"""
Apply fit() then transform()
"""
return self.fit(X, y).transform(X, y)
def _print_log(self, step, data=None):
"""
Print debug info to the log
"""
# Set mode to append to log file
mode = 'a'
if step == 1:
# Output the updated input shape
output = "Input shape specification for Keras: {0}\n\n".format(self.first_layer_kwargs['input_shape'])
elif step == 2:
# Output the updated input shape
output = "{0} samples added to the input shape. Data will be reshaped to: {1}\n\n".format(data[0], tuple(data))
elif step == 3:
# Output sample data after reshaping
output = "Input data reshaped to {0}.\nSample Data:\n{1}\n\n".format(data.shape, data[:5])
sys.stdout.write(output)
with open(self.logfile, mode, encoding='utf-8') as f:
f.write(output)
class KerasClassifierForQlik(KerasClassifier):
"""
A subclass of the KerasClassifier Scikit-Learn wrapper.
This class expects the Keras model build function to be supplied in sk_params under the 'build_fn' keyword argument.
It also stores a histories dataframe to provide metrics for each time the model is fit.
"""
def __init__(self, **sk_params):
"""
Initialize the KerasClassifierForQlik.
The Keras model build function should be included in sk_params under the 'build_fn' keyword argument.
"""
# Assign the parameters to a class variable
self.sk_params = sk_params
# Set build_fn to the function supplied in sk_params
self.build_fn = self.sk_params.pop('build_fn')
# DataFrame to contain history of every training cycle
# This DataFrame will provide metrics such as loss for each run of the fit method
# Columns will be ['iteration', 'epoch', 'loss'] and any other metrics being calculated during training
self.histories = pd.DataFrame()
self.iteration = 0
# Check the parameters using the super class method
self.check_params(self.sk_params)
def get_params(self, **params):
"""Gets parameters for this estimator.
# Arguments
**params: ignored (exists for API compatibility).
# Returns
Dictionary of parameter names mapped to their values.
"""
res = self.sk_params
# Add back the Keras build function that was popped out of sk_params
res.update({'build_fn': self.build_fn})
return res
def fit(self, x, y, sample_weight=None, **kwargs):
"""
Call the super class' fit method and store metrics from the history.
Also cater for multi-step predictions.
"""
# Match the samples to the targets.
# x and y can be out of sync due to dropped samples in the Reshaper transformer.
if len(y) > len(x):
y = y[len(y)-len(x):]
# Fit the model to the data and store information on the training
history = super().fit(x, y, sample_weight, **kwargs)
sys.stdout.write("\n\nKeras Model Summary:\n")
self.model.summary()
sys.stdout.write("\n\n")
# Set up a data frame with the epochs and a counter to track multiple histories
history_df = pd.DataFrame({'iteration': self.iteration+1, 'epoch': history.epoch})
# Add a column per metric for each epoch e.g. loss, acc
for key in history.history:
history_df[key] = pd.Series(history.history[key])
# Concatenate results from the training to the history data frame
self.histories = pd.concat([self.histories, history_df], sort=True).sort_values(by=['iteration', 'epoch']).reset_index(drop=True)
self.iteration += 1
return history
class KerasRegressorForQlik(KerasRegressor):
"""
A subclass of the KerasRegressor Scikit-Learn wrapper.
This class expects the Keras model build function to be supplied in sk_params under the 'build_fn' keyword argument.
It also stores a histories dataframe to provide metrics for each time the model is fit.
"""
def __init__(self, **sk_params):
"""
Initialize the KerasRegressorForQlik.
The Keras model build function should be included in sk_params under the 'build_fn' keyword argument.
"""
# Assign the parameters to a class variable
self.sk_params = sk_params
# Set build_fn to the function supplied in sk_params
self.build_fn = self.sk_params.pop('build_fn')
# DataFrame to contain history of every training cycle
# This DataFrame will provide metrics such as loss for each run of the fit method
# Columns will be ['iteration', 'epoch', 'loss'] and any other metrics being calculated during training
self.histories = pd.DataFrame()
self.iteration = 0
# Check the parameters using the super class method
self.check_params(self.sk_params)
def get_params(self, **params):
"""
Gets parameters for this estimator.
Overrides super class method for compatibility with sklearn cross_validate.
"""
res = self.sk_params
# Add back the Keras build function that was popped out of sk_params
res.update({'build_fn': self.build_fn})
return res
def fit(self, x, y, **kwargs):
"""
Call the super class' fit method and store metrics from the history.
Also cater for multi-step predictions.
"""
# Match the samples to the targets.
# x and y can be out of sync due to dropped samples in the Reshaper transformer.
if len(y) > len(x):
y = y[len(y)-len(x):]
# Fit the model to the data and store information on the training
history = super().fit(x, y, **kwargs)
sys.stdout.write("\n\nKeras Model Summary:\n")
self.model.summary()
sys.stdout.write("\n\n")
# Set up a data frame with the epochs and a counter to track multiple histories
history_df = pd.DataFrame({'iteration': self.iteration+1, 'epoch': history.epoch})
# Add a column per metric for each epoch e.g. loss
for key in history.history:
history_df[key] = pd.Series(history.history[key])
# Concatenate results from the training to the history data frame
self.histories = pd.concat([self.histories, history_df], sort=True).sort_values(by=['iteration', 'epoch']).reset_index(drop=True)
self.iteration += 1
return history
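# --- Hedged usage sketch (not part of the original file) ---
# Minimal illustration of driving the Preprocessor and TargetTransformer above directly,
# outside of the Qlik request flow. The feature names, the sample data and the choice of
# only the 'scaling' and 'one hot encoding' strategies are illustrative assumptions; the
# features dataframe layout follows the Preprocessor docstring (columns name,
# variable_type, feature_strategy, indexed by name).
def _example_preprocessing():
    features = pd.DataFrame({
        "name": ["age", "city"],
        "variable_type": ["float", "str"],
        "feature_strategy": ["scaling", "one hot encoding"],
    }).set_index("name", drop=False)
    X_train = pd.DataFrame({"age": [25.0, 40.0, 31.0],
                            "city": ["Oslo", "Perth", "Oslo"]})
    y_train = pd.DataFrame({"target": [1.2, 3.4, 2.8]})
    # One hot encode 'city', scale 'age', and return a dataframe instead of a numpy array
    prep = Preprocessor(features, return_type="df")
    X_t = prep.fit_transform(X_train)
    # Scale the target, then invert the scaling after (e.g.) predicting
    tt = TargetTransformer(scale=True)
    y_t = tt.fit_transform(y_train)
    return X_t, tt.inverse_transform(y_t)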
import unittest, logging, os, sys
from autologging import logged, traced
# Add this path first so it picks up the newest changes without having to rebuild
this_dir = os.path.dirname(os.path.abspath(__file__))
sys.path.insert(0, this_dir + "/..")
from ffx_helper import FFXEncrypt
from custom_provider import CustomProvider
import util_methods
import pandas as pd
import numpy as np
import datetime
from faker import Faker
logging.basicConfig(level=os.getenv("log_level", "TRACE"))
@logged
class TestAnonymizer(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.ffx = FFXEncrypt("<PASSWORD>")
cls.faker = Faker()
cls.faker.add_provider(CustomProvider)
@staticmethod
def sampleAccessDF():
"""Create a sample set of 10 users with access times every 5 minutes
"""
time_start = datetime.datetime(2018, 1, 1)
data = []
types = ['leccap', 'canvas']
for id in range(0,20):
# Create some sample files and 3 users
data.append([id, types[id%2], f'user{id%3}',time_start + datetime.timedelta(minutes=5*id)])
# Create the pandas DataFrame
return pd.DataFrame(data, columns=['file_id', 'file_type', 'user_id', 'access_time'])
def test_ffx_encrypt(self):
self.assertEqual(self.ffx.encrypt("ABC"), 'HZC')
def test_ffx_prefix(self):
self.assertEqual(self.ffx.encrypt(123456, addition=123000), 123000+self.ffx.encrypt(456))
# Test if no prefix found
self.assertEqual(self.ffx.encrypt(995), 643)
def test_ffx_font_case(self):
self.assertEqual(self.ffx.encrypt("something"), "zkcofcwmq")
self.assertEqual(self.ffx.encrypt("someTHing"), "KHbVXLxYV")
self.assertEqual(self.ffx.encrypt("SOMETHING"), "ZKCOFCWMQ")
def test_ffx_decimal(self):
self.assertEqual(self.ffx.encrypt('10.123'), '80.680')
self.assertEqual(self.ffx.encrypt('0.123'), '4.680')
self.assertEqual(self.ffx.encrypt('.567'), '.80')
self.assertEqual(self.ffx.encrypt('-100.0'), '-149.4')
self.assertEqual(self.ffx.encrypt('23.55'), '58.68')
self.assertEqual(self.ffx.encrypt('-.567'), '-.80')
self.assertEqual(self.ffx.encrypt('-12.567'), '-25.80')
self.assertEqual(self.ffx.encrypt('6.000'), '0.923')
self.assertEqual(self.ffx.encrypt('1.23E-2'), '5.58C-6')
self.assertEqual(self.ffx.encrypt('-1.23e2'), '-5.58c6')
def test_ffx_special(self):
self.assertEqual(self.ffx.encrypt('<EMAIL>'), '<EMAIL>')
self.assertEqual(self.ffx.encrypt('test@@@@gmail.colll099m'), 'cafw@@@@nyzav.ddzvu628k')
self.assertEqual(self.ffx.encrypt('123abc.456.bda123'), '680hzc.605.qfu680')
def test_assignment_custom(self):
self.faker.seed(util_methods.hash_string_to_int("testpasstestpass", 16))
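# Seeding the Faker with a hash of the passphrase makes the generated fake values deterministic,
# so the exact strings asserted below are reproducible across runs.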
# pylint: disable=no-member
self.assertEqual(self.faker.assignment(),"Practice Assignment #196")
# pylint: disable=no-member
self.assertEqual(self.faker.assignment(), "Architecture Assignment #819")
def test_date_time_on_date(self):
self.faker.seed(util_methods.hash_string_to_int("testpasstestpass", 16))
# pylint: disable=no-member
self.assertEqual(self.faker.date_time_on_date(datetime.datetime(2019, 1, 1, 1, 1, 1)), datetime.datetime(2019, 1, 1, 6, 58, 12))
# Adding the string case for date_time_on_date
# pylint: disable=no-member
self.assertEqual(self.faker.date_time_on_date("2019-05-01 13:14:15"), datetime.datetime(2019, 5, 1, 11, 58, 39))
def test_course_id(self):
self.faker.seed(util_methods.hash_string_to_int("testpasstestpass", 16))
self.assertEqual(self.faker.course(),"AUTO 296 006 FA 2073") #pylint: disable=no-member
self.assertEqual(self.faker.course(), "AUTO 273 007 SP 2026") #pylint: disable=no-member
def test_resample(self):
# These will always be different values returned, so just verify that the length is the same and they are within the original range
test_vals = [21, 129, 123, 94]
map_sample = util_methods.kde_resample(test_vals)
self.assertEqual(len(map_sample), 4)
self.assertTrue(min(test_vals) <= min(map_sample))
self.assertTrue(max(test_vals) >= max(map_sample))
def test_shuffle(self):
df = self.sampleAccessDF()
# Seed the randomizer so it's predictable for test
np.random.seed(util_methods.hash_string_to_int("testpasstestpass", 8))
# Assert the 9th row is 45 minutes
self.assertEqual(df.at[9, 'access_time'], pd.Timestamp('2018-01-01 00:45:00'))
util_methods.shuffle(df, shuffle_col='access_time')
# Assert the 3rd row is 45 minutes
self.assertEqual(df.at[3, 'access_time'], pd.Timestamp('2018-01-01 00:45:00'))
from datetime import datetime as dt
import pandas as pd
import numpy as np
from adsb_track.const import *
import adsb_track.const as const
class SessionData:
def __init__(self, df_ident, df_velocity, df_position):
self.df_ident = df_ident
self.df_velocity = df_velocity
self.df_position = df_position
self._unique_icao = np.unique(
np.concatenate([
x[const.ICAO].unique()
for x in (df_ident, df_velocity, df_position)
]))
def unique_icao(self):
"""Unique ICAO addresses in the message dataframes.
Args:
message_dataframes (Iterable[pandas.DataFrame]): The dataframes from
the session output. The column with ICAO24 codes is expected to
be 'icao'.
Returns:
Iterable[str]: The unique ICAO24 codes found across the input
dataframes.
"""
return self.unique_icao
def isolate_icao(self, icao):
"""Isolates the messages of an aircraft from many dataframes.
Args:
icao (str): The ICAO24 code to isolate.
Returns:
Iterable[pandas.DataFrame]: Copies of the subsets of the session's
identification, velocity, and position dataframes whose ICAO24 code
matches the icao input parameter.
"""
if icao not in self._unique_icao:
raise ValueError(f'The ICAO24 code {icao} could not be found in '
'any messages.')
return tuple([
x[x[const.ICAO] == icao].copy()
for x in (self.df_ident, self.df_velocity, self.df_position)
])
def build_track(self, icao):
"""Constructs the track of an aircraft from different types of messages.
Args:
icao (str): The ICAO24 code to isolate.
Returns:
pandas.DataFrame: A single dataframe constructed with the most
recent information of each type.
"""
        if icao not in self._unique_icao:
            raise ValueError(f'The ICAO24 code {icao} could not be found in '
                             'any messages.')
df_ident, df_velocity, df_position = self.isolate_icao(icao)
df_ident[const.MSG_TYPE] = const.IDENT
df_velocity[const.MSG_TYPE] = const.VELOCITY
df_position[const.MSG_TYPE] = const.POSITION
        df = pd.concat([df_ident, df_velocity, df_position])  # api: pandas.concat
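# --- Illustrative sketch (appended for this excerpt; the rest of build_track is truncated above) ---
# The docstring promises "the most recent information of each type". One plausible way to get
# that -- an assumption for this sketch, with invented column names -- is to sort the
# concatenated messages chronologically and forward-fill the last known values:
import pandas as pd
_ident = pd.DataFrame({'ts': [1.0, 4.0], 'callsign': ['ABC123', 'ABC123']})
_position = pd.DataFrame({'ts': [2.0, 3.0], 'lat': [51.47, 51.48], 'lon': [-0.45, -0.44]})
_track = (pd.concat([_ident, _position])
          .sort_values('ts')
          .reset_index(drop=True)
          .ffill())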
''' Google API-based feature extraction classes. '''
from pliers.extractors.image import ImageExtractor
from pliers.extractors.text import TextExtractor
from pliers.extractors.video import VideoExtractor
from pliers.transformers import (GoogleAPITransformer,
                                 GoogleVisionAPITransformer)
from pliers.extractors.base import ExtractorResult
from pliers.utils import flatten_dict
import numpy as np
import pandas as pd
import logging
import time
import warnings
import os
from collections import defaultdict
class GoogleVisionAPIExtractor(GoogleVisionAPITransformer, ImageExtractor):
''' Base class for all Extractors that use the Google Vision API. '''
VERSION = '1.0'
def _extract(self, stims):
request = self._build_request(stims)
responses = self._query_api(request)
results = []
for i, response in enumerate(responses):
if response and self.response_object in response:
raw = response[self.response_object]
results.append(ExtractorResult(raw, stims[i], self))
elif 'error' in response:
raise Exception(response['error']['message'])
else:
results.append(ExtractorResult([{}], stims[i], self))
return results
class GoogleVisionAPIFaceExtractor(GoogleVisionAPIExtractor):
''' Identifies faces in images using the Google Cloud Vision API. '''
request_type = 'FACE_DETECTION'
response_object = 'faceAnnotations'
def _to_df(self, result, handle_annotations=None):
'''
Converts a Google API Face JSON response into a Pandas Dataframe.
Args:
result (ExtractorResult): Result object from which to parse out a
Dataframe.
handle_annotations (str): How returned face annotations should be
handled in cases where there are multiple faces.
'first' indicates to only use the first face JSON object, all
other values will default to including every face.
'''
annotations = result._data
if handle_annotations == 'first':
annotations = [annotations[0]]
face_results = []
for i, annotation in enumerate(annotations):
data_dict = {}
for field, val in annotation.items():
if 'Confidence' in field:
data_dict['face_' + field] = val
                elif 'oundingPoly' in field:  # matches both 'boundingPoly' and 'fdBoundingPoly'
for j, vertex in enumerate(val['vertices']):
for dim in ['x', 'y']:
name = '%s_vertex%d_%s' % (field, j+1, dim)
val = vertex[dim] if dim in vertex else np.nan
data_dict[name] = val
elif field == 'landmarks':
for lm in val:
name = 'landmark_' + lm['type'] + '_%s'
lm_pos = {name %
k: v for (k, v) in lm['position'].items()}
data_dict.update(lm_pos)
else:
data_dict[field] = val
face_results.append(data_dict)
return pd.DataFrame(face_results)
class GoogleVisionAPILabelExtractor(GoogleVisionAPIExtractor):
''' Labels objects in images using the Google Cloud Vision API. '''
request_type = 'LABEL_DETECTION'
response_object = 'labelAnnotations'
def _to_df(self, result):
res = {label['description']: label['score'] for label in result._data if label}
        return pd.DataFrame([res])  # api: pandas.DataFrame
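# --- Illustrative sketch (appended for this excerpt; not part of pliers) ---
# _to_df above collapses the labelAnnotations list into a single-row dataframe that maps each
# label description to its score. With a made-up response payload the result looks like this:
import pandas as pd
_fake_labels = [{'description': 'dog', 'score': 0.97},
                {'description': 'animal', 'score': 0.91}]
_row = {label['description']: label['score'] for label in _fake_labels if label}
_df = pd.DataFrame([_row])  # one row with columns 'dog' and 'animal'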
"""
DatabaseRates object created from rates downloaded from the URDB,
https://openei.org.
"""
#public
import sys
import glob
import logging
import warnings
import numpy as np
import pandas as pd
from datetime import datetime
sys.path.append('../')
import config as config
import lcoc.readwrite as readwrite
import lcoc.helpers as helpers
#settings
pd.options.mode.chained_assignment = None
class DatabaseRates(object):
"""
Object for working with data downloaded from NREL's Utility Rate
    Database (URDB). Rates in the URDB are checked and updated annually by NREL
    under funding from the U.S. Department of Energy's Solar Energy
    Technologies Program, in partnership with Illinois State University's
    Institute for Regulatory Policy Studies.
Attributes
-----------
source:
URL used to download URDB data
rate_data:
pandas.DataFrame where each row represents a unique utility rate,
unfiltered & unprocessed from the URDB.
res_rate_data:
pandas.DataFrame where each row represents a unique residential utility
rate
com_rate_data:
        pandas.DataFrame where each row represents a unique commercial utility
rate
prev_exists:
Boolean indicating whether version of dataset has been previously ran
"""
def __init__(self, urdb_file=None):
# Download URDB data
self.source='https://openei.org/apps/USURDB/download/usurdb.csv.gz'
# Load URDB data
if urdb_file is not None:
            self.rate_data = pd.read_csv(urdb_file, low_memory=False)  # api: pandas.read_csv
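# --- Illustrative sketch (appended for this excerpt; the rest of __init__ is truncated above) ---
# The docstring promises res_rate_data / com_rate_data views. Assuming the URDB export carries
# a 'sector' column (an assumption for this sketch, not taken from the original code), the
# split could be a simple boolean filter:
import pandas as pd
_demo_rates = pd.DataFrame({'sector': ['Residential', 'Commercial', 'Residential'],
                            'name': ['Rate A', 'Rate B', 'Rate C']})
_res_rates = _demo_rates[_demo_rates['sector'] == 'Residential'].copy()
_com_rates = _demo_rates[_demo_rates['sector'] == 'Commercial'].copy()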
import logging
import os
from enum import Enum
import numpy as np
import pandas as pd
from natsort import index_natsorted, order_by_index
LOG = logging.getLogger()
class CorrectnessResult:
def __init__(self, status, detail=None, truth=[], result=[]):
self.status = status
self.detail = detail
self.truth = truth
self.result = result
@classmethod
def make_mismatch_result(cls, detail, truth, result):
return cls('MISMATCH', detail=detail, truth=truth, result=result)
@classmethod
def make_ok_result(cls):
return cls('OK')
@property
def is_ok(self):
return self.status == 'OK'
@property
def is_mismatch(self):
return self.status == 'MISMATCH'
def to_html(self):
status = self.status
if self.is_ok:
return status
def check_for_empty_df_then_convert_to_html(df):
if isinstance(df, list) and df == []:
return 'None'
else:
return df.to_html()
# HTML here, since it'll be used for reporting to HTML
truth_html = check_for_empty_df_then_convert_to_html(self.truth)
result_html = check_for_empty_df_then_convert_to_html(self.result)
return f'{status}<br /><div>{truth_html}</div><br /><div>{result_html}</div>'
def __repr__(self):
return self.status
class ResultDetail(Enum):
OK = 1
TRUTH_EMPTY = 2
RESULT_EMPTY = 3
SHAPE_MISMATCH = 4
COLUMNS_MISMATCH = 5
VALUE_MISMATCH = 6
class Correctness:
def __init__(self, scale_factor, benchmark):
self.scale_factor = scale_factor
self.query_output_folder = os.path.join('results', 'query_results')
self.correctness_results_folder = os.path.join('correctness_results',
benchmark, f'sf{self.scale_factor}')
def get_correctness_filepath(self, query_id):
filepath = os.path.join(self.correctness_results_folder, f'{query_id}.csv')
return filepath
@classmethod
def round_to_precision(cls, value):
rounded = ('%.2f' % value)
if "." in rounded:
return rounded[0:13]
else:
return rounded[0:12]
@classmethod
def match_double_precision(cls, truth_value, result_value):
truth_rounded = cls.round_to_precision(truth_value)
result_rounded = cls.round_to_precision(result_value)
return truth_rounded == result_rounded or abs(truth_value - result_value) <= 0.01
def prepare(self, df):
# Sort columns
df = df.sort_index(axis=1)
# Natsort all rows
df = df.reindex(index=order_by_index(df.index, index_natsorted(zip(df.to_numpy()))))
# Recreate index for comparison later
df.reset_index(level=0, drop=True, inplace=True)
return df
@classmethod
def check_for_mismatches(cls, truth, result):
merge = truth.merge(result, indicator=True, how='left')
differences = merge.loc[lambda x: x['_merge'] != 'both']
mismatches = []
for index, _ in differences.iterrows():
truth_row = truth.iloc[index]
result_row = result.iloc[index]
for column_name, truth_datum in truth_row.iteritems():
result_datum = result_row[column_name]
if truth.dtypes[column_name] == 'float64':
if np.isnan(truth_datum):
matches = (np.isnan(result_datum) == True)
elif np.isinf(truth_datum):
matches = (np.isinf(result_datum) == True)
else:
matches = cls.match_double_precision(truth_datum, result_datum)
elif truth.dtypes[column_name] == 'object':
matches = (str(truth_datum) == str(result_datum))
else:
matches = (truth_datum == result_datum)
if not matches:
mismatches.append(index)
break
return mismatches
def _check_correctness_impl(self, truth, result):
if truth.empty != result.empty:
return (ResultDetail.TRUTH_EMPTY, None) if truth.empty else (ResultDetail.RESULT_EMPTY, None)
if truth.shape != result.shape:
return (ResultDetail.SHAPE_MISMATCH, None)
truth.drop_duplicates(inplace=True, ignore_index=True)
result.drop_duplicates(inplace=True, ignore_index=True)
if truth.shape != result.shape:
LOG.debug("Rows mismatch after dropping duplicates")
return (ResultDetail.SHAPE_MISMATCH, None)
truth = self.prepare(truth)
result = self.prepare(result)
# Column names must be same
if not truth.columns.difference(result.columns).empty:
return (ResultDetail.COLUMNS_MISMATCH, None)
mismatch_indices = Correctness.check_for_mismatches(truth, result)
if mismatch_indices:
return (ResultDetail.VALUE_MISMATCH, mismatch_indices)
return (ResultDetail.OK, None)
def check_correctness(self, stream_id, query_number):
LOG.debug(f'Checking Stream={stream_id}, Query={query_number}')
correctness_path = self.get_correctness_filepath(query_number)
benchmark_path = os.path.join(self.query_output_folder, f'{stream_id}_{query_number}.csv')
# Reading truth
try:
            truth = pd.read_csv(correctness_path)  # api: pandas.read_csv
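# --- Illustrative sketch (appended for this excerpt; the module above is truncated) ---
# match_double_precision treats two floats as equal when their '%.2f' renderings agree or when
# they differ by at most 0.01. The core of that rule (ignoring the 12/13-character truncation
# done in round_to_precision) restated standalone:
def _match_double_precision_sketch(truth_value, result_value):
    return ('%.2f' % truth_value) == ('%.2f' % result_value) or abs(truth_value - result_value) <= 0.01
assert _match_double_precision_sketch(3.141, 3.142)    # same 2-decimal rendering
assert not _match_double_precision_sketch(3.14, 3.16)  # differs by more than 0.01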
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Oct 4 22:33:07 2018
@author: bruce
"""
import pandas as pd
import numpy as np
from scipy import fftpack
from scipy import signal
import matplotlib.pyplot as plt
import os
# set saving path
path_result_freq = "/home/bruce/Dropbox/Project/5.Result/5.Result_Nov/2.freq_domain/"
def correlation_matrix(corr_mx, cm_title):
fig = plt.figure()
ax1 = fig.add_subplot(111)
#cmap = cm.get_cmap('jet', 30)
cax = ax1.matshow(corr_mx, cmap='gray')
#cax = ax1.imshow(df.corr(), interpolation="nearest", cmap=cmap)
fig.colorbar(cax)
ax1.grid(False)
plt.title(cm_title)
#plt.title('cross correlation of test and retest')
ylabels = ['T1','T2','T3','T4','T6','T7','T8','T9', 'T11', 'T12', 'T13', 'T14',
'T15', 'T16', 'T17', 'T18', 'T19', 'T20', 'T21', 'T22', 'T23', 'T25']
xlabels = ['R1','R2','R3','R4','R6','R7','R8','R9', 'R11', 'R12', 'R13', 'R14',
'R15', 'R16', 'R17', 'R18', 'R19', 'R20', 'R21', 'R22', 'R23', 'R25']
ax1.set_xticks(np.arange(len(xlabels)))
ax1.set_yticks(np.arange(len(ylabels)))
ax1.set_xticklabels(xlabels,fontsize=6)
ax1.set_yticklabels(ylabels,fontsize=6)
# Add colorbar, make sure to specify tick locations to match desired ticklabels
#fig.colorbar(cax, ticks=[.75,.8,.85,.90,.95,1])
plt.show()
def correlation_matrix_01(corr_mx, cm_title):
# find the maximum in each row
# input corr_mx is a dataframe
# need to convert it into a array first
# otherwise it is not working
temp = np.asarray(corr_mx)
output = (temp == temp.max(axis=1)[:,None]) # along rows
fig = plt.figure()
ax1 = fig.add_subplot(111)
# cmap = cm.get_cmap('jet', 30)
cax = ax1.matshow(output, cmap='binary')
# cax = ax1.imshow(df.corr(), interpolation="nearest", cmap=cmap)
# fig.colorbar(cax)
ax1.grid(False)
plt.title(cm_title)
ylabels=['T1','T2','T3','T4','T6','T7','T8','T9', 'T11', 'T12', 'T13', 'T14',
'T15', 'T16', 'T17', 'T18', 'T19', 'T20', 'T21', 'T22', 'T23', 'T25']
xlabels=['R1','R2','R3','R4','R6','R7','R8','R9', 'R11', 'R12', 'R13', 'R14',
'R15', 'R16', 'R17', 'R18', 'R19', 'R20', 'R21', 'R22', 'R23', 'R25']
ax1.set_xticks(np.arange(len(xlabels)))
ax1.set_yticks(np.arange(len(ylabels)))
ax1.set_xticklabels(xlabels,fontsize=6)
ax1.set_yticklabels(ylabels,fontsize=6)
plt.show()
def correlation_matrix_min_01_comb(corr_mx1 ,corr_mx2, cm_title1, cm_title2):
# find the minimum in each row
# input corr_mx is a dataframe
# need to convert it into a array first
# otherwise it is not working
temp = np.asarray(corr_mx1)
output1 = (temp == temp.min(axis=1)[:,None]) # along rows
temp = np.asarray(corr_mx2)
output2 = (temp == temp.min(axis=1)[:,None]) # along rows
fig, (ax1, ax2) = plt.subplots(1, 2)
# figure 1
im1 = ax1.matshow(output1, cmap='binary')
#fig.colorbar(im1, ax1)
ax1.grid(False)
ax1.set_title(cm_title1)
ylabels=['T1','T2','T3','T4','T6','T7','T8','T9', 'T11', 'T12', 'T13', 'T14',
'T15', 'T16', 'T17', 'T18', 'T19', 'T20', 'T21', 'T22', 'T23', 'T25']
xlabels=['R1','R2','R3','R4','R6','R7','R8','R9', 'R11', 'R12', 'R13', 'R14',
'R15', 'R16', 'R17', 'R18', 'R19', 'R20', 'R21', 'R22', 'R23', 'R25']
ax1.set_xticks(np.arange(len(xlabels)))
ax1.set_yticks(np.arange(len(ylabels)))
ax1.set_xticklabels(xlabels,fontsize=6)
ax1.set_yticklabels(ylabels,fontsize=6)
# figure 2
im2 = ax2.matshow(output2, cmap='binary')
#fig.colorbar(im2, ax2)
ax2.grid(False)
ax2.set_title(cm_title2)
ylabels=['T1','T2','T3','T4','T6','T7','T8','T9', 'T11', 'T12', 'T13', 'T14',
'T15', 'T16', 'T17', 'T18', 'T19', 'T20', 'T21', 'T22', 'T23', 'T25']
xlabels=['R1','R2','R3','R4','R6','R7','R8','R9', 'R11', 'R12', 'R13', 'R14',
'R15', 'R16', 'R17', 'R18', 'R19', 'R20', 'R21', 'R22', 'R23', 'R25']
ax2.set_xticks(np.arange(len(xlabels)))
ax2.set_yticks(np.arange(len(ylabels)))
ax2.set_xticklabels(xlabels,fontsize=6)
ax2.set_yticklabels(ylabels,fontsize=6)
plt.show()
def correlation_matrix_tt_01(corr_mx, cm_title):
# find the maximum in each row
# input corr_mx is a dataframe
# need to convert it into a array first
# otherwise it is not working
temp = np.asarray(corr_mx)
output = (temp == temp.max(axis=1)[:,None]) # along rows
fig = plt.figure()
ax1 = fig.add_subplot(111)
#cmap = cm.get_cmap('jet', 30)
cax = ax1.matshow(output, cmap='gray')
#cax = ax1.imshow(df.corr(), interpolation="nearest", cmap=cmap)
fig.colorbar(cax)
ax1.grid(False)
plt.title(cm_title)
ylabels=['T1','T2','T3','T4','T6','T7','T8','T9', 'T11', 'T12', 'T13', 'T14',
'T15', 'T16', 'T17', 'T18', 'T19', 'T20', 'T21', 'T22', 'T23', 'T25']
xlabels=['T1','T2','T3','T4','T6','T7','T8','T9', 'T11', 'T12', 'T13', 'T14',
'T15', 'T16', 'T17', 'T18', 'T19', 'T20', 'T21', 'T22', 'T23', 'T25']
ax1.set_xticks(np.arange(len(xlabels)))
ax1.set_yticks(np.arange(len(ylabels)))
ax1.set_xticklabels(xlabels, fontsize=6)
ax1.set_yticklabels(ylabels, fontsize=6)
plt.show()
def correlation_matrix_rr_01(corr_mx, cm_title):
# find the maximum in each row
# input corr_mx is a dataframe
# need to convert it into a array first
#otherwise it is not working
temp = np.asarray(corr_mx)
output = (temp == temp.max(axis=1)[:,None]) # along rows
fig = plt.figure()
ax1 = fig.add_subplot(111)
# cmap = cm.get_cmap('jet', 30)
cax = ax1.matshow(output, cmap='gray')
# cax = ax1.imshow(df.corr(), interpolation="nearest", cmap=cmap)
fig.colorbar(cax)
ax1.grid(False)
plt.title(cm_title)
ylabels=['R1','R2','R3','R4','R6','R7','R8','R9', 'R11', 'R12', 'R13', 'R14',
'R15', 'R16', 'R17', 'R18', 'R19', 'R20', 'R21', 'R22', 'R23', 'R25']
xlabels=['R1','R2','R3','R4','R6','R7','R8','R9', 'R11', 'R12', 'R13', 'R14',
'R15', 'R16', 'R17', 'R18', 'R19', 'R20', 'R21', 'R22', 'R23', 'R25']
ax1.set_xticks(np.arange(len(xlabels)))
ax1.set_yticks(np.arange(len(ylabels)))
ax1.set_xticklabels(xlabels,fontsize=6)
ax1.set_yticklabels(ylabels,fontsize=6)
plt.show()
# eg: plot_mag_db(df_as_85_vsc, 1, "Subject")
def fig_mag_db(signal_in, subject_number = 'subject_number', title = 'title', filename = 'filename'):
plt.figure()
plt.subplot(2,1,1)
plt.plot(signal_in.iloc[2*(subject_number-1), :48030], '-')
plt.plot(signal_in.iloc[2*(subject_number-1)+1, :48030], '-')
plt.ylabel('magnitude')
plt.legend(('Retest', 'Test'), loc='upper right')
plt.title(title)
# plt.subplot(2,1,2)
# plt.plot(signal_in.iloc[2*(subject_number-1), :48030].apply(f_dB), '-')
# plt.plot(signal_in.iloc[2*(subject_number-1)+1, :48030].apply(f_dB), '-')
# plt.xlabel('Frequency(Hz)')
# plt.ylabel('dB')
# plt.xlim(0,10000)
# plt.legend(('Retest', 'Test'), loc='lower right')
plt.show()
plt.savefig(filename)
# plot time domain signal in one figure
def fig_time_in_1(signal_in, title = 'title'):
plt.figure()
sub_title = ['1', '2', '3', '4', '6', '7', '8', '9', '11', '12',\
'13', '14', '15', '16', '17', '18', '19', '20', '21',\
'22', '23', '25']
for i in range(22):
plt.subplot(11,2,i+1)
x_label = np.arange(0, 100, 0.09765625)
plt.plot(x_label, signal_in.iloc[2*i, :1024], '-')
plt.plot(x_label, signal_in.iloc[2*i+1, :1024], '-')
plt.ylabel(sub_title[i])
plt.legend(('Retest', 'Test'), loc='upper right', fontsize='xx-small')
if i < 20:
plt.xticks([])
else:
plt.xlabel('Time (ms)')
plt.suptitle(title) # add a centered title to the figure
plt.show()
# plot frequency domain signal in one figure
def fig_mag_in_1(signal_in, title = 'title'):
plt.figure()
sub_title = ['1', '2', '3', '4', '6', '7', '8', '9', '11', '12',\
'13', '14', '15', '16', '17', '18', '19', '20', '21',\
'22', '23', '25']
for i in range(22):
plt.subplot(11,2,i+1)
x_label = np.arange(0, 4803, 0.1)
plt.plot(x_label, signal_in.iloc[2*i, :48030], '-')
plt.plot(x_label, signal_in.iloc[2*i+1, :48030], '-')
plt.ylabel(sub_title[i])
plt.xlim(0,1300)
plt.legend(('Retest', 'Test'), loc='upper right', fontsize='xx-small')
if i < 20:
plt.xticks([])
else:
plt.xlabel('Frequency(Hz)')
plt.suptitle(title) # add a centered title to the figure
plt.show()
def fig_test_in_1(signal_in_1, signal_in_2, title = 'title', path = 'path', filename = 'filename'):
plt.figure()
sub_title = ['1', '2', '3', '4', '6', '7', '8', '9', '11', '12',\
'13', '14', '15', '16', '17', '18', '19', '20', '21',\
'22', '23', '25']
for i in range(22):
plt.subplot(11,2,i+1)
x_label = np.arange(0, 4803, 0.1)
plt.plot(x_label, signal_in_1.iloc[2*i, :48030], '-')
plt.plot(x_label, signal_in_2.iloc[2*i, :48030], '-')
plt.ylabel(sub_title[i])
plt.xlim(0,1000)
plt.legend(('no window', 'window'), loc='upper right', fontsize='xx-small')
plt.suptitle(title) # add a centered title to the figure
plt.show()
plt.savefig(os.path.join(path, filename), dpi=300)
def fig_retest_in_1(signal_in_1, signal_in_2, title = 'title', path = 'path', filename = 'filename'):
plt.figure()
sub_title = ['1', '2', '3', '4', '6', '7', '8', '9', '11', '12',\
'13', '14', '15', '16', '17', '18', '19', '20', '21',\
'22', '23', '25']
for i in range(22):
plt.subplot(11,2,i+1)
x_label = np.arange(0, 4803, 0.1)
plt.plot(x_label, signal_in_1.iloc[2*i+1, :48030], '-')
plt.plot(x_label, signal_in_2.iloc[2*i+1, :48030], '-')
plt.ylabel(sub_title[i])
plt.xlim(0,1000)
plt.legend(('no window', 'window'), loc='upper right', fontsize='xx-small')
plt.suptitle(title) # add a centered title to the figure
plt.show()
plt.savefig(os.path.join(path, filename), dpi=300)
def distance_mx(sig_in):
# freq_range -> from 0 to ???
freq_range = 13000
matrix_temp = np.zeros((22, 22))
matrix_temp_square = np.zeros((22, 22))
for i in range(22):
for j in range(22):
temp = np.asarray(sig_in.iloc[2*i, 0:freq_range] - sig_in.iloc[2*j+1, 0:freq_range])
temp_sum = 0
temp_square_sum = 0
for k in range(freq_range):
#test_t3 = (abs(temp_series[k]))**2
#print(test_t3)
temp_sum = temp_sum + abs(temp[k])
temp_square_sum = temp_square_sum + (abs(temp[k]))**2
matrix_temp[i][j] = temp_sum
matrix_temp_square[i][j] = temp_square_sum
output_1 = pd.DataFrame(matrix_temp)
output_2 = pd.DataFrame(matrix_temp_square)
    # output_1 sums the magnitudes, e.g. x1+jy1 -> sqrt(x1**2 + y1**2), similar to a Euclidean distance
    # output_2 sums the squared magnitudes, e.g. x1+jy1 -> x1**2 + y1**2
return output_1, output_2
def complex_coherence_mx(input_signal):
# compute the magnitude squared coherence based on signal.coherence
# then create the matrix with values
# higher value -> better coherence value
sig_in = input_signal.copy()
matrix_temp = np.zeros((22, 22))
for i in range(22):
for j in range(22):
            # temp_sum accumulates the coherence values across frequency bins
temp_sum = 0
sig_in_1 = np.array(sig_in.iloc[2*i, :])
sig_in_2 = np.array(sig_in.iloc[2*j+1, :])
# signal 9606Hz length 106.6ms window length 10ms -> nperseg=96
f, temp_Cxy = signal.coherence(sig_in_1, sig_in_2, fs=9606, nperseg=96)
            # zero out coherence values lower than 0.1
for l in range(len(temp_Cxy)):
if temp_Cxy[l] < 0.1:
temp_Cxy[l] = 0
# delete finish
# test
'''
if i ==0 and j == 0:
plt.figure()
plt.semilogy(f, temp_Cxy)
plt.title("test in complex_coherence_mx")
plt.show()
'''
# test finish
for k in range(len(temp_Cxy)):
#test_t3 = (abs(temp_series[k]))**2
#print(test_t3)
temp_sum = temp_sum + abs(temp_Cxy[k])
matrix_temp[i][j] = temp_sum
output_3 = pd.DataFrame(matrix_temp)
return output_3
def fig_coherence_in_1(signal_in, threshold_Cxy = None, title = 'title', title2 = 'title2'):
# threshold_Cxy is used for setting minimum value
Cxy_sum = pd.DataFrame()
plt.figure()
sub_title = ['1', '2', '3', '4', '6', '7', '8', '9', '11', '12',\
'13', '14', '15', '16', '17', '18', '19', '20', '21',\
'22', '23', '25']
for i in range(22):
sig_in_1 = signal_in.iloc[i, :]
sig_in_2 = signal_in.iloc[i+22, :]
# signal 9606Hz length 106.6ms window length 10ms -> nperseg=96
# no zero padding
# f, temp_Cxy = signal.coherence(sig_in_1, sig_in_2, fs=9606, nperseg=128)
# with zero padding
f, temp_Cxy = signal.coherence(sig_in_1, sig_in_2, fs = 9606, nperseg=512, nfft=19210)
# print("shape of temp_Cxy is")
# print (temp_Cxy.shape)
        # zero out coherence values below threshold_Cxy
if (threshold_Cxy != None):
for l in range(len(temp_Cxy)):
if temp_Cxy[l] < threshold_Cxy:
temp_Cxy[l] = 0
# delete finish
Cxy_sum = Cxy_sum.append(pd.DataFrame(np.reshape(temp_Cxy, (1,9606))), ignore_index=True)
plt.subplot(11,2,i+1)
plt.plot(f, temp_Cxy)
plt.ylabel(sub_title[i])
plt.xlim(0,2000)
plt.legend(('Retest', 'Test'), loc='upper right', fontsize='xx-small')
plt.suptitle(title) # add a centered title to the figure
plt.show()
    # plot the average of 22 subjects
plt.figure()
plt.subplot(1,1,1)
Cxy_avg = Cxy_sum.mean(axis=0)
plt.plot(f, Cxy_avg)
plt.title('average of 22 subjects based on '+ title2)
plt.xlim(0,2000)
plt.show()
#################################
f_dB = lambda x : 20 * np.log10(np.abs(x))
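# Illustrative usage (appended for this excerpt): f_dB converts a linear magnitude to decibels,
# so a magnitude of 10 maps to 20 dB and a magnitude of 1 maps to 0 dB.
assert abs(f_dB(10) - 20.0) < 1e-9
assert abs(f_dB(1) - 0.0) < 1e-9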
# import the pkl file
# for linux
df_EFR=pd.read_pickle('/home/bruce/Dropbox/4.Project/4.Code for Linux/df_EFR.pkl')
# for mac
# df_EFR=pd.read_pickle('/Users/bruce/Dropbox/Project/4.Code for Linux/df_EFR.pkl')
# remove DC offset
df_EFR_detrend = pd.DataFrame()
for i in range(1408):
# combine next two rows later
df_EFR_detrend_data = pd.DataFrame(signal.detrend(df_EFR.iloc[i: i+1, 0:1024], type='constant').reshape(1,1024))
df_EFR_label = pd.DataFrame(df_EFR.iloc[i, 1024:1031].values.reshape(1,7))
df_EFR_detrend = df_EFR_detrend.append(pd.concat([df_EFR_detrend_data, df_EFR_label], axis=1, ignore_index=True))
# set the title of columns
df_EFR_detrend.columns = np.append(np.arange(1024), ["Subject", "Sex", "Condition", "Vowel", "Sound Level", "Num", "EFR/FFR"])
df_EFR_detrend = df_EFR_detrend.reset_index(drop=True)
df_EFR = df_EFR_detrend
# Time domain
# Define window function
win_kaiser = signal.kaiser(1024, beta=14)
win_hamming = signal.hamming(1024)
# average the df_EFR
df_EFR_avg = pd.DataFrame()
df_EFR_avg_win = pd.DataFrame()
# average test1 and test2
for i in range(704):
# combine next two rows later
df_EFR_avg_t = pd.DataFrame(df_EFR.iloc[2*i: 2*i+2, 0:1024].mean(axis=0).values.reshape(1,1024)) # average those two rows
# implement the window function
df_EFR_avg_t_win = pd.DataFrame((df_EFR_avg_t.iloc[0,:] * win_hamming).values.reshape(1,1024))
df_EFR_label = pd.DataFrame(df_EFR.iloc[2*i, 1024:1031].values.reshape(1,7))
    df_EFR_avg = df_EFR_avg.append(pd.concat([df_EFR_avg_t, df_EFR_label], axis=1, ignore_index=True))  # api: pandas.concat
#Import necessary package
import requests
import re
from bs4 import BeautifulSoup
import pandas as pd
import numpy as np
import datetime as dt
import configparser
import os
import json
#Configure parameter
config = configparser.ConfigParser()
config.read(os.path.join(os.path.dirname(__file__), 'config.ini'))
mall = config['general']['mall']
shoplisturl = config['url']['shoplisturl']
fnblisturl = config['url']['fnblisturl']
shopdetailbasicurl = config['url']['shopdetailbasicurl']
#Get shop category data and export into csv
def getShopCategory():
#Create empty DataFrame for shop category
shopcategory = pd.DataFrame()
for type, url in zip(['Shopping','Dining'],[shoplisturl,fnblisturl]):
#Get shop category
page = requests.get(url)
soup = BeautifulSoup(page.content, 'html.parser')
for category_selected in soup.find_all('select', class_ = 'categorySelected'):
for cat in category_selected.find_all('option'):
try:
shop_category_id = cat.get('value')
except:
shop_category_id = np.nan
try:
shop_category_name = cat.text.split('\r\n')[0].strip()
except:
shop_category_name = np.nan
shopcategory = shopcategory.append(
{
'type':type,
'shop_category_id':shop_category_id,
'shop_category_name':shop_category_name
}, ignore_index=True
)
shopcategory['update_date'] = dt.date.today()
shopcategory['mall'] = mall
shopcategory.drop(shopcategory[shopcategory.shop_category_name == 'All'].index, inplace = True)
shopcategory = shopcategory.loc[:, ['mall','type','shop_category_id','shop_category_name','update_date']]
return shopcategory
#Get shop master data and export into csv
def getShopMaster():
shopcategory = getShopCategory()
#Create empty DataFrame for shop master
shoplist = pd.DataFrame()
shoplisttc = pd.DataFrame()
    shopdetail = pd.DataFrame()  # api: pandas.DataFrame
""" lineage
tools for genetic genealogy and the analysis of consumer DNA test results
"""
"""
MIT License
Copyright (c) 2016 <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import datetime
from itertools import chain, combinations
import logging
import os
import numpy as np
import pandas as pd
from snps.utils import Parallelizer, create_dir, save_df_as_csv
from lineage.individual import Individual
from lineage.resources import Resources
from lineage.visualization import plot_chromosomes
# set version string with Versioneer
from lineage._version import get_versions
__version__ = get_versions()["version"]
del get_versions
logger = logging.getLogger(__name__)
class Lineage:
""" Object used to interact with the `lineage` framework. """
def __init__(
self,
output_dir="output",
resources_dir="resources",
parallelize=False,
processes=os.cpu_count(),
):
""" Initialize a ``Lineage`` object.
Parameters
----------
output_dir : str
name / path of output directory
resources_dir : str
name / path of resources directory
parallelize : bool
utilize multiprocessing to speedup calculations
processes : int
processes to launch if multiprocessing
"""
self._output_dir = output_dir
self._resources_dir = resources_dir
self._resources = Resources(resources_dir=resources_dir)
self._parallelizer = Parallelizer(parallelize=parallelize, processes=processes)
def create_individual(self, name, raw_data=(), **kwargs):
""" Initialize an individual in the context of the `lineage` framework.
Parameters
----------
name : str
name of the individual
raw_data : str, bytes, ``SNPs`` (or list or tuple thereof)
path(s) to file(s), bytes, or ``SNPs`` object(s) with raw genotype data
**kwargs
parameters to ``snps.SNPs`` and/or ``snps.SNPs.merge``
Returns
-------
Individual
``Individual`` initialized in the context of the `lineage` framework
"""
if "output_dir" not in kwargs:
kwargs["output_dir"] = self._output_dir
if "resources_dir" not in kwargs:
kwargs["resources_dir"] = self._resources_dir
return Individual(name, raw_data, **kwargs)
def download_example_datasets(self):
""" Download example datasets from `openSNP <https://opensnp.org>`_.
Per openSNP, "the data is donated into the public domain using `CC0 1.0
<http://creativecommons.org/publicdomain/zero/1.0/>`_."
Returns
-------
paths : list of str or empty str
paths to example datasets
References
----------
1. <NAME>, <NAME>, <NAME>, <NAME> (2014), "openSNP-A Crowdsourced Web Resource
for Personal Genomics," PLOS ONE, 9(3): e89204,
https://doi.org/10.1371/journal.pone.0089204
"""
paths = self._resources.download_example_datasets()
if "" in paths:
logger.warning("Example dataset(s) not currently available")
return paths
def find_discordant_snps(
self, individual1, individual2, individual3=None, save_output=False
):
""" Find discordant SNPs between two or three individuals.
Parameters
----------
individual1 : Individual
reference individual (child if `individual2` and `individual3` are parents)
individual2 : Individual
comparison individual
individual3 : Individual
other parent if `individual1` is child and `individual2` is a parent
save_output : bool
specifies whether to save output to a CSV file in the output directory
Returns
-------
pandas.DataFrame
discordant SNPs and associated genetic data
References
----------
1. <NAME>, "Search for Discordant SNPs in Parent-Child
Raw Data Files," David Pike's Utilities,
http://www.math.mun.ca/~dapike/FF23utils/pair-discord.php
2. <NAME>, "Search for Discordant SNPs when given data
for child and both parents," David Pike's Utilities,
http://www.math.mun.ca/~dapike/FF23utils/trio-discord.php
"""
self._remap_snps_to_GRCh37([individual1, individual2, individual3])
df = individual1.snps
# remove nulls for reference individual
df = df.loc[df["genotype"].notnull()]
# add SNPs shared with `individual2`
df = df.join(individual2.snps["genotype"], rsuffix="2")
genotype1 = "genotype_" + individual1.get_var_name()
genotype2 = "genotype_" + individual2.get_var_name()
if individual3 is None:
df = df.rename(columns={"genotype": genotype1, "genotype2": genotype2})
# find discordant SNPs between reference and comparison individuals
df = df.loc[
df[genotype2].notnull()
& (
(df[genotype1].str.len() == 1)
& (df[genotype2].str.len() == 1)
& (df[genotype1] != df[genotype2])
)
| (
(df[genotype1].str.len() == 2)
& (df[genotype2].str.len() == 2)
& (df[genotype1].str[0] != df[genotype2].str[0])
& (df[genotype1].str[0] != df[genotype2].str[1])
& (df[genotype1].str[1] != df[genotype2].str[0])
& (df[genotype1].str[1] != df[genotype2].str[1])
)
]
if save_output:
save_df_as_csv(
df,
self._output_dir,
"discordant_snps_{}_{}_GRCh37.csv".format(
individual1.get_var_name(), individual2.get_var_name()
),
comment=self._get_csv_header(),
prepend_info=False,
)
else:
# add SNPs shared with `individual3`
df = df.join(individual3.snps["genotype"], rsuffix="3")
genotype3 = "genotype_" + individual3.get_var_name()
df = df.rename(
columns={
"genotype": genotype1,
"genotype2": genotype2,
"genotype3": genotype3,
}
)
# find discordant SNPs between child and two parents
df = df.loc[
(
df[genotype2].notnull()
& (
(df[genotype1].str.len() == 1)
& (df[genotype2].str.len() == 1)
& (df[genotype1] != df[genotype2])
)
| (
(df[genotype1].str.len() == 2)
& (df[genotype2].str.len() == 2)
& (df[genotype1].str[0] != df[genotype2].str[0])
& (df[genotype1].str[0] != df[genotype2].str[1])
& (df[genotype1].str[1] != df[genotype2].str[0])
& (df[genotype1].str[1] != df[genotype2].str[1])
)
)
| (
df[genotype3].notnull()
& (
(df[genotype1].str.len() == 1)
& (df[genotype3].str.len() == 1)
& (df[genotype1] != df[genotype3])
)
| (
(df[genotype1].str.len() == 2)
& (df[genotype3].str.len() == 2)
& (df[genotype1].str[0] != df[genotype3].str[0])
& (df[genotype1].str[0] != df[genotype3].str[1])
& (df[genotype1].str[1] != df[genotype3].str[0])
& (df[genotype1].str[1] != df[genotype3].str[1])
)
)
| (
df[genotype2].notnull()
& df[genotype3].notnull()
& (df[genotype2].str.len() == 2)
& (df[genotype2].str[0] == df[genotype2].str[1])
& (df[genotype2] == df[genotype3])
& (df[genotype1] != df[genotype2])
)
]
if save_output:
save_df_as_csv(
df,
self._output_dir,
"discordant_snps_{}_{}_{}_GRCh37.csv".format(
individual1.get_var_name(),
individual2.get_var_name(),
individual3.get_var_name(),
),
comment=self._get_csv_header(),
prepend_info=False,
)
return df
def find_shared_dna(
self,
individuals=(),
cM_threshold=0.75,
snp_threshold=1100,
shared_genes=False,
save_output=True,
genetic_map="HapMap2",
):
""" Find the shared DNA between individuals.
Computes the genetic distance in centiMorgans (cMs) between SNPs using the specified genetic
map. Applies thresholds to determine the shared DNA. Plots shared DNA. Optionally determines
shared genes (i.e., genes transcribed from the shared DNA).
All output is saved to the output directory as `CSV` or `PNG` files.
Notes
-----
The code is commented throughout to help describe the algorithm and its operation.
To summarize, the algorithm first computes the genetic distance in cMs between SNPs
common to all individuals using the specified genetic map.
Then, individuals are compared for whether they share one or two alleles for each SNP in
common; in this manner, where all individuals share one chromosome, for example, there
will be several SNPs in a row where at least one allele is shared between individuals for
each SNP. The ``cM_threshold`` is then applied to each of these "matching segments" to
determine whether the segment could be a potential shared DNA segment (i.e., whether each
segment has a cM value greater than the threshold).
The matching segments that passed the ``cM_threshold`` are then checked to see if they
are adjacent to another matching segment, and if so, the segments are stitched together,
and the single SNP separating the segments is flagged as potentially discrepant. (This
means that multiple smaller matching segments passing the ``cM_threshold`` could be
stitched, identifying the SNP between each segment as discrepant.)
Next, the ``snp_threshold`` is applied to each segment to ensure there are enough SNPs in
the segment and the segment is not only a few SNPs in a region with a high recombination
rate; for each segment that passes this test, we have a segment of shared DNA, and the
total cMs for this segment are computed.
Finally, discrepant SNPs are checked to ensure that only SNPs internal to a shared DNA
segment are reported as discrepant (i.e., don't report a discrepant SNP if it was part of a
segment that didn't pass the ``snp_threshold``). Currently, no action other than reporting
is taken on discrepant SNPs.
Parameters
----------
individuals : iterable of Individuals
cM_threshold : float
minimum centiMorgans for each shared DNA segment
snp_threshold : int
minimum SNPs for each shared DNA segment
shared_genes : bool
determine shared genes
save_output : bool
specifies whether to save output files in the output directory
genetic_map : {'HapMap2', 'ACB', 'ASW', 'CDX', 'CEU', 'CHB', 'CHS', 'CLM', 'FIN', 'GBR', 'GIH', 'IBS', 'JPT', 'KHV', 'LWK', 'MKK', 'MXL', 'PEL', 'PUR', 'TSI', 'YRI'}
genetic map to use for computation of shared DNA; `HapMap2` corresponds to the HapMap
Phase II genetic map from the
`International HapMap Project <https://www.genome.gov/10001688/international-hapmap-project/>`_
and all others correspond to the
`population-specific <https://www.internationalgenome.org/faq/which-populations-are-part-your-study/>`_
genetic maps generated from the
`1000 Genomes Project <https://www.internationalgenome.org>`_ phased OMNI data.
Note that shared DNA is not computed on the X chromosome with the 1000 Genomes
Project genetic maps since the X chromosome is not included in these genetic maps.
Returns
-------
dict
dict with the following items:
one_chrom_shared_dna (pandas.DataFrame)
segments of shared DNA on one chromosome
two_chrom_shared_dna (pandas.DataFrame)
segments of shared DNA on two chromosomes
one_chrom_shared_genes (pandas.DataFrame)
shared genes on one chromosome
two_chrom_shared_genes (pandas.DataFrame)
shared genes on two chromosomes
one_chrom_discrepant_snps (pandas.Index)
discrepant SNPs discovered while finding shared DNA on one chromosome
two_chrom_discrepant_snps (pandas.Index)
discrepant SNPs discovered while finding shared DNA on two chromosomes
"""
# initialize all objects to be returned to be empty to start
        one_chrom_shared_dna = pd.DataFrame()  # api: pandas.DataFrame
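# --- Illustrative sketch (appended for this excerpt; the rest of find_shared_dna is truncated above) ---
# The Notes section describes keeping only matching segments that span enough centiMorgans and
# contain enough SNPs. The filtering idea, on made-up segment data:
import pandas as pd
_segments = pd.DataFrame({'chrom': ['1', '1', '2'],
                          'cMs': [0.5, 12.3, 3.1],
                          'snps': [40, 2400, 900]})
_cM_threshold, _snp_threshold = 0.75, 1100
_shared = _segments[(_segments.cMs > _cM_threshold) & (_segments.snps > _snp_threshold)]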
import sys
import pandas as pd
import numpy as np
from sqlalchemy import create_engine
# How to process duplicates
keepDuplicatesStrategy = "first"
def load_data(messages_filepath, categories_filepath):
""" Load the data from the passed paths to csv files.
Args:
messages_filepath: Path to the messages csv file.
categories_filepath: Path to the categories csv file.
"""
messages = pd.read_csv(messages_filepath)
    categories = pd.read_csv(categories_filepath)  # api: pandas.read_csv
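# --- Illustrative sketch (appended for this excerpt; the rest of load_data is truncated above) ---
# A typical next step -- assumed here, not taken from the original file -- is to join the
# messages and categories frames on a shared 'id' column:
import pandas as pd
_messages = pd.DataFrame({'id': [1, 2], 'message': ['need water', 'roads blocked']})
_categories = pd.DataFrame({'id': [1, 2], 'categories': ['water;aid', 'infrastructure']})
_combined = _messages.merge(_categories, on='id')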
import argparse
import os
import re
import pandas as pd
import seaborn as sns
from matplotlib import pyplot as plt
from scipy import stats
MIN_TEXT_SIZE = 1000
MAX_TEXT_SIZE = 100000
def relative_file(*paths):
return os.path.join(os.path.dirname(__file__), *paths)
def read_csv(filename):
print('reading:', filename)
return pd.read_csv(filename)
parser = argparse.ArgumentParser()
parser.add_argument(
'--output', help='path the the output directory', default=relative_file('../results')
)
parser.add_argument(
'--countries',
help='path the the country metadata',
default=relative_file('../data/pmc_metadata.affiliations.countries.csv'),
)
parser.add_argument(
'--size',
help='path the the text size metadata',
default=relative_file('../data/pmc_articles.text_size.csv'),
)
parser.add_argument(
'--pubtype',
help='path the the publication type metadata',
default=relative_file('../data/pmc_metadata.entrez.csv'),
)
parser.add_argument(
'--scores',
help='path the the LDA scores data',
default=relative_file('../data/pmc_articles.lda_coherence.csv'),
)
args = parser.parse_args()
sns.set_style('whitegrid')
def savefig(ax, plot_name):
plot_name = os.path.join(args.output, plot_name)
print('writing:', plot_name)
try:
ax.figure.savefig(plot_name, bbox_inches='tight')
except AttributeError:
ax.savefig(plot_name, bbox_inches='tight')
plt.close()
loc_df = read_csv(args.countries)[['PMCID', 'country']]
pubtype_df = read_csv(args.pubtype)
pubtype_df = pubtype_df[pubtype_df.lang == 'eng']
text_size_df = read_csv(args.size)
df = read_csv(args.scores)
df['PMCID'] = df.filename.str.split('.').str[0]
df = df.merge(loc_df, on=['PMCID'], how='inner')
df = df.merge(pubtype_df, on=['PMCID'], how='inner')
df = df.merge(text_size_df.copy(), on=['PMCID'], how='inner')
df['is_english'] = df.country.isin({'US', 'UK', 'Canada', 'Australia'})
df['short_text'] = df.text_size < MIN_TEXT_SIZE
df['text_size_bin'] = df['text_size'].apply(lambda x: round(x, -2))
ax = sns.relplot(kind='scatter', data=df, x='text_size', y='score', hue='is_english')
plt.axvline(MIN_TEXT_SIZE)
plt.axvline(MAX_TEXT_SIZE)
ax.set(xscale='log')
savefig(ax, 'pmc.lda_coherence.text_size.scatter.png')
# now drop low text size
df = df[(df.text_size >= MIN_TEXT_SIZE) & (df.text_size <= MAX_TEXT_SIZE)].copy()
print(df[(df.score == 1) & (df.text_size >= 1000)])
# create stats for sheets output
ttest = stats.ttest_ind(df[df.is_english].text_size, df[~df.is_english].text_size, equal_var=False)
ttest_scores = [('text_size', ttest.statistic, ttest.pvalue)]
ttest = stats.ttest_ind(df[df.is_english].score, df[~df.is_english].score, equal_var=False)
ttest_scores.append(('lda_coherence', ttest.statistic, ttest.pvalue))
ttest_scores = pd.DataFrame(ttest_scores, columns=['measure', 'ttest_statistic', 'ttest_pvalue'])
ttest_df = (
df.groupby(['is_english'])
.agg(
{
'score': ['mean', 'median', 'std'],
'PMCID': 'nunique',
}
)
.reset_index()
)
ttest_df.columns = [col[1] if len(col) > 1 else ''.join(col) for col in ttest_df.columns.values]
ttest_df['measure'] = 'lda_coherence'
temp_df = (
df.groupby(['is_english'])
.agg(
{
'text_size': ['mean', 'median', 'std'],
'PMCID': 'nunique',
}
)
.reset_index()
)
temp_df.columns = [col[1] if len(col) > 1 else ''.join(col) for col in temp_df.columns.values]
temp_df['measure'] = 'text_size'
ttest_df = pd.concat([ttest_df, temp_df])  # api: pandas.concat
from .io import read_annotations, save_annotations
import warnings
import glob
import os
import os.path as path
import numpy as np
import pandas as pd
class AnnotationFormat:
"""
Class containing useful data for accessing and manipulating annotations.
I've tried to extract as many "magic constants" out of the actual methods as
possible so that they can be grouped here and changed easily in the future.
"""
# Column Names
LEFT_COL = "Begin Time (s)"
RIGHT_COL = "End Time (s)"
TOP_COL = "High Freq (Hz)"
BOT_COL = "Low Freq (Hz)"
CLASS_COL = "Species"
CLASS_CONF_COL = "Species Confidence"
CALL_UNCERTAINTY_COL = "Call Uncertainty"
# Column which cannot be left as NA or NaN
REQUIRED_COLS = [
LEFT_COL,
RIGHT_COL,
TOP_COL,
BOT_COL,
CLASS_COL,
CLASS_CONF_COL,
CALL_UNCERTAINTY_COL
]
# Dictionary mapping annotator's noisy labels to a constant class name
CLASS_LABEL_MAP = {
"humpback whale": "hb",
"hb whale": "hb",
"hb?": "hb",
"hhb": "hb",
"hb": "hb",
"jn": "hb",
"sea lion": "sl",
"sl": "sl",
"rockfish": "rf",
"rf": "rf",
"killer whale": "kw",
"kw": "kw",
"?": "?",
"mech": "?",
"mechanical": "?"
}
# Boxes need to span at least 1ms and 1/100 Hz
# If a box is dropped for this reason, it was likely created by mistake.
BOX_MIN_DURATION = 1e-3
BOX_MIN_FREQ_RANGE = 1e-2
# Useful glob patterns for finding annotation files and mispellings
PATTERN = "*.*-*.txt"
BAD_PATTERNS = ["*.*_*.txt"]
_format = AnnotationFormat()
def get_all_classes(annotation_paths, verbose=False):
"""
Returns a list of all classes seen in the annotation files.
Parameters
annotation_paths : list of str
paths to the .txt annotation files (eg: ['/foo/bar/annots.txt'])
verbose : bool, optional (default: False)
flag to control whether debug information is printed
Returns
classes : list of str
List containing all unique classes
"""
classes = set()
for annot_fname in annotation_paths:
classes.update(list(read_annotations(annot_fname)[_format.CLASS_COL].unique()))
classes = sorted([s for s in list(classes)])
if verbose:
print("Classes found: ", classes)
return classes
def get_area(annotation):
"""
Calculates the area of a single annotation box.
Parameters
annotation : pandas Series
a single annotation
Returns
area : float
Area of the bounding box (Hz*Seconds)
"""
return ((annotation[_format.RIGHT_COL] - annotation[_format.LEFT_COL])
* (annotation[_format.TOP_COL] - annotation[_format.BOT_COL]))
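# Illustrative usage (appended for this excerpt): a box spanning 2 s and 100 Hz has an area of
# 200 Hz*s. The Series below only fills the four bounding-box columns used by get_area.
_example_box = pd.Series({_format.LEFT_COL: 10.0, _format.RIGHT_COL: 12.0,
                          _format.BOT_COL: 400.0, _format.TOP_COL: 500.0})
assert get_area(_example_box) == 200.0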
def get_all_annotations_in_directory(directory, check_misnomers=True):
"""
Uses glob to construct a list of paths to each file in the provided
directory which matches the correct formatting of an annotation file name.
Parameters
directory : str
path to the directory of interest
check_misnomers : bool, optional (default: True)
flag to control whether to warn about potential filename mistakes
Returns
good_results : List of str
Paths found in the given directory which match the filename pattern
"""
good_results = glob.glob(path.join(directory, _format.PATTERN))
if check_misnomers:
# Check if there are any incorrectly named files that may be overlooked
bad_results = []
for bad_pattern in _format.BAD_PATTERNS:
bad_results.extend(glob.glob(path.join(directory, bad_pattern)))
if len(bad_results) > 0:
warnings.warn(
"({}) Some files in {} may be incorrectly named: " \
"[\n {}\n]".format(
"get_all_annotations_in_directory",
directory,
",\n ".join(bad_results)
)
)
return good_results
def levenshteinDistanceDP(token1, token2):
"""
Efficiently calculates the Levenshtein distance (edit distance) between two
strings. Useful for determining if a column name has been misspelled.
The cost of insertions, deletions, and substitutions are all set to 1.
Parameters
token1 : str
first token
token2 : str
second token
Returns
distance : int
the number of single-character edits required to turn token1 into token2
"""
distances = np.zeros((len(token1) + 1, len(token2) + 1))
for t1 in range(len(token1) + 1):
distances[t1][0] = t1
for t2 in range(len(token2) + 1):
distances[0][t2] = t2
a, b, c = 0, 0, 0
for t1 in range(1, len(token1) + 1):
for t2 in range(1, len(token2) + 1):
if (token1[t1-1] == token2[t2-1]):
distances[t1][t2] = distances[t1 - 1][t2 - 1]
else:
a = distances[t1][t2 - 1]
b = distances[t1 - 1][t2]
c = distances[t1 - 1][t2 - 1]
if (a <= b and a <= c):
distances[t1][t2] = a + 1
elif (b <= a and b <= c):
distances[t1][t2] = b + 1
else:
distances[t1][t2] = c + 1
return distances[len(token1)][len(token2)]
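# Illustrative usage (appended for this excerpt): clean_annotations below treats an edit
# distance of 1 or less as a probable misspelling of a required column name.
assert levenshteinDistanceDP('Species', 'Specis') == 1
assert levenshteinDistanceDP('Species', 'Species Confidence') > 1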
def _print_n_rejected(n_rejected, reason):
if n_rejected > 0:
print("Rejecting {} annotation(s) for {}".format(
n_rejected,
reason
))
def clean_annotations(annotations, verbose=False):
"""
Cleans a single DataFrame of annotations by identifying invalid annotations
and separating them from the valid annotations.
Additionally checks for other formatting issues such as misnamed columns.
Parameters
annotations : DataFrame
a set of annotations from a single recording
verbose : bool, optional (default: False)
flag to control whether debug information is printed
Returns
valid_annotations : DataFrame
the annotations that passed every filter
invalid_annotations : DataFrame
the annotations that failed at least one filter
"""
annotations = annotations.copy()
original_size = len(annotations)
# Check for misnamed columns
column_map = {}
for req_col in _format.REQUIRED_COLS:
# For each required column, find the column with a dist <= 1.
matches = []
for col in annotations.columns:
dist = levenshteinDistanceDP(col, req_col)
if dist <= 1:
matches.append(col)
if dist > 0:
column_map[col] = req_col
if len(matches) > 1:
warnings.warn(
"({}) Required Column '{}' matches multiple " \
"columns: [{}]".format(
"clean_annotations",
req_col,
", ".join(matches)
)
)
# This required column is ambiguous. Stop and reject all.
# TODO: Write logic to combine ambiguous columns automatically
            return pd.DataFrame(columns=annotations.columns)  # api: pandas.DataFrame
import numpy
import pyearth
import pandas as pd
from pyearth import Earth
pathToInputData = 'C:\\__DEMO1\\Memory.csv'
dateTimeFormat = '%d/%m/%Y %H:%M'
pathToOutputData = 'C:\\__DEMO1\\output.txt'
# Write array to file
def array_to_file(the_array, file_name):
    with open(file_name, 'w') as the_file:
        for item in the_array:
            the_file.write('%s\n' % item)
def buildModel():
# Read our data
    data = pd.read_csv(pathToInputData, index_col=0)  # api: pandas.read_csv
import pandas as pd
import pytest
import woodwork as ww
from pandas.testing import (
assert_frame_equal,
assert_index_equal,
assert_series_equal,
)
from evalml.pipelines.components import LabelEncoder
def test_label_encoder_init():
encoder = LabelEncoder()
assert encoder.parameters == {"positive_label": None}
assert encoder.random_seed == 0
def test_label_encoder_fit_transform_y_is_None():
X = pd.DataFrame({})
y = pd.Series(["a", "b"])
encoder = LabelEncoder()
with pytest.raises(ValueError, match="y cannot be None"):
encoder.fit(X)
encoder.fit(X, y)
with pytest.raises(ValueError, match="y cannot be None"):
encoder.inverse_transform(None)
def test_label_encoder_transform_y_is_None():
X = pd.DataFrame({})
y = pd.Series(["a", "b"])
encoder = LabelEncoder()
encoder.fit(X, y)
X_t, y_t = encoder.transform(X)
assert_frame_equal(X, X_t)
assert y_t is None
def test_label_encoder_fit_transform_with_numeric_values_does_not_encode():
X = pd.DataFrame({})
# binary
y = pd.Series([0, 1, 1, 1, 0])
encoder = LabelEncoder()
encoder.fit(X, y)
X_t, y_t = encoder.transform(X, y)
assert_frame_equal(X, X_t)
assert_series_equal(y, y_t)
# multiclass
X = pd.DataFrame({})
y = pd.Series([0, 1, 1, 2, 0, 2])
encoder = LabelEncoder()
encoder.fit(X, y)
X_t, y_t = encoder.transform(X, y)
assert_frame_equal(X, X_t)
assert_series_equal(y, y_t)
def test_label_encoder_fit_transform_with_numeric_values_needs_encoding():
X = pd.DataFrame({})
# binary
y = pd.Series([2, 1, 2, 1])
y_expected = pd.Series([1, 0, 1, 0])
encoder = LabelEncoder()
encoder.fit(X, y)
X_t, y_t = encoder.transform(X, y)
assert_frame_equal(X, X_t)
assert_series_equal(y_expected, y_t)
# multiclass
y = pd.Series([0, 1, 1, 3, 0, 3])
y_expected = pd.Series([0, 1, 1, 2, 0, 2])
encoder = LabelEncoder()
encoder.fit(X, y)
X_t, y_t = encoder.transform(X, y)
assert_frame_equal(X, X_t)
assert_series_equal(y_expected, y_t)
def test_label_encoder_fit_transform_with_categorical_values():
X = pd.DataFrame({})
# binary
y = pd.Series(["b", "a", "b", "b"])
y_expected = pd.Series([1, 0, 1, 1])
encoder = LabelEncoder()
encoder.fit(X, y)
X_t, y_t = encoder.transform(X, y)
assert_frame_equal(X, X_t)
assert_series_equal(y_expected, y_t)
# multiclass
y = pd.Series(["c", "a", "b", "c", "d"])
y_expected = pd.Series([2, 0, 1, 2, 3])
encoder = LabelEncoder()
encoder.fit(X, y)
X_t, y_t = encoder.transform(X, y)
assert_frame_equal(X, X_t)
assert_series_equal(y_expected, y_t)
def test_label_encoder_fit_transform_equals_fit_and_transform():
X = pd.DataFrame({})
y = pd.Series(["a", "b", "c", "a"])
encoder = LabelEncoder()
X_fit_transformed, y_fit_transformed = encoder.fit_transform(X, y)
encoder_duplicate = LabelEncoder()
encoder_duplicate.fit(X, y)
X_transformed, y_transformed = encoder_duplicate.transform(X, y)
assert_frame_equal(X_fit_transformed, X_transformed)
assert_series_equal(y_fit_transformed, y_transformed)
def test_label_encoder_inverse_transform():
X = pd.DataFrame({})
y = pd.Series(["a", "b", "c", "a"])
y_expected = ww.init_series(y)
encoder = LabelEncoder()
_, y_fit_transformed = encoder.fit_transform(X, y)
y_inverse_transformed = encoder.inverse_transform(y_fit_transformed)
assert_series_equal(y_expected, y_inverse_transformed)
y_encoded = pd.Series([1, 0, 2, 1])
    y_expected = ww.init_series(pd.Series(["b", "a", "c", "b"]))  # api: pandas.Series
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from collections import UserList
import io
import pathlib
import pytest
import socket
import threading
import weakref
import numpy as np
import pyarrow as pa
from pyarrow.tests.util import changed_environ
try:
from pandas.testing import assert_frame_equal, assert_series_equal
import pandas as pd
except ImportError:
pass
class IpcFixture:
write_stats = None
def __init__(self, sink_factory=lambda: io.BytesIO()):
self._sink_factory = sink_factory
self.sink = self.get_sink()
def get_sink(self):
return self._sink_factory()
def get_source(self):
return self.sink.getvalue()
def write_batches(self, num_batches=5, as_table=False):
nrows = 5
schema = pa.schema([('one', pa.float64()), ('two', pa.utf8())])
writer = self._get_writer(self.sink, schema)
batches = []
for i in range(num_batches):
batch = pa.record_batch(
[np.random.randn(nrows),
['foo', None, 'bar', 'bazbaz', 'qux']],
schema=schema)
batches.append(batch)
if as_table:
table = pa.Table.from_batches(batches)
writer.write_table(table)
else:
for batch in batches:
writer.write_batch(batch)
self.write_stats = writer.stats
writer.close()
return batches
class FileFormatFixture(IpcFixture):
is_file = True
options = None
def _get_writer(self, sink, schema):
return pa.ipc.new_file(sink, schema, options=self.options)
def _check_roundtrip(self, as_table=False):
batches = self.write_batches(as_table=as_table)
file_contents = pa.BufferReader(self.get_source())
reader = pa.ipc.open_file(file_contents)
assert reader.num_record_batches == len(batches)
for i, batch in enumerate(batches):
# it works. Must convert back to DataFrame
batch = reader.get_batch(i)
assert batches[i].equals(batch)
assert reader.schema.equals(batches[0].schema)
assert isinstance(reader.stats, pa.ipc.ReadStats)
assert isinstance(self.write_stats, pa.ipc.WriteStats)
assert tuple(reader.stats) == tuple(self.write_stats)
class StreamFormatFixture(IpcFixture):
# ARROW-6474, for testing writing old IPC protocol with 4-byte prefix
use_legacy_ipc_format = False
# ARROW-9395, for testing writing old metadata version
options = None
is_file = False
def _get_writer(self, sink, schema):
return pa.ipc.new_stream(
sink,
schema,
use_legacy_format=self.use_legacy_ipc_format,
options=self.options,
)
class MessageFixture(IpcFixture):
def _get_writer(self, sink, schema):
return pa.RecordBatchStreamWriter(sink, schema)
@pytest.fixture
def ipc_fixture():
return IpcFixture()
@pytest.fixture
def file_fixture():
return FileFormatFixture()
@pytest.fixture
def stream_fixture():
return StreamFormatFixture()
@pytest.fixture(params=[
pytest.param(
pytest.lazy_fixture('file_fixture'),
id='File Format'
),
pytest.param(
pytest.lazy_fixture('stream_fixture'),
id='Stream Format'
)
])
def format_fixture(request):
return request.param
def test_empty_file():
buf = b''
with pytest.raises(pa.ArrowInvalid):
pa.ipc.open_file(pa.BufferReader(buf))
def test_file_simple_roundtrip(file_fixture):
file_fixture._check_roundtrip(as_table=False)
def test_file_write_table(file_fixture):
file_fixture._check_roundtrip(as_table=True)
@pytest.mark.parametrize("sink_factory", [
lambda: io.BytesIO(),
lambda: pa.BufferOutputStream()
])
def test_file_read_all(sink_factory):
fixture = FileFormatFixture(sink_factory)
batches = fixture.write_batches()
file_contents = pa.BufferReader(fixture.get_source())
reader = pa.ipc.open_file(file_contents)
result = reader.read_all()
expected = pa.Table.from_batches(batches)
assert result.equals(expected)
def test_open_file_from_buffer(file_fixture):
# ARROW-2859; APIs accept the buffer protocol
file_fixture.write_batches()
source = file_fixture.get_source()
reader1 = pa.ipc.open_file(source)
reader2 = pa.ipc.open_file(pa.BufferReader(source))
reader3 = pa.RecordBatchFileReader(source)
result1 = reader1.read_all()
result2 = reader2.read_all()
result3 = reader3.read_all()
assert result1.equals(result2)
assert result1.equals(result3)
st1 = reader1.stats
assert st1.num_messages == 6
assert st1.num_record_batches == 5
assert reader2.stats == st1
assert reader3.stats == st1
@pytest.mark.pandas
def test_file_read_pandas(file_fixture):
frames = [batch.to_pandas() for batch in file_fixture.write_batches()]
file_contents = pa.BufferReader(file_fixture.get_source())
reader = pa.ipc.open_file(file_contents)
result = reader.read_pandas()
expected = pd.concat(frames).reset_index(drop=True)
assert_frame_equal(result, expected)
def test_file_pathlib(file_fixture, tmpdir):
file_fixture.write_batches()
source = file_fixture.get_source()
path = tmpdir.join('file.arrow').strpath
with open(path, 'wb') as f:
f.write(source)
t1 = pa.ipc.open_file(pathlib.Path(path)).read_all()
t2 = pa.ipc.open_file(pa.OSFile(path)).read_all()
assert t1.equals(t2)
def test_empty_stream():
buf = io.BytesIO(b'')
with pytest.raises(pa.ArrowInvalid):
pa.ipc.open_stream(buf)
@pytest.mark.pandas
def test_stream_categorical_roundtrip(stream_fixture):
df = pd.DataFrame({
'one': np.random.randn(5),
'two': pd.Categorical(['foo', np.nan, 'bar', 'foo', 'foo'],
categories=['foo', 'bar'],
ordered=True)
})
batch = pa.RecordBatch.from_pandas(df)
with stream_fixture._get_writer(stream_fixture.sink, batch.schema) as wr:
wr.write_batch(batch)
table = (pa.ipc.open_stream(pa.BufferReader(stream_fixture.get_source()))
.read_all())
assert_frame_equal(table.to_pandas(), df)
def test_open_stream_from_buffer(stream_fixture):
# ARROW-2859
stream_fixture.write_batches()
source = stream_fixture.get_source()
reader1 = pa.ipc.open_stream(source)
reader2 = pa.ipc.open_stream(pa.BufferReader(source))
reader3 = pa.RecordBatchStreamReader(source)
result1 = reader1.read_all()
result2 = reader2.read_all()
result3 = reader3.read_all()
assert result1.equals(result2)
assert result1.equals(result3)
st1 = reader1.stats
assert st1.num_messages == 6
assert st1.num_record_batches == 5
assert reader2.stats == st1
assert reader3.stats == st1
assert tuple(st1) == tuple(stream_fixture.write_stats)
@pytest.mark.pandas
def test_stream_write_dispatch(stream_fixture):
# ARROW-1616
df = pd.DataFrame({
'one': np.random.randn(5),
'two': pd.Categorical(['foo', np.nan, 'bar', 'foo', 'foo'],
categories=['foo', 'bar'],
ordered=True)
})
table = pa.Table.from_pandas(df, preserve_index=False)
batch = pa.RecordBatch.from_pandas(df, preserve_index=False)
with stream_fixture._get_writer(stream_fixture.sink, table.schema) as wr:
wr.write(table)
wr.write(batch)
table = (pa.ipc.open_stream(pa.BufferReader(stream_fixture.get_source()))
.read_all())
assert_frame_equal(table.to_pandas(),
pd.concat([df, df], ignore_index=True))
@pytest.mark.pandas
def test_stream_write_table_batches(stream_fixture):
# ARROW-504
df = pd.DataFrame({
'one': np.random.randn(20),
})
b1 = pa.RecordBatch.from_pandas(df[:10], preserve_index=False)
b2 = pa.RecordBatch.from_pandas(df, preserve_index=False)
table = pa.Table.from_batches([b1, b2, b1])
with stream_fixture._get_writer(stream_fixture.sink, table.schema) as wr:
wr.write_table(table, max_chunksize=15)
batches = list(pa.ipc.open_stream(stream_fixture.get_source()))
assert list(map(len, batches)) == [10, 15, 5, 10]
result_table = pa.Table.from_batches(batches)
assert_frame_equal(result_table.to_pandas(),
pd.concat([df[:10], df, df[:10]],
ignore_index=True))
@pytest.mark.parametrize('use_legacy_ipc_format', [False, True])
def test_stream_simple_roundtrip(stream_fixture, use_legacy_ipc_format):
stream_fixture.use_legacy_ipc_format = use_legacy_ipc_format
batches = stream_fixture.write_batches()
file_contents = pa.BufferReader(stream_fixture.get_source())
reader = pa.ipc.open_stream(file_contents)
assert reader.schema.equals(batches[0].schema)
total = 0
for i, next_batch in enumerate(reader):
assert next_batch.equals(batches[i])
total += 1
assert total == len(batches)
with pytest.raises(StopIteration):
reader.read_next_batch()
@pytest.mark.zstd
def test_compression_roundtrip():
sink = io.BytesIO()
values = np.random.randint(0, 3, 10000)
table = pa.Table.from_arrays([values], names=["values"])
options = pa.ipc.IpcWriteOptions(compression='zstd')
with pa.ipc.RecordBatchFileWriter(
sink, table.schema, options=options) as writer:
writer.write_table(table)
len1 = len(sink.getvalue())
sink2 = io.BytesIO()
codec = pa.Codec('zstd', compression_level=5)
options = pa.ipc.IpcWriteOptions(compression=codec)
with pa.ipc.RecordBatchFileWriter(
sink2, table.schema, options=options) as writer:
writer.write_table(table)
len2 = len(sink2.getvalue())
# In theory len2 should be less than len1 but for this test we just want
# to ensure compression_level is being correctly passed down to the C++
# layer so we don't really care if it makes it worse or better
assert len2 != len1
t1 = pa.ipc.open_file(sink).read_all()
t2 = pa.ipc.open_file(sink2).read_all()
assert t1 == t2
def test_write_options():
options = pa.ipc.IpcWriteOptions()
assert options.allow_64bit is False
assert options.use_legacy_format is False
assert options.metadata_version == pa.ipc.MetadataVersion.V5
options.allow_64bit = True
assert options.allow_64bit is True
options.use_legacy_format = True
assert options.use_legacy_format is True
options.metadata_version = pa.ipc.MetadataVersion.V4
assert options.metadata_version == pa.ipc.MetadataVersion.V4
for value in ('V5', 42):
with pytest.raises((TypeError, ValueError)):
options.metadata_version = value
assert options.compression is None
for value in ['lz4', 'zstd']:
if pa.Codec.is_available(value):
options.compression = value
assert options.compression == value
options.compression = value.upper()
assert options.compression == value
options.compression = None
assert options.compression is None
with pytest.raises(TypeError):
options.compression = 0
assert options.use_threads is True
options.use_threads = False
assert options.use_threads is False
if pa.Codec.is_available('lz4'):
options = pa.ipc.IpcWriteOptions(
metadata_version=pa.ipc.MetadataVersion.V4,
allow_64bit=True,
use_legacy_format=True,
compression='lz4',
use_threads=False)
assert options.metadata_version == pa.ipc.MetadataVersion.V4
assert options.allow_64bit is True
assert options.use_legacy_format is True
assert options.compression == 'lz4'
assert options.use_threads is False
def test_write_options_legacy_exclusive(stream_fixture):
with pytest.raises(
ValueError,
match="provide at most one of options and use_legacy_format"):
stream_fixture.use_legacy_ipc_format = True
stream_fixture.options = pa.ipc.IpcWriteOptions()
stream_fixture.write_batches()
@pytest.mark.parametrize('options', [
pa.ipc.IpcWriteOptions(),
pa.ipc.IpcWriteOptions(allow_64bit=True),
pa.ipc.IpcWriteOptions(use_legacy_format=True),
pa.ipc.IpcWriteOptions(metadata_version=pa.ipc.MetadataVersion.V4),
pa.ipc.IpcWriteOptions(use_legacy_format=True,
metadata_version=pa.ipc.MetadataVersion.V4),
])
def test_stream_options_roundtrip(stream_fixture, options):
stream_fixture.use_legacy_ipc_format = None
stream_fixture.options = options
batches = stream_fixture.write_batches()
file_contents = pa.BufferReader(stream_fixture.get_source())
message = pa.ipc.read_message(stream_fixture.get_source())
assert message.metadata_version == options.metadata_version
reader = pa.ipc.open_stream(file_contents)
assert reader.schema.equals(batches[0].schema)
total = 0
for i, next_batch in enumerate(reader):
assert next_batch.equals(batches[i])
total += 1
assert total == len(batches)
with pytest.raises(StopIteration):
reader.read_next_batch()
def test_dictionary_delta(format_fixture):
ty = pa.dictionary(pa.int8(), pa.utf8())
data = [["foo", "foo", None],
["foo", "bar", "foo"], # potential delta
["foo", "bar"], # nothing new
["foo", None, "bar", "quux"], # potential delta
["bar", "quux"], # replacement
]
batches = [
pa.RecordBatch.from_arrays([pa.array(v, type=ty)], names=['dicts'])
for v in data]
batches_delta_only = batches[:4]
schema = batches[0].schema
def write_batches(batches, as_table=False):
with format_fixture._get_writer(pa.MockOutputStream(),
schema) as writer:
if as_table:
table = pa.Table.from_batches(batches)
writer.write_table(table)
else:
for batch in batches:
writer.write_batch(batch)
return writer.stats
if format_fixture.is_file:
# File format cannot handle replacement
with pytest.raises(pa.ArrowInvalid):
write_batches(batches)
# File format cannot handle delta if emit_deltas
# is not provided
with pytest.raises(pa.ArrowInvalid):
write_batches(batches_delta_only)
else:
st = write_batches(batches)
assert st.num_record_batches == 5
assert st.num_dictionary_batches == 4
assert st.num_replaced_dictionaries == 3
assert st.num_dictionary_deltas == 0
format_fixture.use_legacy_ipc_format = None
format_fixture.options = pa.ipc.IpcWriteOptions(
emit_dictionary_deltas=True)
if format_fixture.is_file:
# File format cannot handle replacement
with pytest.raises(pa.ArrowInvalid):
write_batches(batches)
else:
st = write_batches(batches)
assert st.num_record_batches == 5
assert st.num_dictionary_batches == 4
assert st.num_replaced_dictionaries == 1
assert st.num_dictionary_deltas == 2
st = write_batches(batches_delta_only)
assert st.num_record_batches == 4
assert st.num_dictionary_batches == 3
assert st.num_replaced_dictionaries == 0
assert st.num_dictionary_deltas == 2
format_fixture.options = pa.ipc.IpcWriteOptions(
unify_dictionaries=True
)
st = write_batches(batches, as_table=True)
assert st.num_record_batches == 5
if format_fixture.is_file:
assert st.num_dictionary_batches == 1
assert st.num_replaced_dictionaries == 0
assert st.num_dictionary_deltas == 0
else:
assert st.num_dictionary_batches == 4
assert st.num_replaced_dictionaries == 3
assert st.num_dictionary_deltas == 0
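# A minimal standalone sketch (not part of the original suite) of the option
# exercised above: with ``emit_dictionary_deltas=True`` the stream writer can
# append a dictionary delta instead of replacing the whole dictionary, while
# the file format rejects replacements outright.
def _sketch_emit_dictionary_deltas():
    ty = pa.dictionary(pa.int8(), pa.utf8())
    batches = [
        pa.RecordBatch.from_arrays([pa.array(v, type=ty)], names=['dicts'])
        for v in [["foo", "foo"], ["foo", "bar"]]]  # second batch adds "bar"
    sink = pa.BufferOutputStream()
    options = pa.ipc.IpcWriteOptions(emit_dictionary_deltas=True)
    with pa.ipc.new_stream(sink, batches[0].schema, options=options) as writer:
        for batch in batches:
            writer.write_batch(batch)
    return pa.ipc.open_stream(sink.getvalue()).read_all()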
def test_envvar_set_legacy_ipc_format():
schema = pa.schema([pa.field('foo', pa.int32())])
writer = pa.ipc.new_stream(pa.BufferOutputStream(), schema)
assert not writer._use_legacy_format
assert writer._metadata_version == pa.ipc.MetadataVersion.V5
writer = pa.ipc.new_file(pa.BufferOutputStream(), schema)
assert not writer._use_legacy_format
assert writer._metadata_version == pa.ipc.MetadataVersion.V5
with changed_environ('ARROW_PRE_0_15_IPC_FORMAT', '1'):
writer = pa.ipc.new_stream(pa.BufferOutputStream(), schema)
assert writer._use_legacy_format
assert writer._metadata_version == pa.ipc.MetadataVersion.V5
writer = pa.ipc.new_file(pa.BufferOutputStream(), schema)
assert writer._use_legacy_format
assert writer._metadata_version == pa.ipc.MetadataVersion.V5
with changed_environ('ARROW_PRE_1_0_METADATA_VERSION', '1'):
writer = pa.ipc.new_stream(pa.BufferOutputStream(), schema)
assert not writer._use_legacy_format
assert writer._metadata_version == pa.ipc.MetadataVersion.V4
writer = pa.ipc.new_file(pa.BufferOutputStream(), schema)
assert not writer._use_legacy_format
assert writer._metadata_version == pa.ipc.MetadataVersion.V4
with changed_environ('ARROW_PRE_1_0_METADATA_VERSION', '1'):
with changed_environ('ARROW_PRE_0_15_IPC_FORMAT', '1'):
writer = pa.ipc.new_stream(pa.BufferOutputStream(), schema)
assert writer._use_legacy_format
assert writer._metadata_version == pa.ipc.MetadataVersion.V4
writer = pa.ipc.new_file(pa.BufferOutputStream(), schema)
assert writer._use_legacy_format
assert writer._metadata_version == pa.ipc.MetadataVersion.V4
def test_stream_read_all(stream_fixture):
batches = stream_fixture.write_batches()
file_contents = pa.BufferReader(stream_fixture.get_source())
reader = pa.ipc.open_stream(file_contents)
result = reader.read_all()
expected = pa.Table.from_batches(batches)
assert result.equals(expected)
@pytest.mark.pandas
def test_stream_read_pandas(stream_fixture):
frames = [batch.to_pandas() for batch in stream_fixture.write_batches()]
file_contents = stream_fixture.get_source()
reader = pa.ipc.open_stream(file_contents)
result = reader.read_pandas()
expected = pd.concat(frames).reset_index(drop=True)
assert_frame_equal(result, expected)
@pytest.fixture
def example_messages(stream_fixture):
batches = stream_fixture.write_batches()
file_contents = stream_fixture.get_source()
buf_reader = pa.BufferReader(file_contents)
reader = pa.MessageReader.open_stream(buf_reader)
return batches, list(reader)
def test_message_ctors_no_segfault():
with pytest.raises(TypeError):
repr(pa.Message())
with pytest.raises(TypeError):
repr(pa.MessageReader())
def test_message_reader(example_messages):
_, messages = example_messages
assert len(messages) == 6
assert messages[0].type == 'schema'
assert isinstance(messages[0].metadata, pa.Buffer)
assert isinstance(messages[0].body, pa.Buffer)
assert messages[0].metadata_version == pa.MetadataVersion.V5
for msg in messages[1:]:
assert msg.type == 'record batch'
assert isinstance(msg.metadata, pa.Buffer)
assert isinstance(msg.body, pa.Buffer)
assert msg.metadata_version == pa.MetadataVersion.V5
def test_message_serialize_read_message(example_messages):
_, messages = example_messages
msg = messages[0]
buf = msg.serialize()
reader = pa.BufferReader(buf.to_pybytes() * 2)
restored = pa.ipc.read_message(buf)
restored2 = pa.ipc.read_message(reader)
restored3 = pa.ipc.read_message(buf.to_pybytes())
restored4 = pa.ipc.read_message(reader)
assert msg.equals(restored)
assert msg.equals(restored2)
assert msg.equals(restored3)
assert msg.equals(restored4)
with pytest.raises(pa.ArrowInvalid, match="Corrupted message"):
pa.ipc.read_message(pa.BufferReader(b'ab'))
with pytest.raises(EOFError):
pa.ipc.read_message(reader)
@pytest.mark.gzip
def test_message_read_from_compressed(example_messages):
# Part of ARROW-5910
_, messages = example_messages
for message in messages:
raw_out = pa.BufferOutputStream()
with pa.output_stream(raw_out, compression='gzip') as compressed_out:
message.serialize_to(compressed_out)
compressed_buf = raw_out.getvalue()
result = pa.ipc.read_message(pa.input_stream(compressed_buf,
compression='gzip'))
assert result.equals(message)
def test_message_read_record_batch(example_messages):
batches, messages = example_messages
for batch, message in zip(batches, messages[1:]):
read_batch = pa.ipc.read_record_batch(message, batch.schema)
assert read_batch.equals(batch)
def test_read_record_batch_on_stream_error_message():
# ARROW-5374
batch = pa.record_batch([pa.array([b"foo"], type=pa.utf8())],
names=['strs'])
stream = pa.BufferOutputStream()
with pa.ipc.new_stream(stream, batch.schema) as writer:
writer.write_batch(batch)
buf = stream.getvalue()
with pytest.raises(IOError,
match="type record batch but got schema"):
pa.ipc.read_record_batch(buf, batch.schema)
# ----------------------------------------------------------------------
# Socket streaming tests
class StreamReaderServer(threading.Thread):
def init(self, do_read_all):
self._sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self._sock.bind(('127.0.0.1', 0))
self._sock.listen(1)
host, port = self._sock.getsockname()
self._do_read_all = do_read_all
self._schema = None
self._batches = []
self._table = None
return port
def run(self):
connection, client_address = self._sock.accept()
try:
source = connection.makefile(mode='rb')
reader = pa.ipc.open_stream(source)
self._schema = reader.schema
if self._do_read_all:
self._table = reader.read_all()
else:
for i, batch in enumerate(reader):
self._batches.append(batch)
finally:
connection.close()
def get_result(self):
        return (self._schema, self._table if self._do_read_all
                else self._batches)
class SocketStreamFixture(IpcFixture):
def __init__(self):
# XXX(wesm): test will decide when to start socket server. This should
# probably be refactored
pass
def start_server(self, do_read_all):
self._server = StreamReaderServer()
port = self._server.init(do_read_all)
self._server.start()
self._sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self._sock.connect(('127.0.0.1', port))
self.sink = self.get_sink()
def stop_and_get_result(self):
import struct
self.sink.write(struct.pack('Q', 0))
self.sink.flush()
self._sock.close()
self._server.join()
return self._server.get_result()
def get_sink(self):
return self._sock.makefile(mode='wb')
def _get_writer(self, sink, schema):
return pa.RecordBatchStreamWriter(sink, schema)
@pytest.fixture
def socket_fixture():
return SocketStreamFixture()
def test_socket_simple_roundtrip(socket_fixture):
socket_fixture.start_server(do_read_all=False)
writer_batches = socket_fixture.write_batches()
reader_schema, reader_batches = socket_fixture.stop_and_get_result()
assert reader_schema.equals(writer_batches[0].schema)
assert len(reader_batches) == len(writer_batches)
for i, batch in enumerate(writer_batches):
assert reader_batches[i].equals(batch)
def test_socket_read_all(socket_fixture):
socket_fixture.start_server(do_read_all=True)
writer_batches = socket_fixture.write_batches()
_, result = socket_fixture.stop_and_get_result()
expected = pa.Table.from_batches(writer_batches)
assert result.equals(expected)
# ----------------------------------------------------------------------
# Miscellaneous IPC tests
@pytest.mark.pandas
def test_ipc_file_stream_has_eos():
# ARROW-5395
df = pd.DataFrame({'foo': [1.5]})
batch = pa.RecordBatch.from_pandas(df)
sink = pa.BufferOutputStream()
write_file(batch, sink)
buffer = sink.getvalue()
# skip the file magic
reader = pa.ipc.open_stream(buffer[8:])
# will fail if encounters footer data instead of eos
rdf = reader.read_pandas()
assert_frame_equal(df, rdf)
@pytest.mark.pandas
def test_ipc_zero_copy_numpy():
df = pd.DataFrame({'foo': [1.5]})
batch = pa.RecordBatch.from_pandas(df)
sink = pa.BufferOutputStream()
write_file(batch, sink)
buffer = sink.getvalue()
reader = pa.BufferReader(buffer)
batches = read_file(reader)
data = batches[0].to_pandas()
rdf = pd.DataFrame(data)
    assert_frame_equal(df, rdf)
#!/usr/bin/env python
# coding: utf-8
# In[ ]:
import pandas as pd
from datetime import date
import matplotlib.pyplot as plt
import matplotlib.dates as md
import matplotlib
import seaborn as sns
import numpy as np
# In[1350]:
def setPandasOptions():
pd.set_option('display.max_rows', 200)
pd.set_option('display.max_columns', None)
pd.set_option('display.width', None)
pd.set_option('display.max_colwidth', 30)
    pd.set_option('display.max_seq_items', 100)
import sqlite3
import pandas as pd
import hues
from collections import defaultdict
from tqdm import tqdm
import sklearn
def is_number(s):
try:
float(s)
return True
except ValueError:
return False
def reshape_tags(data, index=None, columns=None, values=None):
if (index is None) or (columns is None) or (values is None):
return None
cols = [str(x) for x in data[columns].unique()]
inds = [int(x) for x in data[index].unique()]
num_duplicates = defaultdict(lambda: defaultdict(set))
other_duplicates = defaultdict(lambda: defaultdict(set))
df = pd.DataFrame(columns=cols, index=inds)
for i in tqdm(range(len(data))):
ind, col, val = data.iloc[i][[index, columns, values]]
ind = int(ind)
old_val = df.loc[ind, col]
        if pd.isnull(old_val):
#
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
#
import pandas as pd
from mlos.Logger import create_logger
from mlos.Tracer import trace
from mlos.Spaces import CategoricalDimension, DiscreteDimension, Point, SimpleHypergrid, DefaultConfigMeta
from mlos.Optimizers.BayesianOptimizerConvergenceState import BayesianOptimizerConvergenceState
from mlos.Optimizers.OptimizerInterface import OptimizerInterface
from mlos.Optimizers.OptimizationProblem import OptimizationProblem
from mlos.Optimizers.ExperimentDesigner.ExperimentDesigner import ExperimentDesigner, ExperimentDesignerConfig
from mlos.Optimizers.RegressionModels.GoodnessOfFitMetrics import DataSetType
from mlos.Optimizers.RegressionModels.HomogeneousRandomForestRegressionModel import HomogeneousRandomForestRegressionModel,\
HomogeneousRandomForestRegressionModelConfig
class BayesianOptimizerConfig(metaclass=DefaultConfigMeta):
CONFIG_SPACE = SimpleHypergrid(
name="bayesian_optimizer_config",
dimensions=[
CategoricalDimension(name="surrogate_model_implementation", values=[HomogeneousRandomForestRegressionModel.__name__]),
CategoricalDimension(name="experiment_designer_implementation", values=[ExperimentDesigner.__name__]),
DiscreteDimension(name="min_samples_required_for_guided_design_of_experiments", min=2, max=10000)
]
).join(
subgrid=HomogeneousRandomForestRegressionModelConfig.CONFIG_SPACE,
on_external_dimension=CategoricalDimension(name="surrogate_model_implementation", values=[HomogeneousRandomForestRegressionModel.__name__])
).join(
subgrid=ExperimentDesignerConfig.CONFIG_SPACE,
on_external_dimension=CategoricalDimension(name="experiment_designer_implementation", values=[ExperimentDesigner.__name__])
)
_DEFAULT = Point(
surrogate_model_implementation=HomogeneousRandomForestRegressionModel.__name__,
experiment_designer_implementation=ExperimentDesigner.__name__,
min_samples_required_for_guided_design_of_experiments=10,
homogeneous_random_forest_regression_model_config=HomogeneousRandomForestRegressionModelConfig.DEFAULT,
experiment_designer_config=ExperimentDesignerConfig.DEFAULT
)
class BayesianOptimizer(OptimizerInterface):
"""Generic Bayesian Optimizer based on regresson model
Uses extra trees as surrogate model and confidence bound acquisition function by default.
Attributes
----------
logger : Logger
optimization_problem : OptimizationProblem
surrogate_model : HomogeneousRandomForestRegressionModel
optimizer_config : Point
experiment_designer: ExperimentDesigner
"""
def __init__(
self,
optimization_problem: OptimizationProblem,
optimizer_config: Point,
logger=None
):
if logger is None:
logger = create_logger("BayesianOptimizer")
self.logger = logger
# Let's initialize the optimizer.
#
assert len(optimization_problem.objectives) == 1, "For now this is a single-objective optimizer."
OptimizerInterface.__init__(self, optimization_problem)
assert optimizer_config in BayesianOptimizerConfig.CONFIG_SPACE, "Invalid config."
self.optimizer_config = optimizer_config
# Now let's put together the surrogate model.
#
assert self.optimizer_config.surrogate_model_implementation == HomogeneousRandomForestRegressionModel.__name__, "TODO: implement more"
self.surrogate_model = HomogeneousRandomForestRegressionModel(
model_config=self.optimizer_config.homogeneous_random_forest_regression_model_config,
input_space=self.optimization_problem.parameter_space, # TODO: change to feature space
output_space=self.optimization_problem.objective_space,
logger=self.logger
)
# Now let's put together the experiment designer that will suggest parameters for each experiment.
#
assert self.optimizer_config.experiment_designer_implementation == ExperimentDesigner.__name__
self.experiment_designer = ExperimentDesigner(
designer_config=self.optimizer_config.experiment_designer_config,
optimization_problem=self.optimization_problem,
surrogate_model=self.surrogate_model,
logger=self.logger
)
self._optimizer_convergence_state = BayesianOptimizerConvergenceState(
surrogate_model_fit_state=self.surrogate_model.fit_state
)
# Also let's make sure we have the dataframes we need for the surrogate model.
# TODO: this will need a better home - either a DataSet class or the surrogate model itself.
self._feature_values_df = pd.DataFrame(columns=[dimension.name for dimension in self.optimization_problem.parameter_space.dimensions])
        self._target_values_df = pd.DataFrame(columns=[dimension.name for dimension in self.optimization_problem.objective_space.dimensions])
import multiprocessing
import os
import time
from datetime import datetime, timedelta
import sys
from functools import partial
import mongo_proxy
from pymongo import UpdateOne, ReplaceOne, DeleteMany, MongoClient
sys.path.extend([sys.argv[1]])
import settings
from mongo_orm import MongoDB, AnyField
from project_customization.flexcoop.models import DataPoint, Device
from project_customization.flexcoop.reports.telemetry_usage import get_data_model
from project_customization.flexcoop.timeseries_utils import timeseries_mapping, indoor_sensing, occupancy, meter, \
status_devices, device_status, atw_heatpumps
from project_customization.flexcoop.utils import convert_snake_case
import pandas as pd
import numpy as np
import pytz
"""We define the cronjobs to be executed to deal with the raw data recieved"""
#define the final timeseries models:
timezone = pytz.timezone("Europe/Madrid")
NUM_PROCESSES = 10
DEVICES_BY_PROC = 10
device_exception = ["76f899f2-323b-11ea-92d1-ac1f6b403fbc"]
def no_outliers_stats(series, lowq=2.5, highq=97.5):
hh = series[(series <= np.nanquantile(series, highq/100))& (series >= np.nanquantile(series, lowq/100))]
return {"mean": hh.mean(), "median": hh.median(), "std": hh.std()}
def clean_znorm_data(series, th, lowq=2.5, highq=97.5):
series1 = series.round(2).value_counts()
series1 = series1 / series1.sum()
series2 = series.copy()
for c in series1.iteritems():
if c[1] > 0.20:
series2 = series[series.round(2) != c[0]]
else:
break
stats = no_outliers_stats(series2, lowq, highq)
zscore = np.abs( (series - stats['median']) / stats['std'])
return series[zscore < th]
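# Illustrative usage (not from the original module): clean_znorm_data trims the
# distribution tails via no_outliers_stats and then drops points whose robust
# z-score exceeds ``th``. For a synthetic series with one gross outlier:
#
#   s = pd.Series(np.concatenate([np.random.normal(20.0, 1.0, 500), [500.0]]))
#   cleaned = clean_znorm_data(s, th=5)
#   assert 500.0 not in cleaned.values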
def znorm_value(series, window, th, lowq=2.5, highq=97.5):
val_index = int(window / 2)
if len(series) < val_index:
return 0
current = series.iloc[val_index]
stats = no_outliers_stats(series, lowq, highq)
if np.isnan(stats['std']):
zscore = 0
else:
zscore = np.abs((current - stats['median']) / stats['std'])
return zscore
def clean_znorm_window(series, th, lowq=2.5, highq=97.5):
zscore = series.rolling(window=49, center=True, min_periods=1).apply(znorm_value, raw=False, args=(49, th))
return series[zscore < th]
def clean_threshold_data(series, min_th=None, max_th=None):
if min_th is not None and max_th is not None:
return series[(min_th<=series) & (series<=max_th)]
elif min_th is not None:
return series[series >= min_th]
elif max_th is not None:
return series[series <= max_th]
else:
return series
def cleaning_data(series, period, operations):
df = pd.DataFrame(series)
for operation in operations:
if operation['type'] == 'threshold':
df.value = clean_threshold_data(df.value, min_th=operation['params'][0], max_th=operation['params'][1])
if operation['type'] == "znorm":
df.value = clean_znorm_data(df.value, operation['params'])
# if period == "backups":
# #print(len(series))
# df.value = clean_znorm_window(df.value, operation['params'])
return df.value
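# The ``operations`` argument is a list of dicts dispatched on 'type'; a sketch
# of the expected shape (the numbers here are illustrative, not the real
# per-sensor configuration):
#
#   ops = [
#       {'type': 'threshold', 'params': [0, 50]},  # keep values in [0, 50]
#       {'type': 'znorm', 'params': 4},            # then drop |z| >= 4 outliers
#   ]
#   clean = cleaning_data(raw_series, period="hourly", operations=ops)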
def clean_device_data_status(today, now, devices):
conn = mongo_proxy.MongoProxy(MongoClient(settings.MONGO_URI))
databasem = conn.get_database("flexcoop")
devicep = databasem['devices']
for device in devices:
print("starting ", device)
point = devicep.find_one({"device_id": device})
if not point:
continue
device_df = []
for key in point['status'].keys():
try:
database = "{}_{}".format("status",convert_snake_case(key))
value = status_devices[database]
except:
continue
raw_model = databasem[database]
data = list(raw_model.find({"device_id": device}))
print("readed data ", key)
if not data:
continue
df = pd.DataFrame.from_records(data)
df.index = pd.to_datetime(df.dtstart, errors='coerce')
df = df[~df.index.isna()]
df = df.sort_index()
account_id = df.account_id.unique()[0]
aggregator_id = df.aggregator_id.unique()[0]
device_class = point['rid']
# instant values, expand the value tu the current time
df = df[['value']].append(pd.DataFrame({"value": np.nan}, index=[now]))
data_clean = df.fillna(method="pad")
if data_clean.empty:
continue
df = pd.DataFrame(data_clean)
df = df.rename(columns={"value": value['field']})
device_df.append(df)
print("treated data")
if device_df:
device_df_final = device_df.pop(0)
device_df_final = device_df_final.join(device_df, how="outer")
device_df_final = device_df_final.fillna(method="pad")
device_df_final['account_id'] = account_id
device_df_final['aggregator_id'] = aggregator_id
device_df_final['device_class'] = device_class
device_df_final['device_id'] = device
device_df_final['timestamp'] = device_df_final.index.to_pydatetime()
device_df_final['_created_at'] = datetime.utcnow()
device_df_final['_updated_at'] = datetime.utcnow()
device_df_final = device_df_final[device_df_final.index >= today.replace(tzinfo=None)]
df_ini = min(device_df_final.index)
df_max = max(device_df_final.index)
documents = device_df_final.to_dict('records')
print("writting_status_data {}".format(len(documents)))
databasem['device_status'].delete_many({"device_id": device, "timestamp": {"$gte":df_ini.to_pydatetime(), "$lte": df_max.to_pydatetime()}})
databasem['device_status'].insert_many(documents)
def aggregate_device_status(now):
print("********* START STATUS CLEAN {} *************", datetime.now())
today = timezone.localize(datetime(now.year,now.month,now.day)).astimezone(pytz.UTC)
devices = set()
for key, value in status_devices.items():
raw_model = get_data_model(key)
devices.update(raw_model.__mongo__.distinct("device_id"))
devices = list(devices)
# iterate for each device to obtain the clean data of each type.
a_pool = multiprocessing.Pool(NUM_PROCESSES)
    devices_per_thread = DEVICES_BY_PROC
a_pool.map(partial(clean_device_data_status, today, now), [devices[x:x+devices_per_thread] for x in range(0, len(devices), devices_per_thread)])
print("********* END STATUS CLEAN {} *************", datetime.now())
"""
data_clean = pd.DataFrame(df.value.resample("1s").mean())
mask = pd.DataFrame(data_clean.copy())
data_clean = mask.copy()
grp = ((mask.notnull() != mask.shift().notnull()).cumsum())
grp['ones'] = 1
mask['value'] = (grp.groupby('value')['ones'].transform('count') < 3600) | data_clean['value'].notnull()
data_clean.value = data_clean.value.interpolate(limit_direction="backward")[mask.value].diff()
data_clean.value = clean_threshold_data(data_clean.value, 0 , 0.004166)
data_clean_value = data_clean.value.resample(freq).mean()
data_clean_value = data_clean_value * 60 * 15
data_clean = pd.DataFrame(data_clean_value)
plt.plot(data_clean.value)
plt.show()
"""
def clean_device_data_timeseries(today, now, last_period, freq, period, device):
conn = MongoClient(settings.MONGO_URI)
database = conn.get_database("flexcoop")
datap = database['data_points']
print("starting ", device)
point = datap.find_one({"device_id": device})
if not point:
conn.close()
return
atw_heatpumps_df = []
indoor_sensing_df = []
occupancy_df = []
meter_df = []
for key in point['reporting_items'].keys():
try:
value = timeseries_mapping[key]
except:
continue
raw_model = database[key]
data = list(raw_model.find({"device_id": device, "dtstart":{"$lte":now.strftime("%Y-%m-%dT%H:%M:%S.%f"), "$gte": last_period.strftime("%Y-%m-%dT%H:%M:%S.%f")}}))
if not data:
#no data in the last period, get the last value ever.
print("nodata")
data =list(raw_model.find({"device_id": device, "dtstart": {"$lte": now.strftime("%Y-%m-%dT%H:%M:%S.%f")}}))
if not data:
print("nodata2")
continue
else:
print("data2")
#get the last value of the request
df = pd.DataFrame.from_records(data)
df.index = pd.to_datetime(df.dtstart, errors='coerce')
df = df[~df.index.isna()]
df = df.sort_index()
df = df.iloc[[-1]]
else:
df = pd.DataFrame.from_records(data)
df.index = pd.to_datetime(df.dtstart, errors='coerce')
df = df[~df.index.isna()]
df = df.sort_index()
# get the data_point information
point_info = point['reporting_items'][key]
reading_type = point_info['reading_type']
account_id = df.account_id.unique()[0]
aggregator_id = df.aggregator_id.unique()[0]
device_class = point['rid']
df = df.loc[~df.index.duplicated(keep='last')]
print("readed data ", key)
if reading_type == "Direct Read":
if value['operation'] == "SUM":
try:
df.value = pd.to_numeric(df.value)
except:
print("AVG is only valid for numeric values")
continue
data_check = df.value.diff()
data_clean = df.value[data_check.shift(-1) >=0]
data_clean = data_clean[data_check >= 0]
data_clean = pd.DataFrame(data_clean.resample("1s").mean())
data_clean['verified'] = data_clean.value.notna()
data_clean.verified = data_clean.verified[data_clean.value.notna()]
copy = pd.DataFrame(data_clean.value.resample("3H", label='right').max())
copy['verified'] = False
copy.value = copy.value.fillna(method='ffill')
data_clean = pd.concat([data_clean, copy], sort=True)
data_clean = data_clean[~data_clean.index.duplicated(keep='last')]
data_clean = data_clean.sort_index()
data_clean.value = data_clean.value.interpolate(limit_direction="backward").diff()
data_clean['verified_0'] = data_clean.verified.fillna(method='ffill')
data_clean['verified_1'] = data_clean.verified.fillna(method='bfill')
data_clean['verified'] = data_clean.verified_0 & data_clean.verified_1
data_clean.value = clean_threshold_data(data_clean.value, 0 , 0.004166)
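                # At this point ``value`` holds per-second energy increments
                # (the 0..0.004166 bound above caps them at roughly 15 kWh/h);
                # averaging over the resample window and multiplying by
                # 60 * 15 = 900 s recovers kWh per interval, assuming ``freq``
                # is 15 minutes.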
data_clean_value = data_clean.value.resample(freq).mean()
data_clean_value = data_clean_value * 60 * 15
data_clean_verified = data_clean.verified.resample(freq).apply(all)
data_clean = pd.DataFrame(data_clean_value)
data_clean['verified_kwh'] = data_clean_verified
else:
data_clean = pd.DataFrame()
elif reading_type == "Net":
# instant values, expand the value tu the current time
            df = df[['value']].append(pd.DataFrame({"value": np.nan}, index=[now]))
import pandas as pd
import numpy as np
import lightgbm as lgbm
from scipy import sparse
from datetime import datetime
def load_sparse_matrix(filename):
y = np.load(filename)
z = sparse.coo_matrix((y['data'], (y['row'], y['col'])), shape=y['shape'])
return z
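# For reference, the .npz files loaded below would typically be written from a
# COO matrix with matching key names (a sketch; the original preprocessing
# script is not shown here):
#
#   m = sparse.coo_matrix(tfidf_matrix)
#   np.savez('train_tfidf.npz', data=m.data, row=m.row, col=m.col, shape=m.shape)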
x_train = load_sparse_matrix('../input/train_tfidf.npz')
x_test = load_sparse_matrix('../input/test_tfidf.npz')
target = pd.read_csv('../input/train.csv', usecols=['final_status'])
train = pd.read_csv('../input/train.csv', usecols=['created_at', 'deadline', 'launched_at', 'state_changed_at'])
test = pd.read_csv('../input/test.csv', usecols=['created_at', 'deadline', 'launched_at', 'state_changed_at', 'project_id'])
train['created_at'] = pd.to_datetime(train['created_at'], unit='s')
train['state_changed_at'] = pd.to_datetime(train['state_changed_at'], unit='s')
train['deadline'] = pd.to_datetime(train['deadline'], unit='s')
train['launched_at'] = pd.to_datetime(train['launched_at'], unit='s')
import os
import pickle
import numpy as np
import pandas as pd
import torch
from torch.utils.data import Dataset, DataLoader
from sklearn.preprocessing import StandardScaler, MinMaxScaler
from utils.timefeatures import time_features
import warnings
warnings.filterwarnings('ignore')
class Dataset_ETT_hour(Dataset):
def __init__(self, root_path, flag='train', size=None,
features='S', data_path='ETTh1.csv',
target='OT', scale=True, inverse=False, timeenc=0, freq='h', cols=None):
# size [seq_len, label_len, pred_len]
# info
if size == None:
self.seq_len = 24*4*4
self.label_len = 24*4
self.pred_len = 24*4
else:
self.seq_len = size[0]
self.label_len = size[1]
self.pred_len = size[2]
# init
assert flag in ['train', 'test', 'val']
type_map = {'train':0, 'val':1, 'test':2}
self.set_type = type_map[flag]
self.features = features
self.target = target
self.scale = scale
self.inverse = inverse
self.timeenc = timeenc
self.freq = freq
self.root_path = root_path
self.data_path = data_path
self.__read_data__()
def __read_data__(self):
self.scaler = StandardScaler()
df_raw = pd.read_csv(os.path.join(self.root_path,
self.data_path))
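        # ETTh files are hourly: the first 12 months (12*30*24 rows) form the
        # training split, the next 4 months validation and the following
        # 4 months test; val/test start seq_len rows earlier so their first
        # sample still has a full encoder history.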
border1s = [0, 12*30*24 - self.seq_len, 12*30*24+4*30*24 - self.seq_len]
border2s = [12*30*24, 12*30*24+4*30*24, 12*30*24+8*30*24]
border1 = border1s[self.set_type]
border2 = border2s[self.set_type]
if self.features=='M' or self.features=='MS':
cols_data = df_raw.columns[1:]
df_data = df_raw[cols_data]
elif self.features=='S':
df_data = df_raw[[self.target]]
if self.scale:
train_data = df_data[border1s[0]:border2s[0]]
self.scaler.fit(train_data.values)
data = self.scaler.transform(df_data.values)
else:
data = df_data.values
df_stamp = df_raw[['date']][border1:border2]
df_stamp['date'] = pd.to_datetime(df_stamp.date)
data_stamp = time_features(df_stamp, timeenc=self.timeenc, freq=self.freq)
self.data_x = data[border1:border2]
if self.inverse:
self.data_y = df_data.values[border1:border2]
else:
self.data_y = data[border1:border2]
self.data_stamp = data_stamp
def __getitem__(self, index):
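        # The encoder reads the seq_len window starting at ``index``; the
        # decoder target reuses the last label_len points of that window and
        # appends the next pred_len points to forecast.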
s_begin = index
s_end = s_begin + self.seq_len
r_begin = s_end - self.label_len
r_end = r_begin + self.label_len + self.pred_len
seq_x = self.data_x[s_begin:s_end]
if self.inverse:
seq_y = np.concatenate([self.data_x[r_begin:r_begin+self.label_len], self.data_y[r_begin+self.label_len:r_end]], 0)
else:
seq_y = self.data_y[r_begin:r_end]
seq_x_mark = self.data_stamp[s_begin:s_end]
seq_y_mark = self.data_stamp[r_begin:r_end]
return seq_x, seq_y, seq_x_mark, seq_y_mark
def __len__(self):
        return len(self.data_x) - self.seq_len - self.pred_len + 1
def inverse_transform(self, data):
return self.scaler.inverse_transform(data)
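# Typical usage (a sketch; the paths and window sizes below are placeholders,
# not values taken from this repository):
#
#   train_set = Dataset_ETT_hour(root_path='./data/ETT', flag='train',
#                                size=[96, 48, 24], features='M',
#                                data_path='ETTh1.csv', target='OT')
#   loader = DataLoader(train_set, batch_size=32, shuffle=True)
#   seq_x, seq_y, seq_x_mark, seq_y_mark = next(iter(loader))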
class NASA_Anomaly(Dataset):
def __init__(self, root_path, flag='train', size=None,
features='M', data_path='SMAP',
target=0, scale=True):
# size [seq_len, label_len pred_len]
# info
if size == None:
self.seq_len = 8*60
self.label_len = 2*60
self.pred_len = 2*60
else:
self.seq_len = size[0]
self.label_len = size[1]
self.pred_len = size[2]
# init
assert flag in ['train', 'test', 'val']
type_map = {'train':0, 'val':1, 'test':2}
self.set_type = type_map[flag]
self.flag = flag
self.features = features
self.target = target
self.scale = scale
self.root_path = root_path
self.data_path = data_path
self.__read_data__()
def get_data_dim(self, dataset):
if dataset == 'SMAP':
return 25
elif dataset == 'MSL':
return 55
elif str(dataset).startswith('machine'):
return 38
else:
raise ValueError('unknown dataset '+str(dataset))
def __read_data__(self):
"""
get data from pkl files
return shape: (([train_size, x_dim], [train_size] or None), ([test_size, x_dim], [test_size]))
"""
x_dim = self.get_data_dim(self.data_path)
if self.flag == 'train':
f = open(os.path.join(self.root_path, self.data_path, '{}_train.pkl'.format(self.data_path)), "rb")
data = pickle.load(f).reshape((-1, x_dim))
f.close()
elif self.flag in ['val', 'test']:
try:
f = open(os.path.join(self.root_path, self.data_path, '{}_test.pkl'.format(self.data_path)), "rb")
data = pickle.load(f).reshape((-1, x_dim))
f.close()
except (KeyError, FileNotFoundError):
data = None
try:
f = open(os.path.join(self.root_path, self.data_path, '{}_test_label.pkl'.format(self.data_path)), "rb")
label = pickle.load(f).reshape((-1))
f.close()
except (KeyError, FileNotFoundError):
label = None
            assert len(data) == len(label), "length of test data should be the same as the label"
if self.scale:
data = self.preprocess(data)
df_stamp = pd.DataFrame(columns=['date'])
date = pd.date_range(start='1/1/2015', periods=len(data), freq='4s')
df_stamp['date'] = date
df_stamp['month'] = df_stamp.date.apply(lambda row:row.month,1)
df_stamp['day'] = df_stamp.date.apply(lambda row:row.day,1)
df_stamp['weekday'] = df_stamp.date.apply(lambda row:row.weekday(),1)
df_stamp['hour'] = df_stamp.date.apply(lambda row:row.hour,1)
df_stamp['minute'] = df_stamp.date.apply(lambda row:row.minute,1)
# df_stamp['minute'] = df_stamp.minute.map(lambda x:x//10)
df_stamp['second'] = df_stamp.date.apply(lambda row:row.second,1)
data_stamp = df_stamp.drop(['date'],1).values
if self.flag == 'train':
if self.features=='M':
self.data_x = data
self.data_y = data
elif self.features=='S':
df_data = data[:, [self.target]]
self.data_x = df_data
self.data_y = df_data
else:
border1s = [0, 0, 0]
border2s = [None, len(data)//4, len(data)]
border1 = border1s[self.set_type]
border2 = border2s[self.set_type]
if self.features=='M':
self.data_x = data[border1:border2]
self.data_y = data[border1:border2]
self.label = label[border1:border2]
elif self.features=='S':
df_data = data[:, [self.target]]
self.data_x = df_data[border1:border2]
self.data_y = df_data[border1:border2]
self.label = label[border1:border2]
self.data_stamp = data_stamp
def preprocess(self, df):
"""returns normalized and standardized data.
"""
df = np.asarray(df, dtype=np.float32)
if len(df.shape) == 1:
raise ValueError('Data must be a 2-D array')
if np.any(sum(np.isnan(df)) != 0):
print('Data contains null values. Will be replaced with 0')
            df = np.nan_to_num(df)
# normalize data
df = MinMaxScaler().fit_transform(df)
print('Data normalized')
return df
def __getitem__(self, index):
s_begin = index
s_end = s_begin + self.seq_len
r_begin = s_end - self.label_len
r_end = s_end + self.pred_len
seq_x = self.data_x[s_begin:s_end]
seq_y = self.data_y[r_begin:r_end]
seq_x_mark = self.data_stamp[s_begin:s_end]
seq_y_mark = self.data_stamp[r_begin:r_end]
if self.flag == 'train':
return seq_x, seq_y, seq_x_mark, seq_y_mark
else:
seq_label = self.label[s_end:r_end]
return seq_x, seq_y, seq_x_mark, seq_y_mark, seq_label
def __len__(self):
return len(self.data_x) - self.seq_len - self.pred_len + 1
class WADI(Dataset):
def __init__(self, root_path, flag='train', size=None,
features='M', data_path='WADI_14days_downsampled.csv',
target='1_AIT_001_PV', scale=True):
# size [seq_len, label_len pred_len]
# info
if size == None:
self.seq_len = 8*60
self.label_len = 2*60
self.pred_len = 2*60
else:
self.seq_len = size[0]
self.label_len = size[1]
self.pred_len = size[2]
# init
assert flag in ['train', 'test', 'val']
self.flag = flag
type_map = {'train':0, 'val':1, 'test':2}
self.set_type = type_map[flag]
self.features = features
self.target = target
self.scale = scale
self.root_path = root_path
self.data_path = data_path
self.__read_data__()
def __read_data__(self):
scaler = MinMaxScaler()
if self.flag == 'train':
df_raw = pd.read_csv(os.path.join(self.root_path,
'WADI_14days_downsampled.csv'))
if self.features=='M':
cols_data = df_raw.columns[1:]
df_data = df_raw[cols_data]
elif self.features=='S':
df_data = df_raw[[self.target]]
df_stamp = df_raw[['date']]
if self.scale:
data = scaler.fit_transform(df_data.values)
else:
data = df_data.values
self.data_x = data
self.data_y = data
else:
df_raw = pd.read_csv(os.path.join(self.root_path,
'WADI_attackdata_downsampled.csv'))
border1s = [0, 0, 0]
border2s = [None, len(df_raw)//4, len(df_raw)]
border1 = border1s[self.set_type]
border2 = border2s[self.set_type]
df_stamp = df_raw[['date']][border1:border2]
if self.features=='M':
cols_data = df_raw.columns[1:-1]
df_data = df_raw[cols_data]
label = df_raw['label'].values
elif self.features=='S':
df_data = df_raw[[self.target]]
label = df_raw['label'].values
if self.scale:
data = scaler.fit_transform(df_data.values)
else:
data = df_data.values
self.data_x = data[border1:border2]
self.data_y = data[border1:border2]
self.label = label[border1:border2]
df_stamp['date'] = pd.to_datetime(df_stamp.date)
df_stamp['month'] = df_stamp.date.apply(lambda row:row.month,1)
df_stamp['day'] = df_stamp.date.apply(lambda row:row.day,1)
df_stamp['weekday'] = df_stamp.date.apply(lambda row:row.weekday(),1)
df_stamp['hour'] = df_stamp.date.apply(lambda row:row.hour,1)
df_stamp['minute'] = df_stamp.date.apply(lambda row:row.minute,1)
# df_stamp['minute'] = df_stamp.minute.map(lambda x:x//10)
df_stamp['second'] = df_stamp.date.apply(lambda row:row.second,1)
df_stamp['second'] = df_stamp.second.map(lambda x:x//10)
data_stamp = df_stamp.drop(['date'],1).values
self.data_stamp = data_stamp
def __getitem__(self, index):
s_begin = index
s_end = s_begin + self.seq_len
r_begin = s_end - self.label_len
r_end = s_end + self.pred_len
seq_x = self.data_x[s_begin:s_end]
seq_y = self.data_y[r_begin:r_end]
seq_x_mark = self.data_stamp[s_begin:s_end]
seq_y_mark = self.data_stamp[r_begin:r_end]
if self.flag == 'train':
return seq_x, seq_y, seq_x_mark, seq_y_mark
else:
seq_label = self.label[s_end:r_end]
return seq_x, seq_y, seq_x_mark, seq_y_mark, seq_label
def __len__(self):
return len(self.data_x) - self.seq_len - self.pred_len + 1
class SWaT(Dataset):
def __init__(self, root_path, flag='train', size=None,
features='M', data_path='SWaT_normaldata_downsampled.csv',
target='FIT_101', scale=True):
# size [seq_len, label_len pred_len]
# info
if size == None:
self.seq_len = 8*60
self.label_len = 2*60
self.pred_len = 2*60
else:
self.seq_len = size[0]
self.label_len = size[1]
self.pred_len = size[2]
# init
assert flag in ['train', 'test', 'val']
self.flag = flag
type_map = {'train':0, 'val':1, 'test':2}
self.set_type = type_map[flag]
self.features = features
self.target = target
self.scale = scale
self.root_path = root_path
self.data_path = data_path
self.__read_data__()
def __read_data__(self):
scaler = MinMaxScaler()
if self.flag == 'train':
df_raw = pd.read_csv(os.path.join(self.root_path,
'SWaT_normaldata_downsampled.csv'))
if self.features=='M':
cols_data = df_raw.columns[1:]
df_data = df_raw[cols_data]
elif self.features=='S':
df_data = df_raw[[self.target]]
df_stamp = df_raw[[' Timestamp']]
if self.scale:
data = scaler.fit_transform(df_data.values)
else:
data = df_data.values
self.data_x = data
self.data_y = data
else:
df_raw = pd.read_csv(os.path.join(self.root_path,
'SWaT_attackdata_downsampled.csv'))
border1s = [0, 0, 0]
border2s = [None, len(df_raw)//4, len(df_raw)]
border1 = border1s[self.set_type]
border2 = border2s[self.set_type]
df_stamp = df_raw[[' Timestamp']][border1:border2]
if self.features=='M':
cols_data = df_raw.columns[1:-1]
df_data = df_raw[cols_data]
label = df_raw['Normal/Attack'].values
elif self.features=='S':
df_data = df_raw[[self.target]]
label = df_raw['Normal/Attack'].values
if self.scale:
data = scaler.fit_transform(df_data.values)
else:
data = df_data.values
self.data_x = data[border1:border2]
self.data_y = data[border1:border2]
self.label = label[border1:border2]
        df_stamp[' Timestamp'] = pd.to_datetime(df_stamp[' Timestamp'])
import pandas as pd
from datetime import datetime, timedelta
from matplotlib import pyplot as plt
from matplotlib import dates as mpl_dates
plt.style.use('seaborn')
data = pd.read_csv('data.csv')
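# A plausible continuation of this plotting script (the 'Date' and 'Close'
# column names are assumed for illustration, not taken from the actual CSV):
#
#   data['Date'] = pd.to_datetime(data['Date'])
#   data.sort_values('Date', inplace=True)
#   plt.plot_date(data['Date'], data['Close'], linestyle='solid', marker=None)
#   plt.gca().xaxis.set_major_formatter(mpl_dates.DateFormatter('%b %d %Y'))
#   plt.gcf().autofmt_xdate()
#   plt.show()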
import operator
from shutil import get_terminal_size
from typing import Dict, Hashable, List, Type, Union, cast
from warnings import warn
import numpy as np
from pandas._config import get_option
from pandas._libs import algos as libalgos, hashtable as htable
from pandas._typing import ArrayLike, Dtype, Ordered, Scalar
from pandas.compat.numpy import function as nv
from pandas.util._decorators import (
Appender,
Substitution,
cache_readonly,
deprecate_kwarg,
doc,
)
from pandas.util._validators import validate_bool_kwarg, validate_fillna_kwargs
from pandas.core.dtypes.cast import (
coerce_indexer_dtype,
maybe_cast_to_extension_array,
maybe_infer_to_datetimelike,
)
from pandas.core.dtypes.common import (
ensure_int64,
ensure_object,
is_categorical_dtype,
is_datetime64_dtype,
is_dict_like,
is_dtype_equal,
is_extension_array_dtype,
is_integer_dtype,
is_iterator,
is_list_like,
is_object_dtype,
is_scalar,
is_sequence,
is_timedelta64_dtype,
needs_i8_conversion,
)
from pandas.core.dtypes.dtypes import CategoricalDtype
from pandas.core.dtypes.generic import ABCIndexClass, ABCSeries
from pandas.core.dtypes.inference import is_hashable
from pandas.core.dtypes.missing import isna, notna
from pandas.core import ops
from pandas.core.accessor import PandasDelegate, delegate_names
import pandas.core.algorithms as algorithms
from pandas.core.algorithms import _get_data_algo, factorize, take, take_1d, unique1d
from pandas.core.array_algos.transforms import shift
from pandas.core.arrays.base import ExtensionArray, _extension_array_shared_docs
from pandas.core.base import NoNewAttributesMixin, PandasObject, _shared_docs
import pandas.core.common as com
from pandas.core.construction import array, extract_array, sanitize_array
from pandas.core.indexers import check_array_indexer, deprecate_ndim_indexing
from pandas.core.missing import interpolate_2d
from pandas.core.ops.common import unpack_zerodim_and_defer
from pandas.core.sorting import nargsort
from pandas.io.formats import console
def _cat_compare_op(op):
opname = f"__{op.__name__}__"
@unpack_zerodim_and_defer(opname)
def func(self, other):
if is_list_like(other) and len(other) != len(self):
# TODO: Could this fail if the categories are listlike objects?
raise ValueError("Lengths must match.")
if not self.ordered:
if opname in ["__lt__", "__gt__", "__le__", "__ge__"]:
raise TypeError(
"Unordered Categoricals can only compare equality or not"
)
if isinstance(other, Categorical):
            # Two Categoricals can only be compared if the categories are
# the same (maybe up to ordering, depending on ordered)
msg = "Categoricals can only be compared if 'categories' are the same."
if len(self.categories) != len(other.categories):
raise TypeError(msg + " Categories are different lengths")
elif self.ordered and not (self.categories == other.categories).all():
raise TypeError(msg)
elif not set(self.categories) == set(other.categories):
raise TypeError(msg)
if not (self.ordered == other.ordered):
raise TypeError(
"Categoricals can only be compared if 'ordered' is the same"
)
if not self.ordered and not self.categories.equals(other.categories):
# both unordered and different order
other_codes = _get_codes_for_values(other, self.categories)
else:
other_codes = other._codes
f = getattr(self._codes, opname)
ret = f(other_codes)
mask = (self._codes == -1) | (other_codes == -1)
if mask.any():
                # In other Series, this leads to False, so do that here too
if opname == "__ne__":
ret[(self._codes == -1) & (other_codes == -1)] = True
else:
ret[mask] = False
return ret
if is_scalar(other):
if other in self.categories:
i = self.categories.get_loc(other)
ret = getattr(self._codes, opname)(i)
if opname not in {"__eq__", "__ge__", "__gt__"}:
# check for NaN needed if we are not equal or larger
mask = self._codes == -1
ret[mask] = False
return ret
else:
if opname == "__eq__":
return np.zeros(len(self), dtype=bool)
elif opname == "__ne__":
return np.ones(len(self), dtype=bool)
else:
raise TypeError(
f"Cannot compare a Categorical for op {opname} with a "
"scalar, which is not a category."
)
else:
# allow categorical vs object dtype array comparisons for equality
# these are only positional comparisons
if opname in ["__eq__", "__ne__"]:
return getattr(np.array(self), opname)(np.array(other))
raise TypeError(
f"Cannot compare a Categorical for op {opname} with "
f"type {type(other)}.\nIf you want to compare values, "
"use 'np.asarray(cat) <op> other'."
)
func.__name__ = opname
return func
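# Comparison semantics in brief (illustrative, mirroring the checks above):
#
#   c = pd.Categorical(['a', 'b'], categories=['a', 'b'])  # unordered
#   c == 'a'   # array([ True, False]); equality is always allowed
#   c < 'b'    # TypeError: unordered Categoricals only support == and !=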
def contains(cat, key, container):
"""
Helper for membership check for ``key`` in ``cat``.
    This is a helper method for :meth:`__contains__`
and :class:`CategoricalIndex.__contains__`.
Returns True if ``key`` is in ``cat.categories`` and the
location of ``key`` in ``categories`` is in ``container``.
Parameters
----------
    cat : :class:`Categorical` or :class:`CategoricalIndex`
key : a hashable object
The key to check membership for.
container : Container (e.g. list-like or mapping)
The container to check for membership in.
Returns
-------
is_in : bool
True if ``key`` is in ``self.categories`` and location of
``key`` in ``categories`` is in ``container``, else False.
Notes
-----
This method does not check for NaN values. Do that separately
before calling this method.
"""
hash(key)
# get location of key in categories.
# If a KeyError, the key isn't in categories, so logically
# can't be in container either.
try:
loc = cat.categories.get_loc(key)
except (KeyError, TypeError):
return False
# loc is the location of key in categories, but also the *value*
# for key in container. So, `key` may be in categories,
# but still not in `container`. Example ('b' in categories,
# but not in values):
# 'b' in Categorical(['a'], categories=['a', 'b']) # False
if is_scalar(loc):
return loc in container
else:
# if categories is an IntervalIndex, loc is an array.
return any(loc_ in container for loc_ in loc)
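# Example of the subtlety spelled out above (illustrative):
#
#   cat = pd.Categorical(['a'], categories=['a', 'b'])
#   contains(cat, 'b', cat._codes)  # False: 'b' is a category, but its code
#                                   # (1) never appears in the data
#   contains(cat, 'a', cat._codes)  # True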
class Categorical(ExtensionArray, PandasObject):
"""
Represent a categorical variable in classic R / S-plus fashion.
`Categoricals` can only take on only a limited, and usually fixed, number
of possible values (`categories`). In contrast to statistical categorical
variables, a `Categorical` might have an order, but numerical operations
(additions, divisions, ...) are not possible.
All values of the `Categorical` are either in `categories` or `np.nan`.
Assigning values outside of `categories` will raise a `ValueError`. Order
is defined by the order of the `categories`, not lexical order of the
values.
Parameters
----------
values : list-like
The values of the categorical. If categories are given, values not in
categories will be replaced with NaN.
categories : Index-like (unique), optional
The unique categories for this categorical. If not given, the
categories are assumed to be the unique values of `values` (sorted, if
possible, otherwise in the order in which they appear).
ordered : bool, default False
        Whether or not this categorical is treated as an ordered categorical.
If True, the resulting categorical will be ordered.
An ordered categorical respects, when sorted, the order of its
`categories` attribute (which in turn is the `categories` argument, if
provided).
dtype : CategoricalDtype
An instance of ``CategoricalDtype`` to use for this categorical.
Attributes
----------
categories : Index
The categories of this categorical
codes : ndarray
The codes (integer positions, which point to the categories) of this
categorical, read only.
ordered : bool
Whether or not this Categorical is ordered.
dtype : CategoricalDtype
The instance of ``CategoricalDtype`` storing the ``categories``
and ``ordered``.
Methods
-------
from_codes
__array__
Raises
------
ValueError
If the categories do not validate.
TypeError
If an explicit ``ordered=True`` is given but no `categories` and the
`values` are not sortable.
See Also
--------
CategoricalDtype : Type for categorical data.
CategoricalIndex : An Index with an underlying ``Categorical``.
Notes
-----
See the `user guide
<https://pandas.pydata.org/pandas-docs/stable/user_guide/categorical.html>`_
for more.
Examples
--------
>>> pd.Categorical([1, 2, 3, 1, 2, 3])
[1, 2, 3, 1, 2, 3]
Categories (3, int64): [1, 2, 3]
>>> pd.Categorical(['a', 'b', 'c', 'a', 'b', 'c'])
[a, b, c, a, b, c]
Categories (3, object): [a, b, c]
Ordered `Categoricals` can be sorted according to the custom order
of the categories and can have a min and max value.
>>> c = pd.Categorical(['a', 'b', 'c', 'a', 'b', 'c'], ordered=True,
... categories=['c', 'b', 'a'])
>>> c
[a, b, c, a, b, c]
Categories (3, object): [c < b < a]
>>> c.min()
'c'
"""
    # For comparisons, so that numpy uses our implementation of the compare
    # ops, which raise
__array_priority__ = 1000
_dtype = CategoricalDtype(ordered=False)
# tolist is not actually deprecated, just suppressed in the __dir__
_deprecations = PandasObject._deprecations | frozenset(["tolist"])
_typ = "categorical"
def __init__(
self, values, categories=None, ordered=None, dtype=None, fastpath=False
):
dtype = CategoricalDtype._from_values_or_dtype(
values, categories, ordered, dtype
)
# At this point, dtype is always a CategoricalDtype, but
# we may have dtype.categories be None, and we need to
# infer categories in a factorization step further below
if fastpath:
self._codes = coerce_indexer_dtype(values, dtype.categories)
self._dtype = self._dtype.update_dtype(dtype)
return
# null_mask indicates missing values we want to exclude from inference.
# This means: only missing values in list-likes (not arrays/ndframes).
null_mask = np.array(False)
# sanitize input
if is_categorical_dtype(values):
if dtype.categories is None:
dtype = CategoricalDtype(values.categories, dtype.ordered)
elif not isinstance(values, (ABCIndexClass, ABCSeries)):
# sanitize_array coerces np.nan to a string under certain versions
# of numpy
values = maybe_infer_to_datetimelike(values, convert_dates=True)
if not isinstance(values, np.ndarray):
values = _convert_to_list_like(values)
# By convention, empty lists result in object dtype:
sanitize_dtype = np.dtype("O") if len(values) == 0 else None
null_mask = isna(values)
if null_mask.any():
values = [values[idx] for idx in np.where(~null_mask)[0]]
values = sanitize_array(values, None, dtype=sanitize_dtype)
if dtype.categories is None:
try:
codes, categories = factorize(values, sort=True)
except TypeError as err:
codes, categories = factorize(values, sort=False)
if dtype.ordered:
# raise, as we don't have a sortable data structure and so
# the user should give us one by specifying categories
raise TypeError(
"'values' is not ordered, please "
"explicitly specify the categories order "
"by passing in a categories argument."
) from err
except ValueError as err:
# FIXME
raise NotImplementedError(
"> 1 ndim Categorical are not supported at this time"
) from err
# we're inferring from values
dtype = CategoricalDtype(categories, dtype.ordered)
elif is_categorical_dtype(values.dtype):
old_codes = (
values._values.codes if isinstance(values, ABCSeries) else values.codes
)
codes = recode_for_categories(
old_codes, values.dtype.categories, dtype.categories
)
else:
codes = _get_codes_for_values(values, dtype.categories)
if null_mask.any():
# Reinsert -1 placeholders for previously removed missing values
full_codes = -np.ones(null_mask.shape, dtype=codes.dtype)
full_codes[~null_mask] = codes
codes = full_codes
self._dtype = self._dtype.update_dtype(dtype)
self._codes = coerce_indexer_dtype(codes, dtype.categories)
@property
def categories(self):
"""
The categories of this categorical.
Setting assigns new values to each category (effectively a rename of
each individual category).
The assigned value has to be a list-like object. All items must be
unique and the number of items in the new categories must be the same
as the number of items in the old categories.
        Assigning to `categories` is an in-place operation!
Raises
------
ValueError
If the new categories do not validate as categories or if the
number of new categories is unequal the number of old categories
See Also
--------
rename_categories : Rename categories.
reorder_categories : Reorder categories.
add_categories : Add new categories.
remove_categories : Remove the specified categories.
remove_unused_categories : Remove categories which are not used.
set_categories : Set the categories to the specified ones.
"""
return self.dtype.categories
@categories.setter
def categories(self, categories):
new_dtype = CategoricalDtype(categories, ordered=self.ordered)
if self.dtype.categories is not None and len(self.dtype.categories) != len(
new_dtype.categories
):
raise ValueError(
"new categories need to have the same number of "
"items as the old categories!"
)
self._dtype = new_dtype
@property
def ordered(self) -> Ordered:
"""
Whether the categories have an ordered relationship.
"""
return self.dtype.ordered
@property
def dtype(self) -> CategoricalDtype:
"""
The :class:`~pandas.api.types.CategoricalDtype` for this instance.
"""
return self._dtype
@property
def _constructor(self) -> Type["Categorical"]:
return Categorical
@classmethod
def _from_sequence(cls, scalars, dtype=None, copy=False):
return Categorical(scalars, dtype=dtype)
def _formatter(self, boxed=False):
# Defer to CategoricalFormatter's formatter.
return None
def copy(self) -> "Categorical":
"""
Copy constructor.
"""
return self._constructor(
values=self._codes.copy(), dtype=self.dtype, fastpath=True
)
def astype(self, dtype: Dtype, copy: bool = True) -> ArrayLike:
"""
Coerce this type to another dtype
Parameters
----------
dtype : numpy dtype or pandas type
copy : bool, default True
By default, astype always returns a newly allocated object.
If copy is set to False and dtype is categorical, the original
object is returned.
"""
if is_categorical_dtype(dtype):
dtype = cast(Union[str, CategoricalDtype], dtype)
# GH 10696/18593
dtype = self.dtype.update_dtype(dtype)
self = self.copy() if copy else self
if dtype == self.dtype:
return self
return self._set_dtype(dtype)
if is_extension_array_dtype(dtype):
return array(self, dtype=dtype, copy=copy) # type: ignore # GH 28770
if is_integer_dtype(dtype) and self.isna().any():
raise ValueError("Cannot convert float NaN to integer")
return np.array(self, dtype=dtype, copy=copy)
@cache_readonly
def size(self) -> int:
"""
Return the number of elements in the Categorical.
"""
return self._codes.size
@cache_readonly
def itemsize(self) -> int:
"""
Return the size of a single category.
"""
return self.categories.itemsize
def tolist(self) -> List[Scalar]:
"""
Return a list of the values.
These are each a scalar type, which is a Python scalar
(for str, int, float) or a pandas scalar
(for Timestamp/Timedelta/Interval/Period)
"""
return list(self)
to_list = tolist
@classmethod
def _from_inferred_categories(
cls, inferred_categories, inferred_codes, dtype, true_values=None
):
"""
Construct a Categorical from inferred values.
For inferred categories (`dtype` is None) the categories are sorted.
For explicit `dtype`, the `inferred_categories` are cast to the
appropriate type.
Parameters
----------
inferred_categories : Index
inferred_codes : Index
dtype : CategoricalDtype or 'category'
true_values : list, optional
If none are provided, the default ones are
"True", "TRUE", and "true."
Returns
-------
Categorical
"""
from pandas import Index, to_numeric, to_datetime, to_timedelta
cats = Index(inferred_categories)
known_categories = (
isinstance(dtype, CategoricalDtype) and dtype.categories is not None
)
if known_categories:
# Convert to a specialized type with `dtype` if specified.
if dtype.categories.is_numeric():
cats = to_numeric(inferred_categories, errors="coerce")
elif is_datetime64_dtype(dtype.categories):
cats = to_datetime(inferred_categories, errors="coerce")
elif is_timedelta64_dtype(dtype.categories):
cats = to_timedelta(inferred_categories, errors="coerce")
elif dtype.categories.is_boolean():
if true_values is None:
true_values = ["True", "TRUE", "true"]
cats = cats.isin(true_values)
if known_categories:
# Recode from observation order to dtype.categories order.
categories = dtype.categories
codes = recode_for_categories(inferred_codes, cats, categories)
elif not cats.is_monotonic_increasing:
# Sort categories and recode for unknown categories.
unsorted = cats.copy()
categories = cats.sort_values()
codes = recode_for_categories(inferred_codes, unsorted, categories)
dtype = CategoricalDtype(categories, ordered=False)
else:
dtype = CategoricalDtype(cats, ordered=False)
codes = inferred_codes
return cls(codes, dtype=dtype, fastpath=True)
@classmethod
def from_codes(cls, codes, categories=None, ordered=None, dtype=None):
"""
Make a Categorical type from codes and categories or dtype.
This constructor is useful if you already have codes and
categories/dtype and so do not need the (computation intensive)
factorization step, which is usually done on the constructor.
If your data does not follow this convention, please use the normal
constructor.
Parameters
----------
codes : array-like of int
An integer array, where each integer points to a category in
categories or dtype.categories, or else is -1 for NaN.
categories : index-like, optional
The categories for the categorical. Items need to be unique.
If the categories are not given here, then they must be provided
in `dtype`.
ordered : bool, optional
Whether or not this categorical is treated as an ordered
categorical. If not given here or in `dtype`, the resulting
categorical will be unordered.
dtype : CategoricalDtype or "category", optional
If :class:`CategoricalDtype`, cannot be used together with
`categories` or `ordered`.
.. versionadded:: 0.24.0
When `dtype` is provided, neither `categories` nor `ordered`
should be provided.
Returns
-------
Categorical
Examples
--------
>>> dtype = pd.CategoricalDtype(['a', 'b'], ordered=True)
>>> pd.Categorical.from_codes(codes=[0, 1, 0, 1], dtype=dtype)
[a, b, a, b]
Categories (2, object): [a < b]
"""
dtype = CategoricalDtype._from_values_or_dtype(
categories=categories, ordered=ordered, dtype=dtype
)
if dtype.categories is None:
msg = (
"The categories must be provided in 'categories' or "
"'dtype'. Both were None."
)
raise ValueError(msg)
if is_extension_array_dtype(codes) and is_integer_dtype(codes):
# Avoid the implicit conversion of Int to object
if isna(codes).any():
raise ValueError("codes cannot contain NA values")
codes = codes.to_numpy(dtype=np.int64)
else:
codes = np.asarray(codes)
if len(codes) and not is_integer_dtype(codes):
raise ValueError("codes need to be array-like integers")
if len(codes) and (codes.max() >= len(dtype.categories) or codes.min() < -1):
raise ValueError("codes need to be between -1 and len(categories)-1")
return cls(codes, dtype=dtype, fastpath=True)
@property
def codes(self) -> np.ndarray:
"""
The category codes of this categorical.
Codes are an array of integers which are the positions of the actual
values in the categories array.
There is no setter, use the other categorical methods and the normal item
setter to change values in the categorical.
Returns
-------
ndarray[int]
A non-writable view of the `codes` array.
"""
v = self._codes.view()
v.flags.writeable = False
return v
def _set_categories(self, categories, fastpath=False):
"""
Sets new categories inplace
Parameters
----------
fastpath : bool, default False
Don't perform validation of the categories for uniqueness or nulls
Examples
--------
>>> c = pd.Categorical(['a', 'b'])
>>> c
[a, b]
Categories (2, object): [a, b]
>>> c._set_categories(pd.Index(['a', 'c']))
>>> c
[a, c]
Categories (2, object): [a, c]
"""
if fastpath:
new_dtype = CategoricalDtype._from_fastpath(categories, self.ordered)
else:
new_dtype = CategoricalDtype(categories, ordered=self.ordered)
if (
not fastpath
and self.dtype.categories is not None
and len(new_dtype.categories) != len(self.dtype.categories)
):
raise ValueError(
"new categories need to have the same number of "
"items than the old categories!"
)
self._dtype = new_dtype
def _set_dtype(self, dtype: CategoricalDtype) -> "Categorical":
"""
Internal method for directly updating the CategoricalDtype
Parameters
----------
dtype : CategoricalDtype
Notes
-----
We don't do any validation here. It's assumed that the dtype is
a (valid) instance of `CategoricalDtype`.
"""
codes = recode_for_categories(self.codes, self.categories, dtype.categories)
return type(self)(codes, dtype=dtype, fastpath=True)
def set_ordered(self, value, inplace=False):
"""
Set the ordered attribute to the boolean value.
Parameters
----------
value : bool
Set whether this categorical is ordered (True) or not (False).
inplace : bool, default False
Whether or not to set the ordered attribute in-place or return
a copy of this categorical with ordered set to the value.
"""
inplace = validate_bool_kwarg(inplace, "inplace")
new_dtype = CategoricalDtype(self.categories, ordered=value)
cat = self if inplace else self.copy()
cat._dtype = new_dtype
if not inplace:
return cat
def as_ordered(self, inplace=False):
"""
Set the Categorical to be ordered.
Parameters
----------
inplace : bool, default False
Whether or not to set the ordered attribute in-place or return
a copy of this categorical with ordered set to True.
Returns
-------
Categorical
Ordered Categorical.
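Examples
--------
Illustrative only; repr follows the style used elsewhere in this file.
>>> pd.Categorical(['a', 'b', 'a']).as_ordered()
[a, b, a]
Categories (2, object): [a < b]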
"""
inplace = validate_bool_kwarg(inplace, "inplace")
return self.set_ordered(True, inplace=inplace)
def as_unordered(self, inplace=False):
"""
Set the Categorical to be unordered.
Parameters
----------
inplace : bool, default False
Whether or not to set the ordered attribute in-place or return
a copy of this categorical with ordered set to False.
Returns
-------
Categorical
Unordered Categorical.
"""
inplace = validate_bool_kwarg(inplace, "inplace")
return self.set_ordered(False, inplace=inplace)
def set_categories(self, new_categories, ordered=None, rename=False, inplace=False):
"""
Set the categories to the specified new_categories.
`new_categories` can include new categories (which will result in
unused categories) or remove old categories (which results in values
set to NaN). If `rename==True`, the categories will simply be renamed
(fewer or more items than in the old categories will result in values
set to NaN or in unused categories, respectively).
This method can be used to perform more than one action of adding,
removing, and reordering simultaneously and is therefore faster than
performing the individual steps via the more specialised methods.
On the other hand this method does not do checks (e.g., whether the
old categories are included in the new categories on a reorder), which
can result in surprising changes, for example when using special string
dtypes, which do not consider an S1 string equal to a single-char
Python string.
Parameters
----------
new_categories : Index-like
The categories in new order.
ordered : bool, default False
Whether or not the categorical is treated as an ordered categorical.
If not given, do not change the ordered information.
rename : bool, default False
Whether or not the new_categories should be considered as a rename
of the old categories or as reordered categories.
inplace : bool, default False
Whether or not to reorder the categories in-place or return a copy
of this categorical with reordered categories.
Returns
-------
Categorical with reordered categories or None if inplace.
Raises
------
ValueError
If new_categories does not validate as categories
See Also
--------
rename_categories : Rename categories.
reorder_categories : Reorder categories.
add_categories : Add new categories.
remove_categories : Remove the specified categories.
remove_unused_categories : Remove categories which are not used.
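Examples
--------
Illustrative sketch: adding an unused category and reordering in one call.
>>> c = pd.Categorical(['a', 'b', 'a'])
>>> c.set_categories(['b', 'a', 'c'])
[a, b, a]
Categories (3, object): [b, a, c]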
"""
inplace = validate_bool_kwarg(inplace, "inplace")
if ordered is None:
ordered = self.dtype.ordered
new_dtype = CategoricalDtype(new_categories, ordered=ordered)
cat = self if inplace else self.copy()
if rename:
if cat.dtype.categories is not None and len(new_dtype.categories) < len(
cat.dtype.categories
):
# remove all _codes which are larger and set to -1/NaN
cat._codes[cat._codes >= len(new_dtype.categories)] = -1
else:
codes = recode_for_categories(
cat.codes, cat.categories, new_dtype.categories
)
cat._codes = codes
cat._dtype = new_dtype
if not inplace:
return cat
def rename_categories(self, new_categories, inplace=False):
"""
Rename categories.
Parameters
----------
new_categories : list-like, dict-like or callable
New categories which will replace old categories.
* list-like: all items must be unique and the number of items in
the new categories must match the existing number of categories.
* dict-like: specifies a mapping from
old categories to new. Categories not contained in the mapping
are passed through and extra categories in the mapping are
ignored.
* callable : a callable that is called on all items in the old
categories and whose return values comprise the new categories.
.. versionadded:: 0.23.0.
inplace : bool, default False
Whether or not to rename the categories inplace or return a copy of
this categorical with renamed categories.
Returns
-------
cat : Categorical or None
With ``inplace=False``, the new categorical is returned.
With ``inplace=True``, there is no return value.
Raises
------
ValueError
If new categories are list-like and do not have the same number of
items as the current categories or do not validate as categories
See Also
--------
reorder_categories : Reorder categories.
add_categories : Add new categories.
remove_categories : Remove the specified categories.
remove_unused_categories : Remove categories which are not used.
set_categories : Set the categories to the specified ones.
Examples
--------
>>> c = pd.Categorical(['a', 'a', 'b'])
>>> c.rename_categories([0, 1])
[0, 0, 1]
Categories (2, int64): [0, 1]
For dict-like ``new_categories``, extra keys are ignored and
categories not in the dictionary are passed through
>>> c.rename_categories({'a': 'A', 'c': 'C'})
[A, A, b]
Categories (2, object): [A, b]
You may also provide a callable to create the new categories
>>> c.rename_categories(lambda x: x.upper())
[A, A, B]
Categories (2, object): [A, B]
"""
inplace = validate_bool_kwarg(inplace, "inplace")
cat = self if inplace else self.copy()
if is_dict_like(new_categories):
cat.categories = [new_categories.get(item, item) for item in cat.categories]
elif callable(new_categories):
cat.categories = [new_categories(item) for item in cat.categories]
else:
cat.categories = new_categories
if not inplace:
return cat
def reorder_categories(self, new_categories, ordered=None, inplace=False):
"""
Reorder categories as specified in new_categories.
`new_categories` need to include all old categories and no new category
items.
Parameters
----------
new_categories : Index-like
The categories in new order.
ordered : bool, optional
Whether or not the categorical is treated as an ordered categorical.
If not given, do not change the ordered information.
inplace : bool, default False
Whether or not to reorder the categories inplace or return a copy of
this categorical with reordered categories.
Returns
-------
cat : Categorical with reordered categories or None if inplace.
Raises
------
ValueError
If the new categories do not contain all old category items or any
new ones
See Also
--------
rename_categories : Rename categories.
add_categories : Add new categories.
remove_categories : Remove the specified categories.
remove_unused_categories : Remove categories which are not used.
set_categories : Set the categories to the specified ones.
"""
inplace = validate_bool_kwarg(inplace, "inplace")
if set(self.dtype.categories) != set(new_categories):
raise ValueError(
"items in new_categories are not the same as in old categories"
)
return self.set_categories(new_categories, ordered=ordered, inplace=inplace)
def add_categories(self, new_categories, inplace=False):
"""
Add new categories.
`new_categories` will be included at the last/highest place in the
categories and will be unused directly after this call.
Parameters
----------
new_categories : category or list-like of category
The new categories to be included.
inplace : bool, default False
Whether or not to add the categories inplace or return a copy of
this categorical with added categories.
Returns
-------
cat : Categorical with new categories added or None if inplace.
Raises
------
ValueError
If the new categories include old categories or do not validate as
categories
See Also
--------
rename_categories : Rename categories.
reorder_categories : Reorder categories.
remove_categories : Remove the specified categories.
remove_unused_categories : Remove categories which are not used.
set_categories : Set the categories to the specified ones.
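Examples
--------
Illustrative only; the added category is unused until values are assigned.
>>> c = pd.Categorical(['a', 'b'])
>>> c.add_categories('c')
[a, b]
Categories (3, object): [a, b, c]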
"""
inplace = validate_bool_kwarg(inplace, "inplace")
if not is_list_like(new_categories):
new_categories = [new_categories]
already_included = set(new_categories) & set(self.dtype.categories)
if len(already_included) != 0:
raise ValueError(
f"new categories must not include old categories: {already_included}"
)
new_categories = list(self.dtype.categories) + list(new_categories)
new_dtype = CategoricalDtype(new_categories, self.ordered)
cat = self if inplace else self.copy()
cat._dtype = new_dtype
cat._codes = coerce_indexer_dtype(cat._codes, new_dtype.categories)
if not inplace:
return cat
def remove_categories(self, removals, inplace=False):
"""
Remove the specified categories.
`removals` must be included in the old categories. Values which were in
the removed categories will be set to NaN
Parameters
----------
removals : category or list of categories
The categories which should be removed.
inplace : bool, default False
Whether or not to remove the categories inplace or return a copy of
this categorical with removed categories.
Returns
-------
cat : Categorical with removed categories or None if inplace.
Raises
------
ValueError
If the removals are not contained in the categories
See Also
--------
rename_categories : Rename categories.
reorder_categories : Reorder categories.
add_categories : Add new categories.
remove_unused_categories : Remove categories which are not used.
set_categories : Set the categories to the specified ones.
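Examples
--------
Illustrative only; values in removed categories become NaN.
>>> c = pd.Categorical(['a', 'b', 'a'])
>>> c.remove_categories('b')
[a, NaN, a]
Categories (1, object): [a]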
"""
inplace = validate_bool_kwarg(inplace, "inplace")
if not is_list_like(removals):
removals = [removals]
removal_set = set(removals)
not_included = removal_set - set(self.dtype.categories)
new_categories = [c for c in self.dtype.categories if c not in removal_set]
# GH 10156
if any(isna(removals)):
not_included = {x for x in not_included if notna(x)}
new_categories = [x for x in new_categories if notna(x)]
if len(not_included) != 0:
raise ValueError(f"removals must all be in old categories: {not_included}")
return self.set_categories(
new_categories, ordered=self.ordered, rename=False, inplace=inplace
)
def remove_unused_categories(self, inplace=False):
"""
Remove categories which are not used.
Parameters
----------
inplace : bool, default False
Whether or not to drop unused categories inplace or return a copy of
this categorical with unused categories dropped.
Returns
-------
cat : Categorical with unused categories dropped or None if inplace.
See Also
--------
rename_categories : Rename categories.
reorder_categories : Reorder categories.
add_categories : Add new categories.
remove_categories : Remove the specified categories.
set_categories : Set the categories to the specified ones.
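Examples
--------
Illustrative only.
>>> c = pd.Categorical(['a', 'b'], categories=['a', 'b', 'c'])
>>> c.remove_unused_categories()
[a, b]
Categories (2, object): [a, b]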
"""
inplace = validate_bool_kwarg(inplace, "inplace")
cat = self if inplace else self.copy()
idx, inv = np.unique(cat._codes, return_inverse=True)
if idx.size != 0 and idx[0] == -1: # na sentinel
idx, inv = idx[1:], inv - 1
new_categories = cat.dtype.categories.take(idx)
new_dtype = CategoricalDtype._from_fastpath(
new_categories, ordered=self.ordered
)
cat._dtype = new_dtype
cat._codes = coerce_indexer_dtype(inv, new_dtype.categories)
if not inplace:
return cat
def map(self, mapper):
"""
Map categories using input correspondence (dict, Series, or function).
Maps the categories to new categories. If the mapping correspondence is
one-to-one the result is a :class:`~pandas.Categorical` which has the
same order property as the original, otherwise a :class:`~pandas.Index`
is returned. NaN values are unaffected.
If a `dict` or :class:`~pandas.Series` is used any unmapped category is
mapped to `NaN`. Note that if this happens an :class:`~pandas.Index`
will be returned.
Parameters
----------
mapper : function, dict, or Series
Mapping correspondence.
Returns
-------
pandas.Categorical or pandas.Index
Mapped categorical.
See Also
--------
CategoricalIndex.map : Apply a mapping correspondence on a
:class:`~pandas.CategoricalIndex`.
Index.map : Apply a mapping correspondence on an
:class:`~pandas.Index`.
Series.map : Apply a mapping correspondence on a
:class:`~pandas.Series`.
Series.apply : Apply more complex functions on a
:class:`~pandas.Series`.
Examples
--------
>>> cat = pd.Categorical(['a', 'b', 'c'])
>>> cat
[a, b, c]
Categories (3, object): [a, b, c]
>>> cat.map(lambda x: x.upper())
[A, B, C]
Categories (3, object): [A, B, C]
>>> cat.map({'a': 'first', 'b': 'second', 'c': 'third'})
[first, second, third]
Categories (3, object): [first, second, third]
If the mapping is one-to-one the ordering of the categories is
preserved:
>>> cat = pd.Categorical(['a', 'b', 'c'], ordered=True)
>>> cat
[a, b, c]
Categories (3, object): [a < b < c]
>>> cat.map({'a': 3, 'b': 2, 'c': 1})
[3, 2, 1]
Categories (3, int64): [3 < 2 < 1]
If the mapping is not one-to-one an :class:`~pandas.Index` is returned:
>>> cat.map({'a': 'first', 'b': 'second', 'c': 'first'})
Index(['first', 'second', 'first'], dtype='object')
If a `dict` is used, all unmapped categories are mapped to `NaN` and
the result is an :class:`~pandas.Index`:
>>> cat.map({'a': 'first', 'b': 'second'})
Index(['first', 'second', nan], dtype='object')
"""
new_categories = self.categories.map(mapper)
try:
return self.from_codes(
self._codes.copy(), categories=new_categories, ordered=self.ordered
)
except ValueError:
# NA values are represented in self._codes with -1
# np.take causes NA values to take final element in new_categories
if np.any(self._codes == -1):
new_categories = new_categories.insert(len(new_categories), np.nan)
return np.take(new_categories, self._codes)
__eq__ = _cat_compare_op(operator.eq)
__ne__ = _cat_compare_op(operator.ne)
__lt__ = _cat_compare_op(operator.lt)
__gt__ = _cat_compare_op(operator.gt)
__le__ = _cat_compare_op(operator.le)
__ge__ = _cat_compare_op(operator.ge)
# for Series/ndarray like compat
@property
def shape(self):
"""
Shape of the Categorical.
For internal compatibility with numpy arrays.
Returns
-------
shape : tuple
"""
return tuple([len(self._codes)])
def shift(self, periods, fill_value=None):
"""
Shift Categorical by desired number of periods.
Parameters
----------
periods : int
Number of periods to move, can be positive or negative
fill_value : object, optional
The scalar value to use for newly introduced missing values.
.. versionadded:: 0.24.0
Returns
-------
shifted : Categorical
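Examples
--------
Illustrative only; vacated positions are filled with NaN by default.
>>> pd.Categorical(['a', 'b', 'c']).shift(1)
[NaN, a, b]
Categories (3, object): [a, b, c]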
"""
# since categoricals always have ndim == 1, an axis parameter
# doesn't make any sense here.
codes = self.codes
if codes.ndim > 1:
raise NotImplementedError("Categorical with ndim > 1.")
fill_value = self._validate_fill_value(fill_value)
codes = shift(codes.copy(), periods, axis=0, fill_value=fill_value)
return self._constructor(codes, dtype=self.dtype, fastpath=True)
def _validate_fill_value(self, fill_value):
"""
Convert a user-facing fill_value to a representation to use with our
underlying ndarray, raising ValueError if this is not possible.
Parameters
----------
fill_value : object
Returns
-------
fill_value : int
Raises
------
ValueError
"""
if isna(fill_value):
fill_value = -1
elif fill_value in self.categories:
fill_value = self.categories.get_loc(fill_value)
else:
raise ValueError(
f"'fill_value={fill_value}' is not present "
"in this Categorical's categories"
)
return fill_value
def __array__(self, dtype=None) -> np.ndarray:
"""
The numpy array interface.
Returns
-------
numpy.array
A numpy array of either the specified dtype or,
if dtype==None (default), the same dtype as
categorical.categories.dtype.
"""
ret = take_1d(self.categories.values, self._codes)
if dtype and not is_dtype_equal(dtype, self.categories.dtype):
return np.asarray(ret, dtype)
if is_extension_array_dtype(ret):
# When we're a Categorical[ExtensionArray], like Interval,
# we need to ensure __array__ gets all the way to an
# ndarray.
ret = np.asarray(ret)
return ret
def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
# for binary ops, use our custom dunder methods
result = ops.maybe_dispatch_ufunc_to_dunder_op(
self, ufunc, method, *inputs, **kwargs
)
if result is not NotImplemented:
return result
# for all other cases, raise for now (similarly as what happens in
# Series.__array_prepare__)
raise TypeError(
f"Object with dtype {self.dtype} cannot perform "
f"the numpy op {ufunc.__name__}"
)
def __setstate__(self, state):
"""Necessary for making this object picklable"""
if not isinstance(state, dict):
raise Exception("invalid pickle state")
if "_dtype" not in state:
state["_dtype"] = CategoricalDtype(state["_categories"], state["_ordered"])
for k, v in state.items():
setattr(self, k, v)
@property
def T(self) -> "Categorical":
"""
Return transposed numpy array.
"""
return self
@property
def nbytes(self):
return self._codes.nbytes + self.dtype.categories.values.nbytes
def memory_usage(self, deep=False):
"""
Memory usage of my values
Parameters
----------
deep : bool
Introspect the data deeply, interrogate
`object` dtypes for system-level memory consumption
Returns
-------
bytes used
Notes
-----
Memory usage does not include memory consumed by elements that
are not components of the array if deep=False
See Also
--------
numpy.ndarray.nbytes
"""
return self._codes.nbytes + self.dtype.categories.memory_usage(deep=deep)
@doc(_shared_docs["searchsorted"], klass="Categorical")
def searchsorted(self, value, side="left", sorter=None):
# searchsorted is very performance sensitive. By converting codes
# to same dtype as self.codes, we get much faster performance.
if is_scalar(value):
codes = self.categories.get_loc(value)
codes = self.codes.dtype.type(codes)
else:
locs = [self.categories.get_loc(x) for x in value]
codes = np.array(locs, dtype=self.codes.dtype)
return self.codes.searchsorted(codes, side=side, sorter=sorter)
def isna(self):
"""
Detect missing values
Missing values (-1 in .codes) are detected.
Returns
-------
a boolean array of whether my values are null
See Also
--------
isna : Top-level isna.
isnull : Alias of isna.
Categorical.notna : Boolean inverse of Categorical.isna.
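Examples
--------
Illustrative only.
>>> pd.Categorical(['a', np.nan]).isna()
array([False,  True])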
"""
ret = self._codes == -1
return ret
isnull = isna
def notna(self):
"""
Inverse of isna
Both missing values (-1 in .codes) and NA as a category are detected as
null.
Returns
-------
a boolean array of whether my values are not null
See Also
--------
notna : Top-level notna.
notnull : Alias of notna.
Categorical.isna : Boolean inverse of Categorical.notna.
"""
return ~self.isna()
notnull = notna
def dropna(self):
"""
Return the Categorical without null values.
Missing values (-1 in .codes) are detected.
Returns
-------
valid : Categorical
"""
result = self[self.notna()]
return result
def value_counts(self, dropna=True):
"""
Return a Series containing counts of each category.
Every category will have an entry, even those with a count of 0.
Parameters
----------
dropna : bool, default True
Don't include counts of NaN.
Returns
-------
counts : Series
See Also
--------
Series.value_counts
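Examples
--------
Illustrative only; note the zero count for the unused category.
>>> c = pd.Categorical(['a', 'b', 'a'], categories=['a', 'b', 'c'])
>>> c.value_counts()
a    2
b    1
c    0
dtype: int64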
"""
from pandas import Series, CategoricalIndex
code, cat = self._codes, self.categories
ncat, mask = len(cat), 0 <= code
ix, clean = np.arange(ncat), mask.all()
if dropna or clean:
obs = code if clean else code[mask]
count = np.bincount(obs, minlength=ncat or 0)
else:
count = np.bincount(np.where(mask, code, ncat))
ix = np.append(ix, -1)
ix = self._constructor(ix, dtype=self.dtype, fastpath=True)
return Series(count, index=CategoricalIndex(ix), dtype="int64")
def _internal_get_values(self):
"""
Return the values.
For internal compatibility with pandas formatting.
Returns
-------
np.ndarray or Index
A numpy array of the same dtype as categorical.categories.dtype or
Index if datetime / periods.
"""
# if we are a datetime and period index, return Index to keep metadata
if needs_i8_conversion(self.categories):
return self.categories.take(self._codes, fill_value=np.nan)
elif is_integer_dtype(self.categories) and -1 in self._codes:
return self.categories.astype("object").take(self._codes, fill_value=np.nan)
return np.array(self)
def check_for_ordered(self, op):
""" assert that we are ordered """
if not self.ordered:
raise TypeError(
f"Categorical is not ordered for operation {op}\n"
"you can use .as_ordered() to change the "
"Categorical to an ordered one\n"
)
def _values_for_argsort(self):
return self._codes
def argsort(self, ascending=True, kind="quicksort", **kwargs):
"""
Return the indices that would sort the Categorical.
.. versionchanged:: 0.25.0
Changed to sort missing values at the end.
Parameters
----------
ascending : bool, default True
Whether the indices should result in an ascending
or descending sort.
kind : {'quicksort', 'mergesort', 'heapsort'}, optional
Sorting algorithm.
**kwargs:
passed through to :func:`numpy.argsort`.
Returns
-------
numpy.array
See Also
--------
numpy.ndarray.argsort
Notes
-----
While an ordering is applied to the category values, arg-sorting
in this context refers more to organizing and grouping together
based on matching category values. Thus, this function can be
called on an unordered Categorical instance unlike the functions
'Categorical.min' and 'Categorical.max'.
Examples
--------
>>> pd.Categorical(['b', 'b', 'a', 'c']).argsort()
array([2, 0, 1, 3])
>>> cat = pd.Categorical(['b', 'b', 'a', 'c'],
... categories=['c', 'b', 'a'],
... ordered=True)
>>> cat.argsort()
array([3, 0, 1, 2])
Missing values are placed at the end
>>> cat = pd.Categorical([2, None, 1])
>>> cat.argsort()
array([2, 0, 1])
"""
return super().argsort(ascending=ascending, kind=kind, **kwargs)
def sort_values(self, inplace=False, ascending=True, na_position="last"):
"""
Sort the Categorical by category value returning a new
Categorical by default.
While an ordering is applied to the category values, sorting in this
context refers more to organizing and grouping together based on
matching category values. Thus, this function can be called on an
unordered Categorical instance unlike the functions 'Categorical.min'
and 'Categorical.max'.
Parameters
----------
inplace : bool, default False
Do operation in place.
ascending : bool, default True
Order ascending. Passing False orders descending. The
ordering parameter provides the method by which the
category values are organized.
na_position : {'first', 'last'} (optional, default='last')
'first' puts NaNs at the beginning
'last' puts NaNs at the end
Returns
-------
Categorical or None
See Also
--------
Categorical.sort
Series.sort_values
Examples
--------
>>> c = pd.Categorical([1, 2, 2, 1, 5])
>>> c
[1, 2, 2, 1, 5]
Categories (3, int64): [1, 2, 5]
>>> c.sort_values()
[1, 1, 2, 2, 5]
Categories (3, int64): [1, 2, 5]
>>> c.sort_values(ascending=False)
[5, 2, 2, 1, 1]
Categories (3, int64): [1, 2, 5]
Inplace sorting can be done as well:
>>> c.sort_values(inplace=True)
>>> c
[1, 1, 2, 2, 5]
Categories (3, int64): [1, 2, 5]
>>>
>>> c = pd.Categorical([1, 2, 2, 1, 5])
'sort_values' behaviour with NaNs. Note that 'na_position'
is independent of the 'ascending' parameter:
>>> c = pd.Categorical([np.nan, 2, 2, np.nan, 5])
>>> c
[NaN, 2, 2, NaN, 5]
Categories (2, int64): [2, 5]
>>> c.sort_values()
[2, 2, 5, NaN, NaN]
Categories (2, int64): [2, 5]
>>> c.sort_values(ascending=False)
[5, 2, 2, NaN, NaN]
Categories (2, int64): [2, 5]
>>> c.sort_values(na_position='first')
[NaN, NaN, 2, 2, 5]
Categories (2, int64): [2, 5]
>>> c.sort_values(ascending=False, na_position='first')
[NaN, NaN, 5, 2, 2]
Categories (2, int64): [2, 5]
"""
inplace = validate_bool_kwarg(inplace, "inplace")
if na_position not in ["last", "first"]:
raise ValueError(f"invalid na_position: {repr(na_position)}")
sorted_idx = nargsort(self, ascending=ascending, na_position=na_position)
if inplace:
self._codes = self._codes[sorted_idx]
else:
return self._constructor(
values=self._codes[sorted_idx], dtype=self.dtype, fastpath=True
)
def _values_for_rank(self):
"""
For correctly ranking ordered categorical data. See GH#15420
Ordered categorical data should be ranked on the basis of
codes with -1 translated to NaN.
Returns
-------
numpy.array
"""
from pandas import Series
if self.ordered:
values = self.codes
mask = values == -1
if mask.any():
values = values.astype("float64")
values[mask] = np.nan
elif self.categories.is_numeric():
values = np.array(self)
else:
# reorder the categories (so rank can use the float codes)
# instead of passing an object array to rank
values = np.array(
self.rename_categories(Series(self.categories).rank().values)
)
return values
def view(self, dtype=None):
if dtype is not None:
raise NotImplementedError(dtype)
return self._constructor(values=self._codes, dtype=self.dtype, fastpath=True)
def to_dense(self):
"""
Return my 'dense' representation
For internal compatibility with numpy arrays.
Returns
-------
dense : array
"""
warn(
"Categorical.to_dense is deprecated and will be removed in "
"a future version. Use np.asarray(cat) instead.",
FutureWarning,
stacklevel=2,
)
return np.asarray(self)
def fillna(self, value=None, method=None, limit=None):
"""
Fill NA/NaN values using the specified method.
Parameters
----------
value : scalar, dict, Series
If a scalar value is passed it is used to fill all missing values.
Alternatively, a Series or dict can be used to fill in different
values for each index. The value should not be a list. The
value(s) passed should either be in the categories or should be
NaN.
method : {'backfill', 'bfill', 'pad', 'ffill', None}, default None
Method to use for filling holes in reindexed Series
pad / ffill: propagate last valid observation forward to next valid
backfill / bfill: use NEXT valid observation to fill gap
limit : int, default None
(Not implemented yet for Categorical!)
If method is specified, this is the maximum number of consecutive
NaN values to forward/backward fill. In other words, if there is
a gap with more than this number of consecutive NaNs, it will only
be partially filled. If method is not specified, this is the
maximum number of entries along the entire axis where NaNs will be
filled.
Returns
-------
filled : Categorical with NA/NaN filled
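Examples
--------
Illustrative only; the fill value must already be one of the categories.
>>> c = pd.Categorical(['a', np.nan, 'b'])
>>> c.fillna('a')
[a, a, b]
Categories (2, object): [a, b]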
"""
value, method = validate_fillna_kwargs(
value, method, validate_scalar_dict_value=False
)
if value is None:
value = np.nan
if limit is not None:
raise NotImplementedError(
"specifying a limit for fillna has not been implemented yet"
)
codes = self._codes
# pad / bfill
if method is not None:
# TODO: dispatch when self.categories is EA-dtype
values = np.asarray(self).reshape(-1, len(self))
values = interpolate_2d(values, method, 0, None, value).astype(
self.categories.dtype
)[0]
codes = _get_codes_for_values(values, self.categories)
else:
# If value is a dict or a Series (a dict value has already
# been converted to a Series)
if isinstance(value, (np.ndarray, Categorical, ABCSeries)):
# We get ndarray or Categorical if called via Series.fillna,
# where it will unwrap another aligned Series before getting here
mask = ~algorithms.isin(value, self.categories)
if not isna(value[mask]).all():
raise ValueError("fill value must be in categories")
values_codes = _get_codes_for_values(value, self.categories)
indexer = np.where(codes == -1)
codes = codes.copy()
codes[indexer] = values_codes[indexer]
# If value is not a dict or Series it should be a scalar
elif is_hashable(value):
if not isna(value) and value not in self.categories:
raise ValueError("fill value must be in categories")
mask = codes == -1
if mask.any():
codes = codes.copy()
if isna(value):
codes[mask] = -1
else:
codes[mask] = self.categories.get_loc(value)
else:
raise TypeError(
f"'value' parameter must be a scalar, dict "
f"or Series, but you passed a {type(value).__name__}"
)
return self._constructor(codes, dtype=self.dtype, fastpath=True)
def take(self, indexer, allow_fill: bool = False, fill_value=None):
"""
Take elements from the Categorical.
Parameters
----------
indexer : sequence of int
The indices in `self` to take. The meaning of negative values in
`indexer` depends on the value of `allow_fill`.
allow_fill : bool, default False
How to handle negative values in `indexer`.
* False: negative values in `indices` indicate positional indices
from the right. This is similar to
:func:`numpy.take`.
* True: negative values in `indices` indicate missing values
(the default). These values are set to `fill_value`. Any other
negative values raise a ``ValueError``.
.. versionchanged:: 1.0.0
Default value changed from ``True`` to ``False``.
fill_value : object
The value to use for `indices` that are missing (-1), when
``allow_fill=True``. This should be the category, i.e. a value
in ``self.categories``, not a code.
Returns
-------
Categorical
This Categorical will have the same categories and ordered as
`self`.
See Also
--------
Series.take : Similar method for Series.
numpy.ndarray.take : Similar method for NumPy arrays.
Examples
--------
>>> cat = pd.Categorical(['a', 'a', 'b'])
>>> cat
[a, a, b]
Categories (2, object): [a, b]
Specify ``allow_fill==False`` to have negative indices mean indexing
from the right.
>>> cat.take([0, -1, -2], allow_fill=False)
[a, b, a]
Categories (2, object): [a, b]
With ``allow_fill=True``, indices equal to ``-1`` mean "missing"
values that should be filled with the `fill_value`, which is
``np.nan`` by default.
>>> cat.take([0, -1, -1], allow_fill=True)
[a, NaN, NaN]
Categories (2, object): [a, b]
The fill value can be specified.
>>> cat.take([0, -1, -1], allow_fill=True, fill_value='a')
[a, a, a]
Categories (2, object): [a, b]
Specifying a fill value that's not in ``self.categories``
will raise a ``ValueError``.
"""
indexer = np.asarray(indexer, dtype=np.intp)
if allow_fill:
# convert user-provided `fill_value` to codes
fill_value = self._validate_fill_value(fill_value)
codes = take(self._codes, indexer, allow_fill=allow_fill, fill_value=fill_value)
return self._constructor(codes, dtype=self.dtype, fastpath=True)
def take_nd(self, indexer, allow_fill: bool = False, fill_value=None):
# GH#27745 deprecate alias that other EAs don't have
warn(
"Categorical.take_nd is deprecated, use Categorical.take instead",
FutureWarning,
stacklevel=2,
)
return self.take(indexer, allow_fill=allow_fill, fill_value=fill_value)
def __len__(self) -> int:
"""
The length of this Categorical.
"""
return len(self._codes)
def __iter__(self):
"""
Returns an Iterator over the values of this Categorical.
"""
return iter(self._internal_get_values().tolist())
def __contains__(self, key) -> bool:
"""
Returns True if `key` is in this Categorical.
"""
# if key is a NaN, check if any NaN is in self.
if is_scalar(key) and isna(key):
return self.isna().any()
return contains(self, key, container=self._codes)
def _tidy_repr(self, max_vals=10, footer=True) -> str:
"""
a short repr displaying only max_vals and an optional (but default)
footer
"""
num = max_vals // 2
head = self[:num]._get_repr(length=False, footer=False)
tail = self[-(max_vals - num) :]._get_repr(length=False, footer=False)
result = f"{head[:-1]}, ..., {tail[1:]}"
if footer:
result = f"{result}\n{self._repr_footer()}"
return str(result)
def _repr_categories(self):
"""
return the base repr for the categories
"""
max_categories = (
10
if get_option("display.max_categories") == 0
else get_option("display.max_categories")
)
from pandas.io.formats import format as fmt
if len(self.categories) > max_categories:
num = max_categories // 2
head = fmt.format_array(self.categories[:num], None)
tail = fmt.format_array(self.categories[-num:], None)
category_strs = head + ["..."] + tail
else:
category_strs = fmt.format_array(self.categories, None)
# Strip all leading spaces, which format_array adds for columns...
category_strs = [x.strip() for x in category_strs]
return category_strs
def _repr_categories_info(self) -> str:
"""
Returns a string representation of the footer.
"""
category_strs = self._repr_categories()
dtype = str(self.categories.dtype)
levheader = f"Categories ({len(self.categories)}, {dtype}): "
width, height = get_terminal_size()
max_width = get_option("display.width") or width
if console.in_ipython_frontend():
# 0 = no breaks
max_width = 0
levstring = ""
start = True
cur_col_len = len(levheader) # header
sep_len, sep = (3, " < ") if self.ordered else (2, ", ")
linesep = sep.rstrip() + "\n" # remove whitespace
for val in category_strs:
if max_width != 0 and cur_col_len + sep_len + len(val) > max_width:
levstring += linesep + (" " * (len(levheader) + 1))
cur_col_len = len(levheader) + 1 # header + a whitespace
elif not start:
levstring += sep
cur_col_len += len(val)
levstring += val
start = False
# replace " < ... < " with " ... " to save space
return levheader + "[" + levstring.replace(" < ... < ", " ... ") + "]"
def _repr_footer(self) -> str:
info = self._repr_categories_info()
return f"Length: {len(self)}\n{info}"
def _get_repr(self, length=True, na_rep="NaN", footer=True) -> str:
from pandas.io.formats import format as fmt
formatter = fmt.CategoricalFormatter(
self, length=length, na_rep=na_rep, footer=footer
)
result = formatter.to_string()
return str(result)
def __repr__(self) -> str:
"""
String representation.
"""
_maxlen = 10
if len(self._codes) > _maxlen:
result = self._tidy_repr(_maxlen)
elif len(self._codes) > 0:
result = self._get_repr(length=len(self) > _maxlen)
else:
msg = self._get_repr(length=False, footer=True).replace("\n", ", ")
result = f"[], {msg}"
return result
def _maybe_coerce_indexer(self, indexer):
"""
return an indexer coerced to the codes dtype
"""
if isinstance(indexer, np.ndarray) and indexer.dtype.kind == "i":
indexer = indexer.astype(self._codes.dtype)
return indexer
def __getitem__(self, key):
"""
Return an item.
"""
if isinstance(key, (int, np.integer)):
i = self._codes[key]
if i == -1:
return np.nan
else:
return self.categories[i]
key = check_array_indexer(self, key)
result = self._codes[key]
if result.ndim > 1:
deprecate_ndim_indexing(result)
return result
return self._constructor(result, dtype=self.dtype, fastpath=True)
def __setitem__(self, key, value):
"""
Item assignment.
Raises
------
ValueError
If (one or more) value is not in categories or if an assigned
`Categorical` does not have the same categories
"""
value = extract_array(value, extract_numpy=True)
# require identical categories set
if isinstance(value, Categorical):
if not is_dtype_equal(self, value):
raise ValueError(
"Cannot set a Categorical with another, "
"without identical categories"
)
if not self.categories.equals(value.categories):
new_codes = recode_for_categories(
value.codes, value.categories, self.categories
)
value = Categorical.from_codes(new_codes, dtype=self.dtype)
rvalue = value if is_list_like(value) else [value]
from pandas import Index
to_add = Index(rvalue).difference(self.categories)
# no assignments of values not in categories, but it's always ok to set
# something to np.nan
if len(to_add) and not isna(to_add).all():
raise ValueError(
"Cannot setitem on a Categorical with a new "
"category, set the categories first"
)
# set by position
if isinstance(key, (int, np.integer)):
pass
# tuple of indexers (dataframe)
elif isinstance(key, tuple):
# only allow 1 dimensional slicing, but can
# in a 2-d case be passed (slice(None), ...)
if len(key) == 2:
if not com.is_null_slice(key[0]):
raise AssertionError("invalid slicing for a 1-ndim categorical")
key = key[1]
elif len(key) == 1:
key = key[0]
else:
raise AssertionError("invalid slicing for a 1-ndim categorical")
# slicing in Series or Categorical
elif isinstance(key, slice):
pass
# else: array of True/False in Series or Categorical
lindexer = self.categories.get_indexer(rvalue)
lindexer = self._maybe_coerce_indexer(lindexer)
key = check_array_indexer(self, key)
self._codes[key] = lindexer
def _reverse_indexer(self) -> Dict[Hashable, np.ndarray]:
"""
Compute the inverse of a categorical, returning
a dict of categories -> indexers.
*This is an internal function*
Returns
-------
dict of categories -> indexers
Examples
--------
>>> c = pd.Categorical(list('aabca'))
>>> c
[a, a, b, c, a]
Categories (3, object): [a, b, c]
>>> c.categories
Index(['a', 'b', 'c'], dtype='object')
>>> c.codes
array([0, 0, 1, 2, 0], dtype=int8)
>>> c._reverse_indexer()
{'a': array([0, 1, 4]), 'b': array([2]), 'c': array([3])}
"""
categories = self.categories
r, counts = libalgos.groupsort_indexer(
self.codes.astype("int64"), categories.size
)
counts = counts.cumsum()
_result = (r[start:end] for start, end in zip(counts, counts[1:]))
result = dict(zip(categories, _result))
return result
# reduction ops #
def _reduce(self, name, axis=0, **kwargs):
func = getattr(self, name, None)
if func is None:
raise TypeError(f"Categorical cannot perform the operation {name}")
return func(**kwargs)
@deprecate_kwarg(old_arg_name="numeric_only", new_arg_name="skipna")
def min(self, skipna=True):
"""
The minimum value of the object.
Only ordered `Categoricals` have a minimum!
.. versionchanged:: 1.0.0
Returns an NA value on empty arrays
Raises
------
TypeError
If the `Categorical` is not `ordered`.
Returns
-------
min : the minimum of this `Categorical`
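Examples
--------
Illustrative only.
>>> pd.Categorical([1, 2, 2, 1], ordered=True).min()
1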
"""
self.check_for_ordered("min")
if not len(self._codes):
return self.dtype.na_value
good = self._codes != -1
if not good.all():
if skipna and good.any():
pointer = self._codes[good].min()
else:
return np.nan
else:
pointer = self._codes.min()
return self.categories[pointer]
@deprecate_kwarg(old_arg_name="numeric_only", new_arg_name="skipna")
def max(self, skipna=True):
"""
The maximum value of the object.
Only ordered `Categoricals` have a maximum!
.. versionchanged:: 1.0.0
Returns an NA value on empty arrays
Raises
------
TypeError
If the `Categorical` is not `ordered`.
Returns
-------
max : the maximum of this `Categorical`
"""
self.check_for_ordered("max")
if not len(self._codes):
return self.dtype.na_value
good = self._codes != -1
if not good.all():
if skipna and good.any():
pointer = self._codes[good].max()
else:
return np.nan
else:
pointer = self._codes.max()
return self.categories[pointer]
def mode(self, dropna=True):
"""
Returns the mode(s) of the Categorical.
Always returns `Categorical` even if only one value.
Parameters
----------
dropna : bool, default True
Don't consider counts of NaN/NaT.
.. versionadded:: 0.24.0
Returns
-------
modes : `Categorical` (sorted)
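Examples
--------
Illustrative only.
>>> pd.Categorical([1, 2, 2, 3]).mode()
[2]
Categories (3, int64): [1, 2, 3]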
"""
codes = self._codes
if dropna:
good = self._codes != -1
codes = self._codes[good]
codes = sorted(htable.mode_int64(ensure_int64(codes), dropna))
return self._constructor(values=codes, dtype=self.dtype, fastpath=True)
def unique(self):
"""
Return the ``Categorical`` whose ``categories`` and ``codes`` are
unique. Unused categories are NOT returned.
- unordered category: values and categories are sorted by appearance
order.
- ordered category: values are sorted by appearance order, categories
keeps existing order.
Returns
-------
unique values : ``Categorical``
See Also
--------
pandas.unique
CategoricalIndex.unique
Series.unique
Examples
--------
An unordered Categorical will return categories in the
order of appearance.
>>> pd.Categorical(list("baabc")).unique()
[b, a, c]
Categories (3, object): [b, a, c]
>>> pd.Categorical(list("baabc"), categories=list("abc")).unique()
[b, a, c]
Categories (3, object): [b, a, c]
An ordered Categorical preserves the category ordering.
>>> pd.Categorical(
... list("baabc"), categories=list("abc"), ordered=True
... ).unique()
[b, a, c]
Categories (3, object): [a < b < c]
"""
# unlike np.unique, unique1d does not sort
unique_codes = unique1d(self.codes)
import numpy as np
import pandas as pd
from numpy import inf, nan
from numpy.testing import assert_array_almost_equal, assert_array_equal
from pandas import DataFrame, Series, Timestamp
from pandas.testing import assert_frame_equal, assert_series_equal
from shapely.geometry.point import Point
from pymove import MoveDataFrame
from pymove.utils import integration
from pymove.utils.constants import (
ADDRESS,
CITY,
DATETIME,
DIST_EVENT,
DIST_HOME,
DIST_POI,
EVENT_ID,
EVENT_TYPE,
GEOMETRY,
HOME,
ID_POI,
LATITUDE,
LONGITUDE,
NAME_POI,
POI,
TRAJ_ID,
TYPE_POI,
VIOLATING,
)
list_random_banks = [
[39.984094, 116.319236, 1, 'bank'],
[39.984198, 116.319322, 2, 'randomvalue'],
[39.984224, 116.319402, 3, 'bancos_postos'],
[39.984211, 116.319389, 4, 'randomvalue'],
[39.984217, 116.319422, 5, 'bancos_PAE'],
[39.984710, 116.319865, 6, 'bancos_postos'],
[39.984674, 116.319810, 7, 'bancos_agencias'],
[39.984623, 116.319773, 8, 'bancos_filiais'],
[39.984606, 116.319732, 9, 'banks'],
[39.984555, 116.319728, 10, 'banks']
]
list_random_bus_station = [
[39.984094, 116.319236, 1, 'transit_station'],
[39.984198, 116.319322, 2, 'randomvalue'],
[39.984224, 116.319402, 3, 'transit_station'],
[39.984211, 116.319389, 4, 'pontos_de_onibus'],
[39.984217, 116.319422, 5, 'transit_station'],
[39.984710, 116.319865, 6, 'randomvalue'],
[39.984674, 116.319810, 7, 'bus_station'],
[39.984623, 116.319773, 8, 'bus_station'],
]
list_random_bar_restaurant = [
[39.984094, 116.319236, 1, 'restaurant'],
[39.984198, 116.319322, 2, 'restaurant'],
[39.984224, 116.319402, 3, 'randomvalue'],
[39.984211, 116.319389, 4, 'bar'],
[39.984217, 116.319422, 5, 'bar'],
[39.984710, 116.319865, 6, 'bar-restaurant'],
[39.984674, 116.319810, 7, 'random123'],
[39.984623, 116.319773, 8, '123'],
]
list_random_parks = [
[39.984094, 116.319236, 1, 'pracas_e_parques'],
[39.984198, 116.319322, 2, 'park'],
[39.984224, 116.319402, 3, 'parks'],
[39.984211, 116.319389, 4, 'random'],
[39.984217, 116.319422, 5, '123'],
[39.984710, 116.319865, 6, 'park'],
[39.984674, 116.319810, 7, 'parks'],
[39.984623, 116.319773, 8, 'pracas_e_parques'],
]
list_random_police = [
[39.984094, 116.319236, 1, 'distritos_policiais'],
[39.984198, 116.319322, 2, 'police'],
[39.984224, 116.319402, 3, 'police'],
[39.984211, 116.319389, 4, 'distritos_policiais'],
[39.984217, 116.319422, 5, 'random'],
[39.984710, 116.319865, 6, 'randomvalue'],
[39.984674, 116.319810, 7, '123'],
[39.984623, 116.319773, 8, 'bus_station'],
]
list_move = [
[39.984094, 116.319236, Timestamp('2008-10-23 05:53:05'), 1],
[39.984559000000004, 116.326696, Timestamp('2008-10-23 10:37:26'), 1],
[40.002899, 116.32151999999999, Timestamp('2008-10-23 10:50:16'), 1],
[40.016238, 116.30769099999999, Timestamp('2008-10-23 11:03:06'), 1],
[40.013814, 116.306525, Timestamp('2008-10-23 11:58:33'), 2],
[40.009735, 116.315069, Timestamp('2008-10-23 23:50:45'), 2],
[39.993527, 116.32648300000001, Timestamp('2008-10-24 00:02:14'), 2],
[39.978575, 116.326975, Timestamp('2008-10-24 00:22:01'), 3],
[39.981668, 116.310769, Timestamp('2008-10-24 01:57:57'), 3],
]
list_pois = [
[39.984094, 116.319236, 1, 'policia', 'distrito_pol_1'],
[39.991013, 116.326384, 2, 'policia', 'policia_federal'],
[40.01, 116.312615, 3, 'comercio', 'supermercado_aroldo'],
[40.013821, 116.306531, 4, 'show', 'forro_tropykalia'],
[40.008099, 116.31771100000002, 5, 'risca-faca',
'rinha_de_galo_world_cup'],
[39.985704, 116.326877, 6, 'evento', 'adocao_de_animais'],
[39.979393, 116.3119, 7, 'show', 'dia_do_municipio']
]
# Union tests
def test_union_poi_bank():
pois_df = DataFrame(
data=list_random_banks,
columns=[LATITUDE, LONGITUDE, TRAJ_ID, TYPE_POI],
index=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
)
expected = DataFrame(
data=[
[39.984094, 116.319236, 1, 'banks'],
[39.984198, 116.319322, 2, 'randomvalue'],
[39.984224, 116.319402, 3, 'banks'],
[39.984211, 116.319389, 4, 'randomvalue'],
[39.984217, 116.319422, 5, 'banks'],
[39.984710, 116.319865, 6, 'banks'],
[39.984674, 116.319810, 7, 'banks'],
[39.984623, 116.319773, 8, 'banks'],
[39.984606, 116.319732, 9, 'banks'],
[39.984555, 116.319728, 10, 'banks']
],
columns=[LATITUDE, LONGITUDE, TRAJ_ID, TYPE_POI],
index=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
)
integration.union_poi_bank(pois_df, TYPE_POI, inplace=True)
assert_frame_equal(pois_df, expected)
def test_union_poi_bus_station():
pois_df = DataFrame(
data=list_random_bus_station,
columns=[LATITUDE, LONGITUDE, TRAJ_ID, TYPE_POI],
index=[0, 1, 2, 3, 4, 5, 6, 7]
)
expected = DataFrame(
data=[
[39.984094, 116.319236, 1, 'bus_station'],
[39.984198, 116.319322, 2, 'randomvalue'],
[39.984224, 116.319402, 3, 'bus_station'],
[39.984211, 116.319389, 4, 'bus_station'],
[39.984217, 116.319422, 5, 'bus_station'],
[39.984710, 116.319865, 6, 'randomvalue'],
[39.984674, 116.319810, 7, 'bus_station'],
[39.984623, 116.319773, 8, 'bus_station'],
],
columns=[LATITUDE, LONGITUDE, TRAJ_ID, TYPE_POI],
index=[0, 1, 2, 3, 4, 5, 6, 7]
)
integration.union_poi_bus_station(pois_df, TYPE_POI, inplace=True)
assert_frame_equal(pois_df, expected)
def test_union_poi_bar_restaurant():
pois_df = DataFrame(
data=list_random_bar_restaurant,
columns=[LATITUDE, LONGITUDE, TRAJ_ID, TYPE_POI],
index=[0, 1, 2, 3, 4, 5, 6, 7]
)
expected = DataFrame(
data=[
[39.984094, 116.319236, 1, 'bar-restaurant'],
[39.984198, 116.319322, 2, 'bar-restaurant'],
[39.984224, 116.319402, 3, 'randomvalue'],
[39.984211, 116.319389, 4, 'bar-restaurant'],
[39.984217, 116.319422, 5, 'bar-restaurant'],
[39.984710, 116.319865, 6, 'bar-restaurant'],
[39.984674, 116.319810, 7, 'random123'],
[39.984623, 116.319773, 8, '123'],
],
columns=[LATITUDE, LONGITUDE, TRAJ_ID, TYPE_POI],
index=[0, 1, 2, 3, 4, 5, 6, 7]
)
integration.union_poi_bar_restaurant(pois_df, TYPE_POI, inplace=True)
assert_frame_equal(pois_df, expected)
def test_union_poi_parks():
pois_df = DataFrame(
data=list_random_parks,
columns=[LATITUDE, LONGITUDE, TRAJ_ID, TYPE_POI],
index=[0, 1, 2, 3, 4, 5, 6, 7]
)
expected = DataFrame(
data=[
[39.984094, 116.319236, 1, 'parks'],
[39.984198, 116.319322, 2, 'parks'],
[39.984224, 116.319402, 3, 'parks'],
[39.984211, 116.319389, 4, 'random'],
[39.984217, 116.319422, 5, '123'],
[39.984710, 116.319865, 6, 'parks'],
[39.984674, 116.319810, 7, 'parks'],
[39.984623, 116.319773, 8, 'parks'],
],
columns=[LATITUDE, LONGITUDE, TRAJ_ID, TYPE_POI],
index=[0, 1, 2, 3, 4, 5, 6, 7]
)
integration.union_poi_parks(pois_df, TYPE_POI, inplace=True)
assert_frame_equal(pois_df, expected)
#!/usr/bin/env python
# coding: utf-8
import xlrd
import numpy as np
from math import sqrt
import pandas as pd
import time
import datetime
import matplotlib.pyplot as plt
import math
import random as rd
import calendar
import torch
from torch import nn
from torch.autograd import Variable
from sklearn.preprocessing import minmax_scale
from sklearn.preprocessing import MinMaxScaler
from sklearn.metrics import mean_squared_error
from sklearn import preprocessing
import csv
# Global variables
record_path = '../data/record/'
weather_path = '../data/weather/'
park_all_cnt = 10
weather_all_cnt = 6
park_table_id = ['P1','P2','P3','P4','P5','P6','P7','P8','P9','P10']
park_weather_idx = [0,0,1,1,1,2,2,2,2,2]
weather_name = ['Ningbo','Ningbo Yinzhou','Changsha']
# util function
def read_park_table(index, debug = False):
park_table_path = record_path + park_table_id[index] + '.csv'
park_book = pd.read_csv(park_table_path,encoding='ISO-8859-1')##########
if debug:
print('open table ' + park_table_id[index] + ' with lines ' + str(len(park_book)))
return park_book
def read_weather_table(index, debug = False):
weather_table_path = weather_path + str(index) + '.csv'
weather_book = pd.read_csv(weather_table_path,encoding='ISO-8859-1')
if debug:
print('open table ' + weather_name[index] + ' with lines ' + str(len(weather_book)))
return weather_book
def trans_record_to_count(data, debug = False):
invalid_record = 0
valid_record = 0
p_dict = {}
for stime,etime in zip(data['Lockdown Time'],data['Lockup Time']):
start_tss = time.strptime(stime, "%Y/%m/%d %H:%M")##########
end_tss = time.strptime(etime, "%Y/%m/%d %H:%M")#########
# Converts start and end times to seconds
start_tsp = int(time.mktime(start_tss))
end_tsp = int(time.mktime(end_tss))
# A parking record with a duration of less than 5 minutes is regarded as invalid
if end_tsp - start_tsp <= 5*60:
invalid_record = invalid_record + 1
continue
valid_record = valid_record + 1
start_hour = int(start_tsp//(60*60))
end_hour = int(end_tsp//(60*60))
# Calculate the parking numbers per hour
for j in range(start_hour,end_hour+1):
if j not in p_dict:
p_dict[j] = {}
p_dict[j]['cnt'] = 1
else:
p_dict[j]['cnt'] = p_dict[j]['cnt'] + 1
if debug:
print('valid record is ' + str(valid_record))
print('invalid record is ' + str(invalid_record))
return p_dict
def calc_park_cnt_from_dict(p_dict, debug = False):
if debug:
print('calcing parking count from dict ...')
park_cnt = []
st = min(p_dict.keys())
ed = max(p_dict.keys())
for i in range(st,ed+1):
if i in p_dict:
park_cnt.append(p_dict[i]['cnt'])
else:
park_cnt.append(0)
return park_cnt
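# Illustrative usage of the helpers above (a sketch; assumes the record CSVs carry the
# 'Lockdown Time' / 'Lockup Time' columns referenced in trans_record_to_count):
#   book = read_park_table(0)
#   p_dict = trans_record_to_count(book)
#   park_cnt = calc_park_cnt_from_dict(p_dict)  # hourly counts, gap hours filled with 0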
def process_weather(data, debug= False):
output = []
start_h = data['DAY'][0]
start_h = int(time.mktime(time.strptime(start_h,"%Y/%m/%d %H:%M")) // (60*60))############
output.append(start_h)
for i in range(5):
output.append([])
output.append({})
for i in range(len(data['HOUR'])):
output[1].append(data['TEM'][i])
output[2].append(data['RHU'][i])
output[3].append(data['WIN_S'][i])
output[4].append(data['PRE_1h'][i])
output[5].append(time.strptime(data['DAY'][i],"%Y/%m/%d %H:%M").tm_wday)##############
output[6][int(time.mktime(time.strptime(data['DAY'][i],"%Y/%m/%d %H:%M")) // (60*60))] = i############
return output
def invalid(w_list,idx):
if w_list[1][idx] > 999:
return True
if w_list[2][idx] > 999:
return True
if w_list[3][idx] > 999:
return True
if w_list[4][idx] > 999:
return True
return False
def gen_series(park_cnt, weather_rec, start_h, end_h, debug=False):
tt = []
for i in range(len(park_cnt)):
tt.append(start_h + i)
"""if debug:
print(tt[-1])"""
temp = []
for i in range(5):
temp.append([])
for i in range(len(park_cnt)):
if tt[i] in weather_rec[6]:
idx = weather_rec[6][tt[i]]
if invalid(weather_rec,idx):
continue
temp[0].append(park_cnt[i])
temp[1].append(weather_rec[1][idx])
temp[2].append(weather_rec[2][idx])
temp[3].append(weather_rec[3][idx])
temp[4].append(weather_rec[4][idx])
#if debug:
#print('The length of temp array is ' + str(len(temp[0])))
park_cnt = pd.Series(temp[0], name='cnt')
tem = pd.Series(temp[1], name='tem')
rhu = pd.Series(temp[2], name='rhu')
winds = pd.Series(temp[3], name='wind_s')
pre_1h = pd.Series(temp[4], name='pre_1h')
import os
import gentle
import numpy as np
import pandas as pd
import codecs
import logging
import time
import datetime
import math
import wave
import contextlib
import librosa
import librosa.display
import pathlib
import matplotlib.pyplot as plt
#import sounddevice as sd
import operator
from shutil import copyfile
from tqdm import tqdm
from pprint import pprint
from ffmpy import FFmpeg
from trimmer import get_vad_ranges
# DOWNLOAD THE DB AND CHANGE THIS PATH
#path='/data2/sungjaecho/data_tts/EmoV-DB/EmoV-DB_sorted'
resources = gentle.Resources()
emov_db_path = '/data4/data/EmoV-DB'
emov_db_16000 = '02_EmoV-DB-sr-16000'
#emov_db_version = '01_EmoV-DB-original'
#emov_db_version = '02_EmoV-DB-sr-22050'
#emov_db_version = '02_EmoV-DB-sr-16000'
emov_db_version = '03_EmoV-DB-sr-22050-trim-vad'
path_alignments = 'alignments/EmoV-DB_sorted'
people_list = ['bea', 'jenie', 'josh', 'sam']
emo_list = ['Amused', 'Angry', 'Disgusted', 'Neutral', 'Sleepy']
data_stat_path = 'data_stat'
emo_csv_name = 'emov_db.csv'
path_emov_db = os.path.join(emov_db_path, emov_db_version)
emo_csv_path = os.path.join(data_stat_path, emov_db_version, emo_csv_name)
def on_progress(p):
for k,v in p.items():
logging.debug("%s: %s" % (k, v))
def load_emov_db_postprocessed():
db_path = os.path.join(emov_db_path, '02_EmoV-DB-sr-22050', 'emov_db_postprocessed.xlsx')
df = pd.read_excel(db_path)
return df
def load_emov_db(path_to_EmoV_DB=None, load_csv=False, load_script_from_postprocessed=True):
if load_csv and os.path.exists(emo_csv_path):
data = load_csv_db()
print("DB loaded from {} !".format(emo_csv_path))
return data
print('Start to load wavs.')
script = os.path.join(path_to_EmoV_DB, 'cmuarctic.data')
lines = codecs.open(script, 'r', 'utf-8').readlines()
df_pp = load_emov_db_postprocessed()
# in our database, we use only files beginning with arctic_a. And the number of these sentences correspond.
# Here we build a dataframe with number and text of each of these lines
sentences = []
for line in lines:
temp = {}
idx_n_0 = line.find('arctic_a') + len('arctic_a')
if line.find('arctic_a') != -1:
#print(line)
#print(idx_n_0)
idx_n_end = idx_n_0 + 4
number = line[idx_n_0:idx_n_end]
#print(number)
temp['n'] = number
idx_text_0 = idx_n_end + 2
text = line.strip()[idx_text_0:-3]
temp['text'] = text
# print(text)
sentences.append(temp)
    sentences = pd.DataFrame(sentences)
"""
snhpdata.py - household projections
"""
import os.path
import zipfile
import pandas as pd
import requests
import tempfile
from openpyxl import load_workbook
import ukpopulation.utils as utils
import pyexcel
class SNHPData:
"""
Functionality for downloading and collating UK Subnational Household Projection (SNHP) data
"""
def __init__(self, cache_dir=utils.default_cache_dir()):
self.cache_dir = cache_dir
self.data = {}
self.data[utils.EN] = self.__do_england()
self.data[utils.WA] = self.__do_wales()
self.data[utils.SC] = self.__do_scotland()
self.data[utils.NI] = self.__do_nireland()
def unified(self):
"""
Creates a unified dataset containing values for
- the year range present in all datasets (2016-2039 at time of typing)
        - a lowest-common-denominator set of household types (that maps to OA-level census categories)
"""
raise NotImplementedError("The categories used in each country have no clear common denominator")
# TODO best I can do is probably differentiate between single-person, multi-person including children, and multi-person no children households
def min_year(self, code):
"""
Returns the first year in the projection, assumes a single LAD or country code
"""
# convert to country if necessary
if "0" in code:
code = utils.country(code)[0]
return min(self.data[code].PROJECTED_YEAR_NAME.unique().astype(int))
def max_year(self, code):
"""
Returns the final year in the projection, assumes a single LAD or country code
"""
# convert to country if necessary
if "0" in code:
code = utils.country(code)[0]
return max(self.data[code].PROJECTED_YEAR_NAME.unique().astype(int))
def filter(self, categories, geog_codes, years=None):
# see unified...
raise NotImplementedError("The categories used in each country have no clear common denominator")
def aggregate(self, geog_codes, years=None):
""" Returns aggregate counts of household for specified geographies and years """
# convert geog_codes and years to arrays if single values supplied (for isin)
if isinstance(geog_codes, str):
geog_codes = [geog_codes]
countries = utils.country(geog_codes)
        # TODO: fix the incorrect assumption that all countries have the same year range
years = utils.trim_range(years, self.min_year(countries[0]), self.max_year(countries[0]))
retval = pd.DataFrame()
# loop over datasets as needed
for country in countries:
# apply filters
retval = retval.append(self.data[country][(self.data[country].GEOGRAPHY_CODE.isin(geog_codes)) &
(self.data[country].PROJECTED_YEAR_NAME.isin(years))] \
, ignore_index=True, sort=False)
return retval.groupby(["GEOGRAPHY_CODE", "PROJECTED_YEAR_NAME"]).sum().reset_index()
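    # Example usage (illustrative; assumes the cache files can be downloaded or already exist):
    #   snhp = SNHPData(cache_dir="./cache")
    #   totals = snhp.aggregate(["E06000001", "W06000011"], years=range(2018, 2021))
    #   # -> one row per (GEOGRAPHY_CODE, PROJECTED_YEAR_NAME) with the summed OBS_VALUE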
def __do_england(self):
print("Collating SNHP data for England...")
england_src = "https://www.ons.gov.uk/file?uri=/peoplepopulationandcommunity/populationandmigration/populationprojections/datasets/householdprojectionsforenglanddetaileddataformodellingandanalysis/2016based/detailedtablesstage1and2.zip"
england_raw = os.path.join(self.cache_dir, os.path.basename(england_src))
england_processed = self.cache_dir + "/snhp_e.csv"
if os.path.isfile(england_processed):
snhp_e = pd.read_csv(england_processed)
else:
response = requests.get(england_src)
with open(england_raw, 'wb') as fd:
for chunk in response.iter_content(chunk_size=1024):
fd.write(chunk)
print("Downloaded", england_raw)
            # this doesn't work if you supply the file inside the zip directly to load_workbook;
            # the workaround is to extract the file to a tmp dir and load from there
z = zipfile.ZipFile(england_raw)
tmpdir = tempfile.TemporaryDirectory().name
# print(tmpdir)
z.extract("detailedtablesstage1and2/s2 Households.xlsx", tmpdir)
sheet = load_workbook(os.path.join(tmpdir, "detailedtablesstage1and2/s2 Households.xlsx"), read_only=True)[
"Households"]
raw = utils.read_cell_range(sheet, "A7", "AS32263")
snhp_e = pd.DataFrame(raw[1:, :], columns=raw[0, :])
# remove years before 2011 census and switch years from columns to rows
snhp_e = snhp_e.drop([str(y) for y in range(2001, 2011)], axis=1) \
.melt(id_vars=["CODE", "AREA", "AGE GROUP", "HOUSEHOLD TYPE"]).drop("AREA", axis=1)
# ensure count is numeric
snhp_e.value = snhp_e.value.astype(float)
# remove age categories and standardise column names
snhp_e = snhp_e.groupby(["CODE", "HOUSEHOLD TYPE", "variable"]).sum().reset_index() \
.rename({"CODE": "GEOGRAPHY_CODE",
"HOUSEHOLD TYPE": "HOUSEHOLD_TYPE",
"variable": "PROJECTED_YEAR_NAME",
"value": "OBS_VALUE"}, axis=1)
snhp_e.to_csv(england_processed, index=False)
return snhp_e
def __do_wales(self):
print("Collating SNHP data for Wales...")
wales_raw = self.cache_dir + "/snhp_w.csv"
if not os.path.isfile(wales_raw):
fields = ['Area_AltCode1', 'Year_Code', 'Data', 'Area_Hierarchy', 'Variant_Code',
'Householdtype_ItemName_ENG']
# StatsWales is an OData endpoint, so select fields of interest
url = "http://open.statswales.gov.wales/dataset/hous0115?$select={}".format(",".join(fields))
# use OData syntax to filter P (persons), AllAges (all ages), Area_Hierarchy 596 (LADs)
url += "&$filter=Variant_Code eq 1 and Area_Hierarchy eq 'W92000004'" # Householdtype_ItemName_ENG
data = []
while True:
print(url)
r = requests.get(url)
r.raise_for_status()
r_data = r.json()
data += r_data['value']
if "odata.nextLink" in r_data:
url = r_data["odata.nextLink"]
else:
break
snhp_w = pd.DataFrame(data)
# # Remove unwanted and rename wanted columns
snhp_w = snhp_w.drop(["Area_Hierarchy", "Variant_Code"], axis=1)
snhp_w = snhp_w.rename(columns={"Area_AltCode1": "GEOGRAPHY_CODE",
"Data": "OBS_VALUE",
"Householdtype_ItemName_ENG": "HOUSEHOLD_TYPE",
"Year_Code": "PROJECTED_YEAR_NAME"})
# reinstate + signs that went missing
snhp_w.HOUSEHOLD_TYPE.replace(
['4 person (2 adults, 1 children)', '5 person (No children)', '5 person (2 adults, 1 children)',
'5 person (1 adult, 4 children)'],
['4 person (2+ adults, 1+ children)', '5+ person (No children)', '5+ person (2+ adults, 1+ children)',
'5+ person (1 adult, 4+ children)'],
inplace=True)
# Drop occupancy and population (for now, might be useful?)
snhp_w = snhp_w[~snhp_w.HOUSEHOLD_TYPE.isin(
["Households", "Projected Private Household Population", "Average Household Size"])]
snhp_w.to_csv(wales_raw, index=False)
# this avoids any issues with the index (which was dropped on save)
snhp_w = pd.read_csv(wales_raw)
return snhp_w
def __do_scotland(self):
print("Collating SNHP data for Scotland...")
scotland_processed = os.path.join(self.cache_dir, "snhp_s.csv")
scotland_src = "https://www.nrscotland.gov.uk/files//statistics/household-projections/16/2016-house-proj-detailed-coun-princ.zip"
scotland_raw = os.path.join(self.cache_dir, os.path.basename(scotland_src))
if os.path.isfile(scotland_processed):
snhp_s = pd.read_csv(scotland_processed)
else:
response = requests.get(scotland_src)
with open(scotland_raw, 'wb') as fd:
for chunk in response.iter_content(chunk_size=1024):
fd.write(chunk)
print("Downloaded", scotland_raw)
lookup = {"Aberdeen City": "S12000033",
"Aberdeenshire": "S12000034",
"Angus": "S12000041",
"Argyll and Bute": "S12000035",
"Scottish Borders": "S12000026",
"Clackmannanshire": "S12000005",
"West Dunbartonshire": "S12000039",
"Dumfries and Galloway": "S12000006",
"Dundee City": "S12000042",
"East Ayrshire": "S12000008",
"East Dunbartonshire": "S12000045",
"East Lothian": "S12000010",
"East Renfrewshire": "S12000011",
"City of Edinburgh": "S12000036",
"Falkirk": "S12000014",
"Fife": "S12000015",
"Glasgow City": "S12000046",
"Highland": "S12000017",
"Inverclyde": "S12000018",
"Midlothian": "S12000019",
"Moray": "S12000020",
"North Ayrshire": "S12000021",
"North Lanarkshire": "S12000044",
"Orkney Islands": "S12000023",
"Perth and Kinross": "S12000024",
"Renfrewshire": "S12000038",
"Shetland Islands": "S12000027",
"South Ayrshire": "S12000028",
"South Lanarkshire": "S12000029",
"Stirling": "S12000030",
"West Lothian": "S12000040",
"Na h-Eileanan Siar": "S12000013"}
z = zipfile.ZipFile(scotland_raw)
            snhp_s = pd.DataFrame()
from abc import abstractmethod
import datetime as dt
import numpy as np
from numpy.random import RandomState
from numpy.testing import assert_equal
from pandas import DataFrame, Series, Timedelta, date_range
import pytest
from arch import doc
from arch.univariate.base import implicit_constant
from arch.utility.array import (
ConcreteClassMeta,
DocStringInheritor,
cutoff_to_index,
date_to_index,
ensure1d,
ensure2d,
find_index,
parse_dataframe,
)
@pytest.fixture(scope="function")
def rng():
return RandomState(12345)
def test_ensure1d():
out = ensure1d(1.0, "y")
assert_equal(out, np.array([1.0]))
out = ensure1d(np.arange(5.0), "y")
assert_equal(out, np.arange(5.0))
out = ensure1d(np.arange(5.0)[:, None], "y")
assert_equal(out, np.arange(5.0))
in_array = np.reshape(np.arange(16.0), (4, 4))
with pytest.raises(ValueError):
ensure1d(in_array, "y")
y = Series(np.arange(5.0))
ys = ensure1d(y, "y")
assert isinstance(ys, np.ndarray)
ys = ensure1d(y, "y", True)
assert isinstance(ys, Series)
y = DataFrame(y)
ys = ensure1d(y, "y")
assert isinstance(ys, np.ndarray)
ys = ensure1d(y, "y", True)
assert isinstance(ys, Series)
y.columns = [1]
ys = ensure1d(y, "y", True)
assert isinstance(ys, Series)
assert ys.name == "1"
y = Series(np.arange(5.0), name="series")
ys = ensure1d(y, "y")
assert isinstance(ys, np.ndarray)
ys = ensure1d(y, "y", True)
assert isinstance(ys, Series)
y = DataFrame(y)
ys = ensure1d(y, "y")
assert isinstance(ys, np.ndarray)
ys = ensure1d(y, "y", True)
assert isinstance(ys, Series)
ys.name = 1
ys = ensure1d(ys, None, True)
assert isinstance(ys, Series)
assert ys.name == "1"
y = DataFrame(np.reshape(np.arange(10), (5, 2)))
with pytest.raises(ValueError):
ensure1d(y, "y")
def test_ensure2d():
s = Series([1, 2, 3], name="x")
df = ensure2d(s, "x")
assert isinstance(df, DataFrame)
df2 = ensure2d(df, "x")
assert df is df2
npa = ensure2d(s.values, "x")
assert isinstance(npa, np.ndarray)
assert npa.ndim == 2
npa = ensure2d(np.array(1.0), "x")
assert isinstance(npa, np.ndarray)
assert npa.ndim == 2
with pytest.raises(ValueError):
ensure2d(np.array([[[1]]]), "x")
with pytest.raises(TypeError):
ensure2d([1], "x")
def test_parse_dataframe():
s = Series(np.arange(10.0), name="variable")
out = parse_dataframe(s, "y")
assert_equal(out[1], np.arange(10.0))
assert_equal(out[0], ["variable"])
df = DataFrame(s)
out = parse_dataframe(df, "y")
assert_equal(out[1], np.arange(10.0))
assert_equal(out[0], ["variable"])
out = parse_dataframe(np.arange(10.0), "y")
assert_equal(out[1], np.arange(10.0))
assert_equal(out[0], ["y"])
out = parse_dataframe(None, "name")
assert out[0] == ["name"]
assert isinstance(out[1], np.ndarray)
assert out[1].shape == (0,)
def test_implicit_constant(rng):
x = rng.standard_normal((1000, 2))
assert not implicit_constant(x)
x[:, 0] = 1.0
assert implicit_constant(x)
x = rng.standard_normal((1000, 3))
x[:, 0] = x[:, 0] > 0
x[:, 1] = 1 - x[:, 0]
assert implicit_constant(x)
def test_docstring_inheritor():
class A(object, metaclass=DocStringInheritor):
"""
Docstring
"""
class B(A):
pass
assert_equal(B.__doc__, A.__doc__)
def test_date_to_index():
dr = date_range("20000101", periods=3000, freq="W")
y = Series(np.arange(3000.0), index=dr)
date_index = y.index
index = date_to_index(date_index[0], date_index)
assert_equal(index, 0)
index = date_to_index(date_index[-1], date_index)
assert_equal(index, date_index.shape[0] - 1)
index = date_to_index("2009-08-02", date_index)
assert_equal(index, 500)
index = date_to_index("2009-08-04", date_index)
assert_equal(index, 501)
index = date_to_index("2009-08-01", date_index)
assert_equal(index, 500)
index = date_to_index(dt.datetime(2009, 8, 1), date_index)
assert_equal(index, 500)
with pytest.raises(ValueError):
date_to_index(dt.date(2009, 8, 1), date_index)
z = y + 0.0
z.index = np.arange(3000)
num_index = z.index
with pytest.raises(ValueError):
date_to_index(dt.datetime(2009, 8, 1), num_index)
idx = date_range("1999-12-31", periods=3)
df = DataFrame([1, 2, 3], index=idx[::-1])
with pytest.raises(ValueError):
date_to_index(idx[0], df.index)
df = DataFrame([1, 2, 3], index=[idx[0]] * 3)
with pytest.raises(ValueError):
date_to_index(idx[0], df.index)
with pytest.raises(ValueError):
date_to_index("NaT", idx)
# check whether this also works for a localized datetimeindex
date_index = date_range("20000101", periods=3000, freq="W", tz="Europe/Berlin")
index = date_to_index(date_index[0], date_index)
assert_equal(index, 0)
def test_date_to_index_timestamp():
dr = date_range("20000101", periods=3000, freq="W")
y = Series(np.arange(3000.0), index=dr)
date_index = y.index
date = y.index[1000]
date_pydt = date.to_pydatetime()
date_npdt = date.to_datetime64()
date_str = date_pydt.strftime("%Y-%m-%d")
index = date_to_index(date, date_index)
index_pydt = date_to_index(date_pydt, date_index)
index_npdt = date_to_index(date_npdt, date_index)
index_str = date_to_index(date_str, date_index)
assert_equal(index, 1000)
assert_equal(index, index_npdt)
assert_equal(index, index_pydt)
assert_equal(index, index_str)
def test_():
dr = date_range("20000101", periods=3000, freq="W")
y = Series(np.arange(3000.0), index=dr)
date_index = y.index
date = date_index[1000] + Timedelta(1, "D")
date_pydt = date.to_pydatetime()
date_npdt = date.to_datetime64()
date_str = date_pydt.strftime("%Y-%m-%d")
index = date_to_index(date, date_index)
index_pydt = date_to_index(date_pydt, date_index)
index_npdt = date_to_index(date_npdt, date_index)
index_str = date_to_index(date_str, date_index)
assert_equal(index, 1001)
assert_equal(index, index_npdt)
assert_equal(index, index_pydt)
assert_equal(index, index_str)
    date = date_index[0] - Timedelta(1, "D")
import pandas as pd
import numpy as np
from datetime import date
"""
dataset split:
(date_received)
dataset3: 20160701~20160731 (113640), features3 from 20160315~20160630 (off_test)
dataset2: 20160515~20160615 (258446), features2 from 20160201~20160514
dataset1: 20160414~20160514 (138303), features1 from 20160101~20160413
1.merchant related:
sales_use_coupon. total_coupon
transfer_rate = sales_use_coupon/total_coupon.
merchant_avg_distance,merchant_min_distance,merchant_max_distance of those use coupon
total_sales. coupon_rate = sales_use_coupon/total_sales.
2.coupon related:
discount_rate. discount_man. discount_jian. is_man_jian
day_of_week,day_of_month. (date_received)
3.user related:
distance.
user_avg_distance, user_min_distance,user_max_distance.
buy_use_coupon. buy_total. coupon_received.
buy_use_coupon/coupon_received.
avg_diff_date_datereceived. min_diff_date_datereceived. max_diff_date_datereceived.
count_merchant.
4.user_merchant:
times_user_buy_merchant_before.
5. other feature:
this_month_user_receive_all_coupon_count
this_month_user_receive_same_coupon_count
this_month_user_receive_same_coupon_lastone
this_month_user_receive_same_coupon_firstone
this_day_user_receive_all_coupon_count
this_day_user_receive_same_coupon_count
day_gap_before, day_gap_after (receive the same coupon)
"""
#1754884 record,1053282 with coupon_id,9738 coupon. date_received:20160101~20160615,date:20160101~20160630, 539438 users, 8415 merchants
off_train = pd.read_csv('data/ccf_offline_stage1_train.csv',header=None)
off_train.columns = ['user_id','merchant_id','coupon_id','discount_rate','distance','date_received','date']
#2050 coupon_id. date_received:20160701~20160731, 76309 users(76307 in trainset, 35965 in online_trainset), 1559 merchants(1558 in trainset)
off_test = pd.read_csv('data/ccf_offline_stage1_test_revised.csv',header=None)
off_test.columns = ['user_id','merchant_id','coupon_id','discount_rate','distance','date_received']
#11429826 record(872357 with coupon_id),762858 user(267448 in off_train)
on_train = pd.read_csv('data/ccf_online_stage1_train.csv',header=None)
on_train.columns = ['user_id','merchant_id','action','coupon_id','discount_rate','date_received','date']
dataset3 = off_test
feature3 = off_train[((off_train.date>='20160315')&(off_train.date<='20160630'))|((off_train.date=='null')&(off_train.date_received>='20160315')&(off_train.date_received<='20160630'))]
dataset2 = off_train[(off_train.date_received>='20160515')&(off_train.date_received<='20160615')]
feature2 = off_train[(off_train.date>='20160201')&(off_train.date<='20160514')|((off_train.date=='null')&(off_train.date_received>='20160201')&(off_train.date_received<='20160514'))]
dataset1 = off_train[(off_train.date_received>='20160414')&(off_train.date_received<='20160514')]
feature1 = off_train[(off_train.date>='20160101')&(off_train.date<='20160413')|((off_train.date=='null')&(off_train.date_received>='20160101')&(off_train.date_received<='20160413'))]
############# other feature ##################3
"""
5. other feature:
this_month_user_receive_all_coupon_count
this_month_user_receive_same_coupon_count
this_month_user_receive_same_coupon_lastone
this_month_user_receive_same_coupon_firstone
this_day_user_receive_all_coupon_count
this_day_user_receive_same_coupon_count
day_gap_before, day_gap_after (receive the same coupon)
"""
#for dataset3
t = dataset3[['user_id']]
t['this_month_user_receive_all_coupon_count'] = 1
t = t.groupby('user_id').agg('sum').reset_index()
t1 = dataset3[['user_id','coupon_id']]
t1['this_month_user_receive_same_coupon_count'] = 1
t1 = t1.groupby(['user_id','coupon_id']).agg('sum').reset_index()
t2 = dataset3[['user_id','coupon_id','date_received']]
t2.date_received = t2.date_received.astype('str')
t2 = t2.groupby(['user_id','coupon_id'])['date_received'].agg(lambda x:':'.join(x)).reset_index()
t2['receive_number'] = t2.date_received.apply(lambda s:len(s.split(':')))
t2 = t2[t2.receive_number>1]
t2['max_date_received'] = t2.date_received.apply(lambda s:max([int(d) for d in s.split(':')]))
t2['min_date_received'] = t2.date_received.apply(lambda s:min([int(d) for d in s.split(':')]))
t2 = t2[['user_id','coupon_id','max_date_received','min_date_received']]
t3 = dataset3[['user_id','coupon_id','date_received']]
t3 = pd.merge(t3,t2,on=['user_id','coupon_id'],how='left')
t3['this_month_user_receive_same_coupon_lastone'] = t3.max_date_received - t3.date_received
t3['this_month_user_receive_same_coupon_firstone'] = t3.date_received - t3.min_date_received
def is_firstlastone(x):
if x==0:
return 1
elif x>0:
return 0
else:
return -1 #those only receive once
t3.this_month_user_receive_same_coupon_lastone = t3.this_month_user_receive_same_coupon_lastone.apply(is_firstlastone)
t3.this_month_user_receive_same_coupon_firstone = t3.this_month_user_receive_same_coupon_firstone.apply(is_firstlastone)
t3 = t3[['user_id','coupon_id','date_received','this_month_user_receive_same_coupon_lastone','this_month_user_receive_same_coupon_firstone']]
t4 = dataset3[['user_id','date_received']]
t4['this_day_user_receive_all_coupon_count'] = 1
t4 = t4.groupby(['user_id','date_received']).agg('sum').reset_index()
t5 = dataset3[['user_id','coupon_id','date_received']]
t5['this_day_user_receive_same_coupon_count'] = 1
t5 = t5.groupby(['user_id','coupon_id','date_received']).agg('sum').reset_index()
t6 = dataset3[['user_id','coupon_id','date_received']]
t6.date_received = t6.date_received.astype('str')
t6 = t6.groupby(['user_id','coupon_id'])['date_received'].agg(lambda x:':'.join(x)).reset_index()
t6.rename(columns={'date_received':'dates'},inplace=True)
def get_day_gap_before(s):
date_received,dates = s.split('-')
dates = dates.split(':')
gaps = []
for d in dates:
this_gap = (date(int(date_received[0:4]),int(date_received[4:6]),int(date_received[6:8]))-date(int(d[0:4]),int(d[4:6]),int(d[6:8]))).days
if this_gap>0:
gaps.append(this_gap)
if len(gaps)==0:
return -1
else:
return min(gaps)
def get_day_gap_after(s):
date_received,dates = s.split('-')
dates = dates.split(':')
gaps = []
for d in dates:
this_gap = (date(int(d[0:4]),int(d[4:6]),int(d[6:8]))-date(int(date_received[0:4]),int(date_received[4:6]),int(date_received[6:8]))).days
if this_gap>0:
gaps.append(this_gap)
if len(gaps)==0:
return -1
else:
return min(gaps)
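# Illustrative helper (not called anywhere): the two gap functions above expect a string of
# the form '<date_received>-<d1>:<d2>:...', which is exactly what date_received_date holds below.
def _demo_day_gap():
    s = '20160706-20160701:20160710'
    assert get_day_gap_before(s) == 5   # nearest earlier receipt of the same coupon: 5 days before
    assert get_day_gap_after(s) == 4    # nearest later receipt of the same coupon: 4 days after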
t7 = dataset3[['user_id','coupon_id','date_received']]
t7 = pd.merge(t7,t6,on=['user_id','coupon_id'],how='left')
t7['date_received_date'] = t7.date_received.astype('str') + '-' + t7.dates
t7['day_gap_before'] = t7.date_received_date.apply(get_day_gap_before)
t7['day_gap_after'] = t7.date_received_date.apply(get_day_gap_after)
t7 = t7[['user_id','coupon_id','date_received','day_gap_before','day_gap_after']]
other_feature3 = pd.merge(t1,t,on='user_id')
other_feature3 = pd.merge(other_feature3,t3,on=['user_id','coupon_id'])
other_feature3 = pd.merge(other_feature3,t4,on=['user_id','date_received'])
other_feature3 = pd.merge(other_feature3,t5,on=['user_id','coupon_id','date_received'])
other_feature3 = pd.merge(other_feature3,t7,on=['user_id','coupon_id','date_received'])
other_feature3.to_csv('data/other_feature3.csv',index=None)
print(other_feature3.shape)
#for dataset2
t = dataset2[['user_id']]
t['this_month_user_receive_all_coupon_count'] = 1
t = t.groupby('user_id').agg('sum').reset_index()
t1 = dataset2[['user_id','coupon_id']]
t1['this_month_user_receive_same_coupon_count'] = 1
t1 = t1.groupby(['user_id','coupon_id']).agg('sum').reset_index()
t2 = dataset2[['user_id','coupon_id','date_received']]
t2.date_received = t2.date_received.astype('str')
t2 = t2.groupby(['user_id','coupon_id'])['date_received'].agg(lambda x:':'.join(x)).reset_index()
t2['receive_number'] = t2.date_received.apply(lambda s:len(s.split(':')))
t2 = t2[t2.receive_number>1]
t2['max_date_received'] = t2.date_received.apply(lambda s:max([int(d) for d in s.split(':')]))
t2['min_date_received'] = t2.date_received.apply(lambda s:min([int(d) for d in s.split(':')]))
t2 = t2[['user_id','coupon_id','max_date_received','min_date_received']]
t3 = dataset2[['user_id','coupon_id','date_received']]
t3 = pd.merge(t3,t2,on=['user_id','coupon_id'],how='left')
t3['this_month_user_receive_same_coupon_lastone'] = t3.max_date_received - t3.date_received.astype('int')
t3['this_month_user_receive_same_coupon_firstone'] = t3.date_received.astype('int') - t3.min_date_received
def is_firstlastone(x):
if x==0:
return 1
elif x>0:
return 0
else:
return -1 #those only receive once
t3.this_month_user_receive_same_coupon_lastone = t3.this_month_user_receive_same_coupon_lastone.apply(is_firstlastone)
t3.this_month_user_receive_same_coupon_firstone = t3.this_month_user_receive_same_coupon_firstone.apply(is_firstlastone)
t3 = t3[['user_id','coupon_id','date_received','this_month_user_receive_same_coupon_lastone','this_month_user_receive_same_coupon_firstone']]
t4 = dataset2[['user_id','date_received']]
t4['this_day_user_receive_all_coupon_count'] = 1
t4 = t4.groupby(['user_id','date_received']).agg('sum').reset_index()
t5 = dataset2[['user_id','coupon_id','date_received']]
t5['this_day_user_receive_same_coupon_count'] = 1
t5 = t5.groupby(['user_id','coupon_id','date_received']).agg('sum').reset_index()
t6 = dataset2[['user_id','coupon_id','date_received']]
t6.date_received = t6.date_received.astype('str')
t6 = t6.groupby(['user_id','coupon_id'])['date_received'].agg(lambda x:':'.join(x)).reset_index()
t6.rename(columns={'date_received':'dates'},inplace=True)
def get_day_gap_before(s):
date_received,dates = s.split('-')
dates = dates.split(':')
gaps = []
for d in dates:
this_gap = (date(int(date_received[0:4]),int(date_received[4:6]),int(date_received[6:8]))-date(int(d[0:4]),int(d[4:6]),int(d[6:8]))).days
if this_gap>0:
gaps.append(this_gap)
if len(gaps)==0:
return -1
else:
return min(gaps)
def get_day_gap_after(s):
date_received,dates = s.split('-')
dates = dates.split(':')
gaps = []
for d in dates:
this_gap = (date(int(d[0:4]),int(d[4:6]),int(d[6:8]))-date(int(date_received[0:4]),int(date_received[4:6]),int(date_received[6:8]))).days
if this_gap>0:
gaps.append(this_gap)
if len(gaps)==0:
return -1
else:
return min(gaps)
t7 = dataset2[['user_id','coupon_id','date_received']]
t7 = pd.merge(t7,t6,on=['user_id','coupon_id'],how='left')
t7['date_received_date'] = t7.date_received.astype('str') + '-' + t7.dates
t7['day_gap_before'] = t7.date_received_date.apply(get_day_gap_before)
t7['day_gap_after'] = t7.date_received_date.apply(get_day_gap_after)
t7 = t7[['user_id','coupon_id','date_received','day_gap_before','day_gap_after']]
other_feature2 = pd.merge(t1,t,on='user_id')
other_feature2 = pd.merge(other_feature2,t3,on=['user_id','coupon_id'])
other_feature2 = pd.merge(other_feature2,t4,on=['user_id','date_received'])
other_feature2 = pd.merge(other_feature2,t5,on=['user_id','coupon_id','date_received'])
other_feature2 = pd.merge(other_feature2,t7,on=['user_id','coupon_id','date_received'])
other_feature2.to_csv('data/other_feature2.csv',index=None)
print(other_feature2.shape)
#for dataset1
t = dataset1[['user_id']]
t['this_month_user_receive_all_coupon_count'] = 1
t = t.groupby('user_id').agg('sum').reset_index()
t1 = dataset1[['user_id','coupon_id']]
t1['this_month_user_receive_same_coupon_count'] = 1
t1 = t1.groupby(['user_id','coupon_id']).agg('sum').reset_index()
t2 = dataset1[['user_id','coupon_id','date_received']]
t2.date_received = t2.date_received.astype('str')
t2 = t2.groupby(['user_id','coupon_id'])['date_received'].agg(lambda x:':'.join(x)).reset_index()
t2['receive_number'] = t2.date_received.apply(lambda s:len(s.split(':')))
t2 = t2[t2.receive_number>1]
t2['max_date_received'] = t2.date_received.apply(lambda s:max([int(d) for d in s.split(':')]))
t2['min_date_received'] = t2.date_received.apply(lambda s:min([int(d) for d in s.split(':')]))
t2 = t2[['user_id','coupon_id','max_date_received','min_date_received']]
t3 = dataset1[['user_id','coupon_id','date_received']]
t3 = pd.merge(t3,t2,on=['user_id','coupon_id'],how='left')
t3['this_month_user_receive_same_coupon_lastone'] = t3.max_date_received - t3.date_received.astype('int')
t3['this_month_user_receive_same_coupon_firstone'] = t3.date_received.astype('int') - t3.min_date_received
def is_firstlastone(x):
if x==0:
return 1
elif x>0:
return 0
else:
return -1 #those only receive once
t3.this_month_user_receive_same_coupon_lastone = t3.this_month_user_receive_same_coupon_lastone.apply(is_firstlastone)
t3.this_month_user_receive_same_coupon_firstone = t3.this_month_user_receive_same_coupon_firstone.apply(is_firstlastone)
t3 = t3[['user_id','coupon_id','date_received','this_month_user_receive_same_coupon_lastone','this_month_user_receive_same_coupon_firstone']]
t4 = dataset1[['user_id','date_received']]
t4['this_day_user_receive_all_coupon_count'] = 1
t4 = t4.groupby(['user_id','date_received']).agg('sum').reset_index()
t5 = dataset1[['user_id','coupon_id','date_received']]
t5['this_day_user_receive_same_coupon_count'] = 1
t5 = t5.groupby(['user_id','coupon_id','date_received']).agg('sum').reset_index()
t6 = dataset1[['user_id','coupon_id','date_received']]
t6.date_received = t6.date_received.astype('str')
t6 = t6.groupby(['user_id','coupon_id'])['date_received'].agg(lambda x:':'.join(x)).reset_index()
t6.rename(columns={'date_received':'dates'},inplace=True)
def get_day_gap_before(s):
date_received,dates = s.split('-')
dates = dates.split(':')
gaps = []
for d in dates:
this_gap = (date(int(date_received[0:4]),int(date_received[4:6]),int(date_received[6:8]))-date(int(d[0:4]),int(d[4:6]),int(d[6:8]))).days
if this_gap>0:
gaps.append(this_gap)
if len(gaps)==0:
return -1
else:
return min(gaps)
def get_day_gap_after(s):
date_received,dates = s.split('-')
dates = dates.split(':')
gaps = []
for d in dates:
this_gap = (date(int(d[0:4]),int(d[4:6]),int(d[6:8]))-date(int(date_received[0:4]),int(date_received[4:6]),int(date_received[6:8]))).days
if this_gap>0:
gaps.append(this_gap)
if len(gaps)==0:
return -1
else:
return min(gaps)
t7 = dataset1[['user_id','coupon_id','date_received']]
t7 = pd.merge(t7,t6,on=['user_id','coupon_id'],how='left')
t7['date_received_date'] = t7.date_received.astype('str') + '-' + t7.dates
t7['day_gap_before'] = t7.date_received_date.apply(get_day_gap_before)
t7['day_gap_after'] = t7.date_received_date.apply(get_day_gap_after)
t7 = t7[['user_id','coupon_id','date_received','day_gap_before','day_gap_after']]
other_feature1 = pd.merge(t1,t,on='user_id')
other_feature1 = pd.merge(other_feature1,t3,on=['user_id','coupon_id'])
other_feature1 = pd.merge(other_feature1,t4,on=['user_id','date_received'])
other_feature1 = pd.merge(other_feature1,t5,on=['user_id','coupon_id','date_received'])
other_feature1 = pd.merge(other_feature1,t7,on=['user_id','coupon_id','date_received'])
other_feature1.to_csv('data/other_feature1.csv',index=None)
print(other_feature1.shape)
############# coupon related feature #############
"""
2.coupon related:
discount_rate. discount_man. discount_jian. is_man_jian
day_of_week,day_of_month. (date_received)
"""
def calc_discount_rate(s):
s =str(s)
s = s.split(':')
if len(s)==1:
return float(s[0])
else:
return 1.0-float(s[1])/float(s[0])
def get_discount_man(s):
s =str(s)
s = s.split(':')
if len(s)==1:
return 'null'
else:
return int(s[0])
def get_discount_jian(s):
s =str(s)
s = s.split(':')
if len(s)==1:
return 'null'
else:
return int(s[1])
def is_man_jian(s):
s =str(s)
s = s.split(':')
if len(s)==1:
return 0
else:
return 1
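# Illustrative helper (not called anywhere): expected behaviour of the discount parsers above
# for a 'spend 100, save 20' coupon string and for a plain discount-rate string.
def _demo_discount_parsing():
    assert get_discount_man('100:20') == 100
    assert get_discount_jian('100:20') == 20
    assert is_man_jian('100:20') == 1
    assert abs(calc_discount_rate('100:20') - 0.8) < 1e-9
    assert calc_discount_rate('0.95') == 0.95   # plain rates are returned unchanged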
#dataset3
dataset3['day_of_week'] = dataset3.date_received.astype('str').apply(lambda x:date(int(x[0:4]),int(x[4:6]),int(x[6:8])).weekday()+1)
dataset3['day_of_month'] = dataset3.date_received.astype('str').apply(lambda x:int(x[6:8]))
dataset3['days_distance'] = dataset3.date_received.astype('str').apply(lambda x:(date(int(x[0:4]),int(x[4:6]),int(x[6:8]))-date(2016,6,30)).days)
dataset3['discount_man'] = dataset3.discount_rate.apply(get_discount_man)
dataset3['discount_jian'] = dataset3.discount_rate.apply(get_discount_jian)
dataset3['is_man_jian'] = dataset3.discount_rate.apply(is_man_jian)
dataset3['discount_rate'] = dataset3.discount_rate.apply(calc_discount_rate)
d = dataset3[['coupon_id']]
d['coupon_count'] = 1
d = d.groupby('coupon_id').agg('sum').reset_index()
dataset3 = pd.merge(dataset3,d,on='coupon_id',how='left')
dataset3.to_csv('data/coupon3_feature.csv',index=None)
#dataset2
dataset2['day_of_week'] = dataset2.date_received.astype('str').apply(lambda x:date(int(x[0:4]),int(x[4:6]),int(x[6:8])).weekday()+1)
dataset2['day_of_month'] = dataset2.date_received.astype('str').apply(lambda x:int(x[6:8]))
dataset2['days_distance'] = dataset2.date_received.astype('str').apply(lambda x:(date(int(x[0:4]),int(x[4:6]),int(x[6:8]))-date(2016,5,14)).days)
dataset2['discount_man'] = dataset2.discount_rate.apply(get_discount_man)
dataset2['discount_jian'] = dataset2.discount_rate.apply(get_discount_jian)
dataset2['is_man_jian'] = dataset2.discount_rate.apply(is_man_jian)
dataset2['discount_rate'] = dataset2.discount_rate.apply(calc_discount_rate)
d = dataset2[['coupon_id']]
d['coupon_count'] = 1
d = d.groupby('coupon_id').agg('sum').reset_index()
dataset2 = pd.merge(dataset2,d,on='coupon_id',how='left')
dataset2.to_csv('data/coupon2_feature.csv',index=None)
#dataset1
dataset1['day_of_week'] = dataset1.date_received.astype('str').apply(lambda x:date(int(x[0:4]),int(x[4:6]),int(x[6:8])).weekday()+1)
dataset1['day_of_month'] = dataset1.date_received.astype('str').apply(lambda x:int(x[6:8]))
dataset1['days_distance'] = dataset1.date_received.astype('str').apply(lambda x:(date(int(x[0:4]),int(x[4:6]),int(x[6:8]))-date(2016,4,13)).days)
dataset1['discount_man'] = dataset1.discount_rate.apply(get_discount_man)
dataset1['discount_jian'] = dataset1.discount_rate.apply(get_discount_jian)
dataset1['is_man_jian'] = dataset1.discount_rate.apply(is_man_jian)
dataset1['discount_rate'] = dataset1.discount_rate.apply(calc_discount_rate)
d = dataset1[['coupon_id']]
d['coupon_count'] = 1
d = d.groupby('coupon_id').agg('sum').reset_index()
dataset1 = pd.merge(dataset1,d,on='coupon_id',how='left')
dataset1.to_csv('data/coupon1_feature.csv',index=None)
############# merchant related feature #############
"""
1.merchant related:
total_sales. sales_use_coupon. total_coupon
coupon_rate = sales_use_coupon/total_sales.
transfer_rate = sales_use_coupon/total_coupon.
merchant_avg_distance,merchant_min_distance,merchant_max_distance of those use coupon
"""
#for dataset3
merchant3 = feature3[['merchant_id','coupon_id','distance','date_received','date']]
t = merchant3[['merchant_id']]
t.drop_duplicates(inplace=True)
t1 = merchant3[merchant3.date!='null'][['merchant_id']]
t1['total_sales'] = 1
t1 = t1.groupby('merchant_id').agg('sum').reset_index()
t2 = merchant3[(merchant3.date!='null')&(merchant3.coupon_id!='null')][['merchant_id']]
t2['sales_use_coupon'] = 1
t2 = t2.groupby('merchant_id').agg('sum').reset_index()
t3 = merchant3[merchant3.coupon_id!='null'][['merchant_id']]
t3['total_coupon'] = 1
t3 = t3.groupby('merchant_id').agg('sum').reset_index()
t4 = merchant3[(merchant3.date!='null')&(merchant3.coupon_id!='null')][['merchant_id','distance']]
t4.replace('null',-1,inplace=True)
t4.distance = t4.distance.astype('int')
t4.replace(-1,np.nan,inplace=True)
t5 = t4.groupby('merchant_id').agg('min').reset_index()
t5.rename(columns={'distance':'merchant_min_distance'},inplace=True)
t6 = t4.groupby('merchant_id').agg('max').reset_index()
t6.rename(columns={'distance':'merchant_max_distance'},inplace=True)
t7 = t4.groupby('merchant_id').agg('mean').reset_index()
t7.rename(columns={'distance':'merchant_mean_distance'},inplace=True)
t8 = t4.groupby('merchant_id').agg('median').reset_index()
t8.rename(columns={'distance':'merchant_median_distance'},inplace=True)
merchant3_feature = pd.merge(t,t1,on='merchant_id',how='left')
merchant3_feature = pd.merge(merchant3_feature,t2,on='merchant_id',how='left')
import arviz
import numpy as np
import pandas
import seaborn as sns
import torch
import torch.distributions as dist
from matplotlib import pyplot
import test_stan
from generate_data import generate_data  # assumed entry point: generate_data(...) is called directly below
sns.set()
# np.random.seed(1)
def user_simulator_typezero(action, W, a, educability=0.6):
# action is either a tuple, or -1 for educate.
# Educate action
if isinstance(action, int):
print("Educate!")
educate_o = dist.Bernoulli(educability).sample()
return educate_o
else:
probs = a + action @ W
a_o = dist.Bernoulli(logits=probs).sample()
return int(a_o.item())
def user_simulator_typeone(action, W, a, educability=0.6):
# action is either a tuple, or -1 for educate.
# Educate action
if isinstance(action, int):
print("Educate!")
educate_o = dist.Bernoulli(educability).sample()
return educate_o
else:
probs = a + action @ W
a_o = dist.Bernoulli(logits=probs).sample()
return int(a_o.item())
def user_simulator_switching(action, W, a, educability=0.1, user_type=0, forgetting=0.0):
# action is either a tuple, or -1 for educate.
# W[0] is the type-zero user weights, W[1] type-one.
# Educate action
educability_per_type = [educability, 1.0]
if isinstance(action, int):
user_type_ = int(dist.Bernoulli(educability_per_type[user_type]).sample().item())
#if user_type != user_type_:
# print("User Type Changed!")
return user_type_
else:
probs = a + action @ W[user_type]
a_o = dist.Bernoulli(logits=probs).sample()
return int(a_o.item())
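# Illustrative helper (not called anywhere): one recommend call and one educate call against the
# switching simulator, using the same weight convention as the tests below.
def _demo_switching_calls():
    W = torch.tensor([[5.0, 0.0], [5.0, -5.0]], dtype=torch.double)
    action = torch.tensor([0.9, 0.1], dtype=torch.double)  # [corr with target, max cross-corr]
    recommend_outcome = user_simulator_switching(action, W, a=1.0, user_type=0)  # 0 or 1
    new_user_type = user_simulator_switching(-1, W, a=1.0, educability=0.1, user_type=0)
    return recommend_outcome, new_user_type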
def test_user_typezero():
training_X, training_y, test_X, test_y, _, _ = generate_data(n_noncollinear=50, n_collinear=100, n=100)
corr_mat = np.abs(np.corrcoef(torch.transpose(torch.cat((training_X, training_y.unsqueeze(dim=1)), dim=1), 0, 1)))
W_typezero = [5.0, 0.0]
W_typeone = [5.0, -5.0]
n_covars = training_X.shape[1]
data_dict = {"N": 0, "x": [], "y": [], "beta": [W_typezero, W_typeone]}
aux_data_dict = {"xi": torch.zeros(n_covars + 1, dtype=torch.bool)}
n_iterations = 100
teacher_actions = list(np.random.choice(n_covars, n_iterations))
model_file = None
for i in range(n_iterations):
act_in = teacher_actions[i]
if act_in != -1:
mask = aux_data_dict["xi"].numpy().copy()
mask[act_in] = False
masked = corr_mat[act_in, mask]
if masked.size != 0:
max_cross_corr = np.max(masked)
else:
max_cross_corr = 0.0
action = torch.tensor([corr_mat[act_in, -1], max_cross_corr])
outcome = user_simulator_typezero(action, torch.tensor(W_typezero, dtype=torch.double), a=1.0)
if outcome == 1.0:
aux_data_dict["xi"][act_in] = True
else:
aux_data_dict["xi"][act_in] = False
data_dict["x"].append(action.tolist())
data_dict["y"].append(outcome)
data_dict["N"] += 1
fit, model_file = test_stan.fit_model_w_education(data_dict, model_file)
arviz.plot_trace(fit)
pyplot.show()
def test_user_typeone():
training_X, training_y, test_X, test_y, _, _ = generate_data(n_noncollinear=50, n_collinear=100, n=100)
corr_mat = np.abs(np.corrcoef(torch.transpose(torch.cat((training_X, training_y.unsqueeze(dim=1)), dim=1), 0, 1)))
W_typezero = [5.0, 0.0]
W_typeone = [5.0, -5.0]
n_covars = training_X.shape[1]
data_dict = {"N": 0, "x": [], "y": [], "beta": [W_typezero, W_typeone]}
aux_data_dict = {"xi": torch.zeros(n_covars + 1, dtype=torch.bool)}
n_iterations = 20
teacher_actions = list(np.random.choice(n_covars, n_iterations))
model_file = None
for i in range(n_iterations):
act_in = teacher_actions[i]
if act_in != -1:
mask = aux_data_dict["xi"].numpy().copy()
mask[act_in] = False
masked = corr_mat[act_in, mask]
if masked.size != 0:
max_cross_corr = np.max(masked)
else:
max_cross_corr = 0.0
action = torch.tensor([corr_mat[act_in, -1], max_cross_corr])
outcome = user_simulator_typeone(action, torch.tensor(W_typeone, dtype=torch.double), a=1.0)
if outcome == 1.0:
aux_data_dict["xi"][act_in] = True
else:
aux_data_dict["xi"][act_in] = False
data_dict["x"].append(action.tolist())
data_dict["y"].append(outcome)
data_dict["N"] += 1
fit, model_file = test_stan.fit_model_w_education(data_dict, model_file)
arviz.plot_trace(fit)
pyplot.show()
def test_user_switching(educability=0.01):
training_X, training_y, test_X, test_y, _, _ = generate_data(n_noncollinear=50, n_collinear=100, n=100)
corr_mat = np.abs(np.corrcoef(torch.transpose(torch.cat((training_X, training_y.unsqueeze(dim=1)), dim=1), 0, 1)))
sns.heatmap(corr_mat)
pyplot.show()
W_typezero = [5.0, 0.0]
W_typeone = [5.0, -5.0]
n_covars = training_X.shape[1]
data_dict = {"N": 0, "x": [], "y": [], "beta": [W_typezero, W_typeone], "educability": educability,
"forgetting": 0.0}
aux_data_dict = {"xi": torch.zeros(n_covars + 1, dtype=torch.bool)}
n_iterations = 100
recommend_actions = list(np.random.choice(n_covars, n_iterations))
educate_or_recommend = list(np.random.choice(2, n_iterations, p=(0.5, 0.5)))
educate_or_recommend[0] = 1
model_file = None
user_type = 0
change_point = 0
for i in range(n_iterations):
#print("Step: {}".format(i))
if educate_or_recommend[i] == 0:
act_in = -1
else:
act_in = recommend_actions[i]
if act_in != -1:
mask = aux_data_dict["xi"].numpy().copy()
mask[act_in] = False
masked = corr_mat[act_in, mask]
if masked.size != 0:
max_cross_corr = np.max(masked)
else:
max_cross_corr = 0.0
action = torch.tensor([corr_mat[act_in, -1], max_cross_corr])
outcome = user_simulator_switching(action, torch.tensor([W_typezero, W_typeone], dtype=torch.double), a=1.0,
educability=data_dict["educability"], user_type=user_type)
if outcome == 1:
aux_data_dict["xi"][act_in] = True
else:
aux_data_dict["xi"][act_in] = False
data_dict["x"].append(action.tolist())
data_dict["y"].append(outcome)
else:
_user_type = 0 + user_type
user_type = user_simulator_switching(act_in, torch.tensor([W_typezero, W_typeone], dtype=torch.double),
a=1.0, educability=data_dict["educability"], user_type=user_type)
action = [-1.0, -1.0]
outcome = 0
data_dict["x"].append(action)
data_dict["y"].append(outcome)
if user_type == 1 and _user_type == 0:
print("State Changed to Type 1 at iteration: {}".format(i))
change_point += i
data_dict["N"] += 1
fit, model_file = test_stan.fit_model_w_education(data_dict, model_file)
# if i % 100 ==0:
s = fit.summary()
print(fit)
arviz.plot_trace(fit)
pyplot.show()
        summary = pandas.DataFrame(s['summary'], columns=s['summary_colnames'], index=s['summary_rownames'])
# Copyright (c) Meta Platforms, Inc. and affiliates. All rights reserved.
import numpy as np
import pandas as pd
from pathlib import Path
import matplotlib.pyplot as plt
import sys
sys.path.append("..")
from plot_config import *
# need cameraready for original thresh
rundata = list(Path("../data/cameraready/").glob("*out.csv"))
sensitivity_rundata = list(Path("../data/thresh_sensitivity").glob("*out.csv"))
def compile_results():
orig_dfiles = pd.concat([pd.read_csv(d) for d in rundata])
thresh_dfiles = pd.concat([pd.read_csv(d) for d in sensitivity_rundata])
# the hartmann thresh=0.5 runs were duplicated so drop them
idx = np.logical_not(np.logical_and(
thresh_dfiles.problem == "hartmann6_binary",
thresh_dfiles.opt_strat_target == 0.5,
))
    df = pd.concat([orig_dfiles, thresh_dfiles[idx]])
import pickle
import pandas as pd
import numpy as np
import xlsxwriter
import csv
import random
from collections import defaultdict
from sklearn.preprocessing import MultiLabelBinarizer
from sklearn.decomposition import PCA
from sklearn.preprocessing import scale, MinMaxScaler, StandardScaler
from sklearn.preprocessing import MultiLabelBinarizer
from sklearn.multiclass import OneVsRestClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import fbeta_score, precision_score, make_scorer, average_precision_score, recall_score
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier
from sklearn.naive_bayes import GaussianNB
import datetime
from sklearn.metrics import auc, roc_curve, roc_auc_score
mlb = MultiLabelBinarizer()
misc = list()
df = pd.read_excel("../data/misc.xlsx")
for i in df.MISCELLANEOUS:
misc.append(i)
all_se = list()
df = pd.read_csv("../data/unique_SE.csv")
for i in df.side_effect_name:
all_se.append(i)
s1 = set(all_se)
s2 = set(misc)
s3 = list(s1 - s2)
print ("Total:",len(s1))
print ("Misc:",len(s2))
print ("Total excluding misc:",len(s3))
l1 = s3
columns = [
'stitch_id_flat',
'stitch_id_sterio',
'umls_cui_from_label',
'meddra_type',
'umls_cui_from_meddra',
'side_effect_name',
]
sedf = pd.read_table('../data/meddra_all_se.tsv', names=columns)
sedf.drop(sedf[sedf.meddra_type == "LLT"].index, inplace=True)
sedf = sedf.groupby('stitch_id_flat').side_effect_name.apply(list).reset_index()
sedf['labels'] = None
d2 = pd.read_excel("../data/2d_prop.xlsx")
d3 = pd.read_excel("../data/3d_prop.xlsx")
#!/usr/bin/env python3
"""
USAGE:
python create-protein-table.py --infile_peptide input.fa --infile_taxonomy
tax.txt --outfile_json protein-species-map.json --output output_table.txt
[--delim delimiter --column column]
Generates table useable for taxonomic placment with EUKulele.
If no delimiter is provided, default is '/'. If no column header is provided,
default is SOURCE_ID.
python EUKulele/scripts/create_protein_table.py --infile_peptide
EUKulele/tests/aux_data/mmetsp/reference-pep-trunc.pep.faa --infile_taxonomy
EUKulele/tests/aux_data/mmetsp/taxonomy-table.txt --outfile_json
EUKulele/tests/aux_data/mmetsp/protein-map.json --output
EUKulele/tests/aux_data/mmetsp/tax-table.txt --delim "/"
--strain_col_id strain_name --taxonomy_col_id taxonomy --column SOURCE_ID
create_protein_table.py --infile_peptide reference.pep.fa --infile_taxonomy
taxonomy-table.txt --outfile_json prot-map.json --output tax-table.txt
--delim "/" --col_source_id strain_name --taxonomy_col_id taxonomy --column 2
python
/vortexfs1/omics/alexander/akrinos/remodeling/EUKulele/scripts/create_protein_table.py
--infile_peptide
/vortexfs1/omics/alexander/akrinos/EUKulele-Reference/phylodb_db/reference.pep.fa
--infile_taxonomy
/vortexfs1/omics/alexander/akrinos/EUKulele-Reference/phylodb_db/tax-table.txt
--outfile_json
/vortexfs1/omics/alexander/akrinos/EUKulele-Reference/phylodb_db/prot-map.json
--output
/vortexfs1/omics/alexander/akrinos/EUKulele-Reference/phylodb_db/taxonomy_table.txt
--delim "\t" --strain_col_id strain_name --taxonomy_col_id taxonomy --column 2
"""
import os
import argparse
import json
from Bio import SeqIO
import pandas as pd
def createProteinTable(args=None):
'''
Main function; intended to parse and create required files
for EUKulele database creation.
'''
parser = argparse.ArgumentParser()
parser.add_argument('--infile_peptide', type = list, nargs='+', required=True)
# this should be given as a list of the input files
parser.add_argument('--infile_taxonomy', default='')
# the original taxonomy file
parser.add_argument('--outfile_json', default = 'prot-map.json')
# the protein json file to be written
parser.add_argument('--output', default = 'tax-table.txt')
# the output taxonomy table (formatted) to be written
parser.add_argument('--delim', type=str, default = '/')
parser.add_argument('--col_source_id', type=str, default = 'Source_ID')
# the column which indicates the name of the strain in the taxonomy file
parser.add_argument('--taxonomy_col_id', type=str, default = 'taxonomy')
# the column which indicates the taxonomy of the strain in the taxonomy file
parser.add_argument('--column', type=str, default = 'SOURCE_ID')
# can be numeric, zero-indexed, if it's a delimited part of header
# set to true if there is a column called "taxonomy" that we wish to split
parser.add_argument('--reformat_tax', dest='reformat', default=False, action='store_true')
parser.add_argument('--euk-prot', dest='eukprot',
default=False, action='store_true') # eukprot's taxonomy is too unique
if args is not None:
args = parser.parse_args(args)
else:
args = parser.parse_args()
# if the input is a folder, you also need to add the first token in the
# underscore-separated name to a dictionary for that
# one - ultimately we want to know which MMETSP or whatever it came from
odict = {}
for curr_pepfile in list(args.infile_peptide):
pepfile = "".join(curr_pepfile)
for record in SeqIO.parse(pepfile, "fasta"):
header = record.description
rid = record.id.replace(".","N") #record.id.split(".")[0] #record.id.replace(".","N")
counter = 2
while rid in odict:
if "_" in rid:
rid = "_".join(rid.split("_")[0:-1]) + "_" + str(counter)
else:
rid = rid + "_" + str(counter)
counter = counter + 1
if 't' in args.delim: # why is this tab thing not working otherwise?? even the equality
tester = "".join(list(str(header))).replace('\t', ' ')
hlist = tester.split(" ")
else:
header = str(header).replace(args.delim, "hello")
hlist = header.split("hello")
if len(args.infile_peptide) > 1:
# if there is a list of files, use the filename as the ID
sid = pepfile.split("/")[-1].split("_")[0]
odict[rid] = sid
elif args.column.isdigit():
sid = hlist[int(args.column)]
odict[rid] = sid
else:
for h_curr in hlist:
if args.column in h_curr: #h.startswith(args.column):
sid = h_curr.split('=')[1].strip()
odict[rid] = sid
break
print("Modifying...",pepfile,flush=True)
os.system("cut -f 1 " + str(pepfile) + " > " + str(pepfile) + ".tester.pep.fa")
os.system("perl -i -pe 's/$/_$seen{$_}/ if ++$seen{$_}>1 and /^>/; ' " + \
str(pepfile) + ".tester.pep.fa")
os.system("mv " + str(pepfile) + ".tester.pep.fa " + str(pepfile))
tax_file = pd.read_csv(args.infile_taxonomy, sep = "\t", encoding='latin-1')
if args.reformat:
colnames_tax = ["Source_ID","Supergroup","Division","Class",
"Order","Family","Genus","Species"]
        tax_out = pd.DataFrame(columns=colnames_tax)
"""
Define the SeriesGroupBy and DataFrameGroupBy
classes that hold the groupby interfaces (and some implementations).
These are user facing as the result of the ``df.groupby(...)`` operations,
which here returns a DataFrameGroupBy object.
"""
from __future__ import annotations
from collections import abc
from functools import partial
from textwrap import dedent
from typing import (
Any,
Callable,
Hashable,
Iterable,
Mapping,
NamedTuple,
TypeVar,
Union,
cast,
)
import warnings
import numpy as np
from pandas._libs import reduction as libreduction
from pandas._typing import (
ArrayLike,
Manager,
Manager2D,
SingleManager,
)
from pandas.util._decorators import (
Appender,
Substitution,
doc,
)
from pandas.core.dtypes.common import (
ensure_int64,
is_bool,
is_categorical_dtype,
is_dict_like,
is_integer_dtype,
is_interval_dtype,
is_scalar,
)
from pandas.core.dtypes.missing import (
isna,
notna,
)
from pandas.core import (
algorithms,
nanops,
)
from pandas.core.apply import (
GroupByApply,
maybe_mangle_lambdas,
reconstruct_func,
validate_func_kwargs,
)
from pandas.core.base import SpecificationError
import pandas.core.common as com
from pandas.core.construction import create_series_with_explicit_dtype
from pandas.core.frame import DataFrame
from pandas.core.generic import NDFrame
from pandas.core.groupby import base
from pandas.core.groupby.groupby import (
GroupBy,
_agg_template,
_apply_docs,
_transform_template,
warn_dropping_nuisance_columns_deprecated,
)
from pandas.core.indexes.api import (
Index,
MultiIndex,
all_indexes_same,
)
from pandas.core.series import Series
from pandas.core.util.numba_ import maybe_use_numba
from pandas.plotting import boxplot_frame_groupby
# TODO(typing) the return value on this callable should be any *scalar*.
AggScalar = Union[str, Callable[..., Any]]
# TODO: validate types on ScalarResult and move to _typing
# Blocked from using by https://github.com/python/mypy/issues/1484
# See note at _mangle_lambda_list
ScalarResult = TypeVar("ScalarResult")
class NamedAgg(NamedTuple):
column: Hashable
aggfunc: AggScalar
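# Typical usage of NamedAgg with DataFrameGroupBy.agg (for reference):
#   df.groupby("key").agg(min_val=NamedAgg(column="val", aggfunc="min"))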
def generate_property(name: str, klass: type[DataFrame | Series]):
"""
Create a property for a GroupBy subclass to dispatch to DataFrame/Series.
Parameters
----------
name : str
klass : {DataFrame, Series}
Returns
-------
property
"""
def prop(self):
return self._make_wrapper(name)
parent_method = getattr(klass, name)
prop.__doc__ = parent_method.__doc__ or ""
prop.__name__ = name
return property(prop)
def pin_allowlisted_properties(
klass: type[DataFrame | Series], allowlist: frozenset[str]
):
"""
Create GroupBy member defs for DataFrame/Series names in a allowlist.
Parameters
----------
klass : DataFrame or Series class
class where members are defined.
allowlist : frozenset[str]
Set of names of klass methods to be constructed
Returns
-------
class decorator
Notes
-----
Since we don't want to override methods explicitly defined in the
base class, any such name is skipped.
"""
def pinner(cls):
for name in allowlist:
if hasattr(cls, name):
# don't override anything that was explicitly defined
# in the base class
continue
prop = generate_property(name, klass)
setattr(cls, name, prop)
return cls
return pinner
@pin_allowlisted_properties(Series, base.series_apply_allowlist)
class SeriesGroupBy(GroupBy[Series]):
_apply_allowlist = base.series_apply_allowlist
def _wrap_agged_manager(self, mgr: Manager) -> Series:
if mgr.ndim == 1:
mgr = cast(SingleManager, mgr)
single = mgr
else:
mgr = cast(Manager2D, mgr)
single = mgr.iget(0)
ser = self.obj._constructor(single, name=self.obj.name)
# NB: caller is responsible for setting ser.index
return ser
def _get_data_to_aggregate(self) -> SingleManager:
ser = self._obj_with_exclusions
single = ser._mgr
return single
def _iterate_slices(self) -> Iterable[Series]:
yield self._selected_obj
_agg_examples_doc = dedent(
"""
Examples
--------
>>> s = pd.Series([1, 2, 3, 4])
>>> s
0 1
1 2
2 3
3 4
dtype: int64
>>> s.groupby([1, 1, 2, 2]).min()
1 1
2 3
dtype: int64
>>> s.groupby([1, 1, 2, 2]).agg('min')
1 1
2 3
dtype: int64
>>> s.groupby([1, 1, 2, 2]).agg(['min', 'max'])
min max
1 1 2
2 3 4
The output column names can be controlled by passing
the desired column names and aggregations as keyword arguments.
>>> s.groupby([1, 1, 2, 2]).agg(
... minimum='min',
... maximum='max',
... )
minimum maximum
1 1 2
2 3 4
.. versionchanged:: 1.3.0
The resulting dtype will reflect the return value of the aggregating function.
>>> s.groupby([1, 1, 2, 2]).agg(lambda x: x.astype(float).min())
1 1.0
2 3.0
dtype: float64
"""
)
@Appender(
_apply_docs["template"].format(
input="series", examples=_apply_docs["series_examples"]
)
)
def apply(self, func, *args, **kwargs):
return super().apply(func, *args, **kwargs)
@doc(_agg_template, examples=_agg_examples_doc, klass="Series")
def aggregate(self, func=None, *args, engine=None, engine_kwargs=None, **kwargs):
if maybe_use_numba(engine):
with self._group_selection_context():
data = self._selected_obj
result = self._aggregate_with_numba(
data.to_frame(), func, *args, engine_kwargs=engine_kwargs, **kwargs
)
index = self.grouper.result_index
return self.obj._constructor(result.ravel(), index=index, name=data.name)
relabeling = func is None
columns = None
if relabeling:
columns, func = validate_func_kwargs(kwargs)
kwargs = {}
if isinstance(func, str):
return getattr(self, func)(*args, **kwargs)
elif isinstance(func, abc.Iterable):
# Catch instances of lists / tuples
# but not the class list / tuple itself.
func = maybe_mangle_lambdas(func)
ret = self._aggregate_multiple_funcs(func)
if relabeling:
# error: Incompatible types in assignment (expression has type
# "Optional[List[str]]", variable has type "Index")
ret.columns = columns # type: ignore[assignment]
return ret
else:
cyfunc = com.get_cython_func(func)
if cyfunc and not args and not kwargs:
return getattr(self, cyfunc)()
if self.grouper.nkeys > 1:
return self._python_agg_general(func, *args, **kwargs)
try:
return self._python_agg_general(func, *args, **kwargs)
except KeyError:
# TODO: KeyError is raised in _python_agg_general,
# see test_groupby.test_basic
result = self._aggregate_named(func, *args, **kwargs)
# result is a dict whose keys are the elements of result_index
index = self.grouper.result_index
return create_series_with_explicit_dtype(
result, index=index, dtype_if_empty=object
)
agg = aggregate
def _aggregate_multiple_funcs(self, arg) -> DataFrame:
if isinstance(arg, dict):
# show the deprecation, but only if we
# have not shown a higher level one
# GH 15931
raise SpecificationError("nested renamer is not supported")
elif any(isinstance(x, (tuple, list)) for x in arg):
arg = [(x, x) if not isinstance(x, (tuple, list)) else x for x in arg]
# indicated column order
columns = next(zip(*arg))
else:
# list of functions / function names
columns = []
for f in arg:
columns.append(com.get_callable_name(f) or f)
arg = zip(columns, arg)
results: dict[base.OutputKey, DataFrame | Series] = {}
for idx, (name, func) in enumerate(arg):
key = base.OutputKey(label=name, position=idx)
results[key] = self.aggregate(func)
if any(isinstance(x, DataFrame) for x in results.values()):
from pandas import concat
res_df = concat(
results.values(), axis=1, keys=[key.label for key in results.keys()]
)
return res_df
indexed_output = {key.position: val for key, val in results.items()}
output = self.obj._constructor_expanddim(indexed_output, index=None)
output.columns = Index(key.label for key in results)
output = self._reindex_output(output)
return output
def _indexed_output_to_ndframe(
self, output: Mapping[base.OutputKey, ArrayLike]
) -> Series:
"""
Wrap the dict result of a GroupBy aggregation into a Series.
"""
assert len(output) == 1
values = next(iter(output.values()))
result = self.obj._constructor(values)
result.name = self.obj.name
return result
def _wrap_applied_output(
self,
data: Series,
values: list[Any],
not_indexed_same: bool = False,
) -> DataFrame | Series:
"""
Wrap the output of SeriesGroupBy.apply into the expected result.
Parameters
----------
data : Series
Input data for groupby operation.
values : List[Any]
Applied output for each group.
not_indexed_same : bool, default False
Whether the applied outputs are not indexed the same as the group axes.
Returns
-------
DataFrame or Series
"""
if len(values) == 0:
# GH #6265
return self.obj._constructor(
[],
name=self.obj.name,
index=self.grouper.result_index,
dtype=data.dtype,
)
assert values is not None
if isinstance(values[0], dict):
# GH #823 #24880
index = self.grouper.result_index
res_df = self.obj._constructor_expanddim(values, index=index)
res_df = self._reindex_output(res_df)
# if self.observed is False,
# keep all-NaN rows created while re-indexing
res_ser = res_df.stack(dropna=self.observed)
res_ser.name = self.obj.name
return res_ser
elif isinstance(values[0], (Series, DataFrame)):
return self._concat_objects(values, not_indexed_same=not_indexed_same)
else:
# GH #6265 #24880
result = self.obj._constructor(
data=values, index=self.grouper.result_index, name=self.obj.name
)
return self._reindex_output(result)
def _aggregate_named(self, func, *args, **kwargs):
# Note: this is very similar to _aggregate_series_pure_python,
# but that does not pin group.name
result = {}
initialized = False
for name, group in self:
object.__setattr__(group, "name", name)
output = func(group, *args, **kwargs)
output = libreduction.extract_result(output)
if not initialized:
# We only do this validation on the first iteration
libreduction.check_result_array(output, group.dtype)
initialized = True
result[name] = output
return result
@Substitution(klass="Series")
@Appender(_transform_template)
def transform(self, func, *args, engine=None, engine_kwargs=None, **kwargs):
return self._transform(
func, *args, engine=engine, engine_kwargs=engine_kwargs, **kwargs
)
def _cython_transform(
self, how: str, numeric_only: bool = True, axis: int = 0, **kwargs
):
assert axis == 0 # handled by caller
obj = self._selected_obj
try:
result = self.grouper._cython_operation(
"transform", obj._values, how, axis, **kwargs
)
except NotImplementedError as err:
raise TypeError(f"{how} is not supported for {obj.dtype} dtype") from err
return obj._constructor(result, index=self.obj.index, name=obj.name)
def _transform_general(self, func: Callable, *args, **kwargs) -> Series:
"""
        Transform with a callable ``func``.
"""
assert callable(func)
klass = type(self.obj)
results = []
for name, group in self:
# this setattr is needed for test_transform_lambda_with_datetimetz
object.__setattr__(group, "name", name)
res = func(group, *args, **kwargs)
results.append(klass(res, index=group.index))
# check for empty "results" to avoid concat ValueError
if results:
from pandas.core.reshape.concat import concat
concatenated = concat(results)
result = self._set_result_index_ordered(concatenated)
else:
result = self.obj._constructor(dtype=np.float64)
result.name = self.obj.name
return result
def _can_use_transform_fast(self, result) -> bool:
return True
def filter(self, func, dropna: bool = True, *args, **kwargs):
"""
Return a copy of a Series excluding elements from groups that
do not satisfy the boolean criterion specified by func.
Parameters
----------
func : function
To apply to each group. Should return True or False.
        dropna : bool, default True
            Drop groups that do not pass the filter. If False, groups that
            evaluate False are filled with NaNs.
Notes
-----
Functions that mutate the passed object can produce unexpected
behavior or errors and are not supported. See :ref:`gotchas.udf-mutation`
for more details.
Examples
--------
>>> df = pd.DataFrame({'A' : ['foo', 'bar', 'foo', 'bar',
... 'foo', 'bar'],
... 'B' : [1, 2, 3, 4, 5, 6],
... 'C' : [2.0, 5., 8., 1., 2., 9.]})
>>> grouped = df.groupby('A')
>>> df.groupby('A').B.filter(lambda x: x.mean() > 3.)
1 2
3 4
5 6
Name: B, dtype: int64
Returns
-------
filtered : Series
"""
if isinstance(func, str):
wrapper = lambda x: getattr(x, func)(*args, **kwargs)
else:
wrapper = lambda x: func(x, *args, **kwargs)
# Interpret np.nan as False.
def true_and_notna(x) -> bool:
b = wrapper(x)
return b and notna(b)
try:
indices = [
self._get_index(name) for name, group in self if true_and_notna(group)
]
except (ValueError, TypeError) as err:
raise TypeError("the filter must return a boolean result") from err
filtered = self._apply_filter(indices, dropna)
return filtered
def nunique(self, dropna: bool = True) -> Series:
"""
Return number of unique elements in the group.
Returns
-------
Series
Number of unique values within each group.
"""
ids, _, _ = self.grouper.group_info
val = self.obj._values
codes, _ = algorithms.factorize(val, sort=False)
sorter = np.lexsort((codes, ids))
codes = codes[sorter]
ids = ids[sorter]
# group boundaries are where group ids change
# unique observations are where sorted values change
idx = np.r_[0, 1 + np.nonzero(ids[1:] != ids[:-1])[0]]
inc = np.r_[1, codes[1:] != codes[:-1]]
# 1st item of each group is a new unique observation
mask = codes == -1
if dropna:
inc[idx] = 1
inc[mask] = 0
else:
inc[mask & np.r_[False, mask[:-1]]] = 0
inc[idx] = 1
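        # reduceat sums the "new unique value" flags within each group's contiguous block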
out = np.add.reduceat(inc, idx).astype("int64", copy=False)
if len(ids):
# NaN/NaT group exists if the head of ids is -1,
# so remove it from res and exclude its index from idx
if ids[0] == -1:
res = out[1:]
idx = idx[np.flatnonzero(idx)]
else:
res = out
else:
res = out[1:]
ri = self.grouper.result_index
# we might have duplications among the bins
if len(res) != len(ri):
res, out = np.zeros(len(ri), dtype=out.dtype), res
res[ids[idx]] = out
result = self.obj._constructor(res, index=ri, name=self.obj.name)
return self._reindex_output(result, fill_value=0)
@doc(Series.describe)
def describe(self, **kwargs):
return super().describe(**kwargs)
def value_counts(
self,
normalize: bool = False,
sort: bool = True,
ascending: bool = False,
bins=None,
dropna: bool = True,
):
from pandas.core.reshape.merge import get_join_indexers
from pandas.core.reshape.tile import cut
ids, _, _ = self.grouper.group_info
val = self.obj._values
def apply_series_value_counts():
return self.apply(
Series.value_counts,
normalize=normalize,
sort=sort,
ascending=ascending,
bins=bins,
)
if bins is not None:
if not np.iterable(bins):
# scalar bins cannot be done at top level
# in a backward compatible way
return apply_series_value_counts()
elif is_categorical_dtype(val.dtype):
# GH38672
return apply_series_value_counts()
# groupby removes null keys from groupings
mask = ids != -1
ids, val = ids[mask], val[mask]
if bins is None:
lab, lev = algorithms.factorize(val, sort=True)
llab = lambda lab, inc: lab[inc]
else:
# lab is a Categorical with categories an IntervalIndex
lab = cut(Series(val), bins, include_lowest=True)
# error: "ndarray" has no attribute "cat"
lev = lab.cat.categories # type: ignore[attr-defined]
# error: No overload variant of "take" of "_ArrayOrScalarCommon" matches
# argument types "Any", "bool", "Union[Any, float]"
lab = lev.take( # type: ignore[call-overload]
# error: "ndarray" has no attribute "cat"
lab.cat.codes, # type: ignore[attr-defined]
allow_fill=True,
# error: Item "ndarray" of "Union[ndarray, Index]" has no attribute
# "_na_value"
fill_value=lev._na_value, # type: ignore[union-attr]
)
llab = lambda lab, inc: lab[inc]._multiindex.codes[-1]
if | is_interval_dtype(lab.dtype) | pandas.core.dtypes.common.is_interval_dtype |
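# --- Hedged illustration (not from the original source) ----------------------
# A minimal sketch of the two SeriesGroupBy.value_counts paths above: plain
# values are factorized, while list-like `bins` route through pd.cut, whose
# result is categorical with an IntervalIndex of categories (hence the interval
# dtype check), and scalar `bins` fall back to Series.value_counts via apply.
def _example_groupby_value_counts():
    import pandas as pd

    s = pd.Series([1, 2, 2, 3, 4, 4], index=["a", "a", "a", "b", "b", "b"])
    grouped = s.groupby(level=0)
    plain = grouped.value_counts()         # factorize path
    binned = grouped.value_counts(bins=2)  # scalar bins -> per-group Series.value_counts
    return plain, binned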
"""
This file contains code to load EBA, eGrid and AMPD datasets. The file provides
one class for each data set. Data can either be raw (of the type that is output
from the parse.py script) or cleaned (outputs from clean.py).
The data handler classes provide methods to access the data in different ways
and perform some checks.
"""
from os.path import join
import pandas as pd
import logging
import json
import re
from gridemissions import config
from gridemissions.eia_api import KEYS, BAs, EIA_ALLOWED_SERIES_ID
class BaData(object):
"""Class to handle BA-level data. The EBA class provides generation,
consumption, the trade matrix and total interchange either at the BA or at
the regional (IEA-defined) level. User guide:
https://www.eia.gov/realtime_grid/docs/userguide-knownissues.pdf
    The class is a light wrapper around the pd.DataFrame object, which is the
format in which the underlying data are stored.
The main purpose of the class is to provide convenience functions that make
handling the data easier.
Timestamps are in UTC.
EBA data columns
----------------
D: Demand
NG: Net Generation
TI: Total Interchange - (positive if exports)
ID: Interchange with directly connected balancing authorities - (positive
if exports)
Consistency requirements
------------------------
- Interchange data is antisymmetric: ID[i,j] == -ID[j,i]
- Total trade with interchange data: TI == sum(ID[i,:])
- Balance equation for total trade, demand, generation: TI + D == NG
Methods
-------
get_cols(self, r) : generate column names for regions r for a given field.
Attributes
----------
regions : are in alphabetical order
df : raw dataframe
"""
def __init__(self, fileNm=None, df=None, variable="E", dataset="EBA", step=None):
"""
Initialize the BaData object
There are two preferred ways to initialize this object: by passing a `pd.DataFrame`,
or by passing a file name from which to read the data using `pd.read_csv`.
Parameters
----------
fileNm: str, default None
fileNm from which to read the data
df: pd.DataFrame, default None
data
dataset: str, default "EBA"
base name for file in which data are stored. Parameter will be deprecated soon and
should not be used.
step: int, default None
processing step at which to load the data. Parameter will be deprecated soon and
should not be used.
"""
self.logger = logging.getLogger("load")
if df is not None:
self.df = df
else:
if step is not None:
fileNm = join(
config["DATA_PATH"], "analysis", "%s_%d.csv" % (dataset, step)
)
if fileNm is None:
fileNm = join(config["DATA_PATH"], "analysis", "EBA_0.csv")
self.df = pd.read_csv(fileNm, index_col=0, parse_dates=True)
self.variable = variable
self.regions = self._parse_data_cols()
self.fileNm = fileNm
self.KEY = KEYS[variable]
def get_cols(self, r=None, field="D"):
"""
Retrieve column name(s) corresponding to region(s) and a field
Parameters
----------
r: str or list of str, default None
regions. If None, data is returned for all regions
field: str
field for which to load columns. Used to index in self.KEY
Returns
-------
cols: list of str
"""
if field not in self.KEY:
raise ValueError(f"{field} not in str(list(self.KEY.keys()))")
if field != "ID":
if r is None:
r = self.regions
if isinstance(r, str):
r = [r]
return [self.KEY[field] % ir for ir in r]
else:
if r is None:
r = self.regions
if isinstance(r, str):
r = [r]
return [
self.KEY[field] % (ir, ir2)
for ir in r
for ir2 in self.regions
if self.KEY[field] % (ir, ir2) in self.df.columns
]
def get_trade_partners(self, r):
"""
Return list of regions that trade with a given region
Parameter
---------
r: str
region for which to search
"""
partners = []
for r2 in self.regions:
if (self.KEY["ID"] % (r, r2) in self.df.columns) and (
self.KEY["ID"] % (r2, r) in self.df.columns
):
partners += [r2]
return partners
def _parse_data_cols(self):
"""
Checks:
- Consistent number of regions for demand / generation / total
interchange / trade matrix
Returns the list of regions
"""
regions = set([re.split(r"\.|-|_", el)[1] for el in self.df.columns])
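        # Column names follow the EIA series-id pattern, e.g. "EBA.CISO-ALL.D.H",
        # so splitting on ".", "-" or "_" leaves the BA code at index 1.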
D_cols = [
re.split(r"\.|-|_", el)[1]
for el in self.df.columns
if "D" in re.split(r"\.|-|_", el)
]
# The check in [3, 5] was added to filter out the generation columns
# by source in electricity
NG_cols = [
re.split(r"\.|-|_", el)[1]
for el in self.df.columns
if (
("NG" in re.split(r"\.|-|_", el))
and (len(re.split(r"\.|-|_", el)) in [3, 5])
)
]
TI_cols = [
re.split(r"\.|-|_", el)[1]
for el in self.df.columns
if "TI" in re.split(r"\.|-|_", el)
]
ID_cols = [
re.split(r"\.|-|_", el)[1]
for el in self.df.columns
if "ID" in re.split(r"\.|-|_", el)
]
ID_cols2 = [
re.split(r"\.|-|_", el)[2]
for el in self.df.columns
if "ID" in re.split(r"\.|-|_", el)
]
self.D_cols = D_cols
self.NG_cols = NG_cols
self.TI_cols = TI_cols
self.ID_cols = ID_cols
self.ID_cols2 = ID_cols2
        if len(NG_cols) != len(D_cols):
            self.logger.warning("Inconsistent columns: len(NG_cols) != len(D_cols)")
        if set(NG_cols) != regions:
            self.logger.warning("Inconsistent columns: set(NG_cols) != regions")
        if "i" not in self.variable:
            if len(NG_cols) != len(TI_cols):
                self.logger.warning("Inconsistent columns: len(NG_cols) != len(TI_cols)")
            if set(NG_cols) != set(ID_cols):
                self.logger.warning("Inconsistent columns: set(NG_cols) != set(ID_cols)")
            if set(NG_cols) != set(ID_cols2):
                self.logger.warning("Inconsistent columns: set(NG_cols) != set(ID_cols2)")
return sorted(list(regions))
def get_trade_out(self, r=None):
if r is None:
r = self.regions
if isinstance(r, str):
r = [r]
cols = []
for ir2 in self.regions:
cols += [self.KEY["ID"] % (ir, ir2) for ir in r]
return [c for c in cols if c in self.df.columns]
def checkBA(self, ba, tol=1e-2, log_level=logging.INFO):
"""
Sanity check function
TODO: add check for different generation sources, if data is present
"""
logger = self.logger
log_level_old = logger.level
logger.setLevel(log_level)
logger.debug("Checking %s" % ba)
partners = self.get_trade_partners(ba)
# NaNs
for field in ["D", "NG", "TI"]:
ind_na = self.df.loc[:, self.get_cols(r=ba, field=field)[0]].isna()
cnt_na = ind_na.sum()
if cnt_na != 0:
logger.error(
"There are still %d nans for %s field %s" % (cnt_na, ba, field)
)
for ba2 in partners:
cnt_na = self.df.loc[:, self.KEY["ID"] % (ba, ba2)].isna().sum()
if cnt_na != 0:
logger.error("There are still %d nans for %s-%s" % (cnt_na, ba, ba2))
# TI+D == NG
res1 = self.df.loc[:, self.get_cols(r=ba, field="NG")[0]] - (
self.df.loc[:, self.get_cols(r=ba, field="D")[0]]
+ self.df.loc[:, self.get_cols(r=ba, field="TI")[0]]
)
if (res1.abs() > tol).sum() != 0:
logger.error("%s: TI+D == NG violated" % ba)
# TI == ID.sum()
res2 = self.df.loc[:, self.get_cols(r=ba, field="TI")[0]] - self.df.loc[
:, [self.KEY["ID"] % (ba, ba2) for ba2 in partners]
].sum(axis=1)
if (res2.abs() > tol).sum() != 0:
logger.error("%s: TI == ID.sum()violated" % ba)
# ID[i,j] == -ID[j,i]
for ba2 in partners:
res3 = (
self.df.loc[:, self.KEY["ID"] % (ba, ba2)]
+ self.df.loc[:, self.KEY["ID"] % (ba2, ba)]
)
if (res3.abs() > tol).sum() != 0:
logger.error("%s-%s: ID[i,j] == -ID[j,i] violated" % (ba, ba2))
# D and NG negative
for field in ["D", "NG"]:
ind_neg = self.df.loc[:, self.get_cols(r=ba, field=field)[0]] < 0
cnt_neg = ind_neg.sum()
if cnt_neg != 0:
logger.error(
"%s: there are %d <0 values for field %s" % (ba, cnt_neg, field)
)
logger.setLevel(log_level_old)
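# --- Hedged usage sketch (not part of the original module) -------------------
# Illustrative only: assumes a cleaned EBA csv produced by this pipeline exists
# at `file_nm`; the region is taken from whatever the file contains.
def _example_badata_usage(file_nm):
    data = BaData(fileNm=file_nm)
    region = data.regions[0]
    demand_col = data.get_cols(r=region, field="D")  # demand column name(s)
    partners = data.get_trade_partners(region)
    data.checkBA(region)  # logs violations of TI + D == NG, TI == sum(ID), ID antisymmetry
    return demand_col, partners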
def convert_raw_eba(file_name, file_name_out=None):
logger = logging.getLogger("root")
logger.setLevel(logging.DEBUG)
logger.debug("Loading raw JSON")
with open(f"{file_name}.txt") as fr:
lines = fr.readlines()
# convert json - each line is a dictionary
data = []
for l in lines:
data += [json.loads(l)]
# separate id data from ts data
ts_data = [d for d in data if len(d.keys()) == 10]
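    # (records with 10 keys carry the time-series data; 5-key records hold only series metadata)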
# id_list = [d for d in data if len(d.keys()) == 5]
def choose(el, ba_list):
series_id = el["series_id"]
if ".HL" in series_id:
return False
if ".ID.H" in series_id:
return (re.split(r"\.|-", series_id)[1] in ba_list) and (
re.split(r"\.|-", series_id)[2] in ba_list
)
else:
return re.split(r"\.|-", series_id)[1] in ba_list
logger.debug("Converting to dataframe")
df = pd.concat(
[
pd.DataFrame(el["data"], columns=["datetime", el["series_id"]]).set_index(
"datetime", drop=True
)
for el in ts_data
if el["series_id"] in EIA_ALLOWED_SERIES_ID
],
axis=1,
)
logger.debug("Converting to datetime and sorting")
df.index = | pd.to_datetime(df.index) | pandas.to_datetime |
import pandas as pd
import numpy as np
df = pd.DataFrame({
'nome' : ['André', 'Junior', 'Nascimento'],
'idade': [38, 37, 36],
'cidade': ['RJ','SP','MG']
})
print(df)
print('\n')
vetor = np.array([5, 6, 7, 8])
v = | pd.Series(vetor) | pandas.Series |
"""Provides utilities for working with weather data, and for calculating the movement
of the sun from the perspective of specific locations.
Constants defined in this module
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
These can all be overwritten by setting environment variables with the same name.
* ``SKYFIELD_DATA_DIR``. Path to directory to save skyfield ephemeris data. By default,
``~/skyfield-data`` will be used.
* ``CAMFI_EPHEMERIS``. Name of ephemeris file to use for calculating sunset and
twilight. By default, ``de440s.bsp`` will be used. See the `choosing an ephemeris`_
in the Skyfield documentation for possible other ephemeris files to use. Note that the
ephemeris file will be loaded when this module is imported. The first time this
happens, the ephemeris file will be downloaded (de440s.bsp is about 37 MB).
**Note:** For testing purposes,
a tiny abbreviated ephemeris file is included
in the Camfi git repository.
Without this, CI testing wouldn't work.
The included ephemeris limits
the date range to a few days in 2021,
so it should not be used when running Camfi normally.
To use the included ephemeris (for testing purposes only),
set the following environment variables
(assuming you are running the tests from the camfi repo root directory)::
SKYFIELD_DATA_DIR="camfi/test/data"
CAMFI_EPHEMERIS="test_ephem.bsp"
.. _choosing an ephemeris: https://rhodesmill.org/skyfield/planets.html#choosing-an-ephemeris
"""
from datetime import date, datetime, time, timedelta, timezone
import os
from pathlib import Path
from typing import Optional, Sequence
import numpy as np
import pandas as pd
from pydantic import (
BaseModel,
FilePath,
NonNegativeFloat,
validator,
)
from skyfield.api import Loader, wgs84
from skyfield import almanac
from camfi.util import Timezone, Field
# Initialise skyfield
SKYFIELD_DATA_DIR = os.getenv(
"SKYFIELD_DATA_DIR", str(Path("~/skyfield-data").expanduser())
)
CAMFI_EPHEMERIS = os.getenv("CAMFI_EPHEMERIS", "de440s.bsp")
_load = Loader(SKYFIELD_DATA_DIR)
ephemeris = _load(CAMFI_EPHEMERIS)
timescale = _load.timescale()
TWILIGHT_TRANSITIONS = {
1: "astronomical_twilight_start",
2: "nautical_twilight_start",
3: "civil_twilight_start",
4: "sunrise",
5: "astronomical_twilight_end",
6: "nautical_twilight_end",
7: "civil_twilight_end",
8: "sunset",
}
class Location(BaseModel):
"""Provides methods for working with locations.
Parameters
----------
name : str
Name of location.
lat : float
Decimal latitude.
lon : float
Decimal longitude.
elevation_m : NonNegativeFloat
Elevation in metres.
tz : timezone
Timezone offset. Can be given as ISO8601 timezone offset str (e.g. 'Z' or
'+10:00' or simply '+10').
Examples
--------
>>> Location(
... name="canberra",
... lat=-35.293056,
... lon=149.126944,
... elevation_m=578,
... tz="+10:00",
... )
Location(name='canberra', lat=-35.293056, lon=149.126944, elevation_m=578.0, tz=Timezone(datetime.timezone(datetime.timedelta(seconds=36000))))
>>> Location(
... name="greenwich",
... lat=51.48,
... lon=0,
... elevation_m=47,
... tz="Z",
... )
Location(name='greenwich', lat=51.48, lon=0.0, elevation_m=47.0, tz=Timezone(datetime.timezone.utc))
>>> Location(
... name="nyc",
... lat=40.712778,
... lon=-74.006111,
... elevation_m=10,
... tz="-05",
... )
Location(name='nyc', lat=40.712778, lon=-74.006111, elevation_m=10.0, tz=Timezone(datetime.timezone(datetime.timedelta(days=-1, seconds=68400))))
"""
name: str = Field(
..., description="Name of location. Used to link to camera placements."
)
lat: float = Field(..., description="Decimal latitude.")
lon: float = Field(..., description="Decimal longitude.")
elevation_m: NonNegativeFloat = Field(..., description="Elevation in metres.")
tz: Timezone = Field(..., description="ISO8601 timezone offset.")
class Config:
schema_extra = {
"description": "Contains spatial data on locations, including timezone."
}
@property
def _dark_twilight_day(self):
return almanac.dark_twilight_day(
ephemeris, wgs84.latlon(self.lat, self.lon, elevation_m=self.elevation_m)
)
def _get_tz_aware_dt(self, dt: datetime) -> datetime:
if dt.tzinfo is None:
dt = dt.replace(tzinfo=self.tz._timezone)
return dt
def twilight_state(self, dt: datetime) -> int:
"""Gets the twilight state for the location at the specified time(s).
The meanings of the returned integer values are
0. Dark of night.
1. Astronomical twilight.
2. Nautical twilight.
3. Civil twilight.
4. Daytime.
Parameters
----------
dt : datetime
datetime to evaluate. If timezone-naive, timezone will be taken from
self.tz.
Returns
-------
ts : int
Twilight value.
Examples
--------
>>> location = Location(
... name="canberra",
... lat=-35.293056,
... lon=149.126944,
... elevation_m=578,
... tz="+10:00",
... )
>>> location.twilight_state(datetime.fromisoformat("2021-07-28T12:00:00+10:00"))
4
>>> location.twilight_state(datetime.fromisoformat("2021-07-28T23:00:00+11:00"))
0
Timezone will be taken from self.tz if dt is timezone-naive.
>>> location.twilight_state(datetime.fromisoformat("2021-07-28T12:00:00"))
4
Skyfield provides mapping from these numbers to strings.
>>> almanac.TWILIGHTS[0]
'Night'
>>> almanac.TWILIGHTS[1]
'Astronomical twilight'
>>> almanac.TWILIGHTS[2]
'Nautical twilight'
>>> almanac.TWILIGHTS[3]
'Civil twilight'
>>> almanac.TWILIGHTS[4]
'Day'
"""
time = timescale.from_datetime(self._get_tz_aware_dt(dt))
return int(self._dark_twilight_day(time))
def twilight_states(self, datetimes: Sequence[datetime]) -> np.ndarray:
"""Like Location.twilight_state but operates on sequence of datetimes.
Parameters
----------
datetimes : Sequence[datetime]
datetimes to evaluate. If timezone-naive, timezone will be taken from
self.tz.
Returns
-------
ts : np.ndarray
Twilight values.
Examples
--------
>>> location = Location(
... name="canberra",
... lat=-35.293056,
... lon=149.126944,
... elevation_m=578,
... tz=timezone(timedelta(hours=10)),
... )
>>> datetimes = [
... datetime.fromisoformat("2021-07-28T12:00:00+10:00"),
... datetime.fromisoformat("2021-07-28T23:00:00+11:00"),
... datetime.fromisoformat("2021-07-28T12:00:00"),
... ]
>>> location.twilight_states(datetimes)
array([4, 0, 4])
"""
datetimes = [self._get_tz_aware_dt(dt) for dt in datetimes]
times = timescale.from_datetimes(datetimes)
return self._dark_twilight_day(times)
def search_sun_times(self, day: date) -> dict[str, datetime]:
"""Gets sunrise, sunset, and twilight times for a given date.
Parameters
----------
day : date
Day to get times from.
Returns
-------
twilight_times : dict[str, datetime]
dictionary with keys "astronomical_twilight_start",
"nautical_twilight_start", "civil_twilight_start", "sunrise", "sunset",
"nautical_twilight_end", "civil_twilight_end", "astronomical_twilight_end".
Examples
--------
>>> location = Location(
... name="canberra",
... lat=-35.293056,
... lon=149.126944,
... elevation_m=578,
... tz=timezone(timedelta(hours=10)),
... )
>>> day = date(2021, 7, 28)
>>> tt = location.search_sun_times(day)
The ordering of the transitions is as expected.
>>> tt["astronomical_twilight_start"] < tt["nautical_twilight_start"]
True
>>> tt["nautical_twilight_start"] < tt["civil_twilight_start"]
True
>>> tt["civil_twilight_start"] < tt["sunrise"]
True
>>> tt["sunrise"] < tt["sunset"]
True
>>> tt["sunset"] < tt["civil_twilight_end"]
True
>>> tt["civil_twilight_end"] < tt["nautical_twilight_end"]
True
>>> tt["nautical_twilight_end"] < tt["astronomical_twilight_end"]
True
And all of the datetimes are on the correct day.
>>> all(d.date() == day for d in tt.values())
True
"""
start_time = datetime.combine(date=day, time=time(0), tzinfo=self.tz._timezone)
end_time = start_time + timedelta(days=1)
t0 = timescale.from_datetime(start_time)
t1 = timescale.from_datetime(end_time)
times, twilight_types = almanac.find_discrete(t0, t1, self._dark_twilight_day)
twilight_transitions = (
np.roll(twilight_types, 1) > twilight_types
) * 5 + twilight_types
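        # find_discrete returns the state entered at each transition (0-4); when the
        # previous state was higher the sun is setting, so adding 5 maps dusk events
        # onto keys 5-8 of TWILIGHT_TRANSITIONS while dawn events keep keys 1-4.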
twilight_times: dict[str, datetime] = {}
for t, tt in zip(times, twilight_transitions):
twilight_times[TWILIGHT_TRANSITIONS[tt]] = t.utc_datetime().astimezone(
self.tz._timezone
)
return twilight_times
def get_sun_time_dataframe(self, days: Sequence[date]) -> pd.DataFrame:
"""Calls self.search_sun_times on each day in days, and builds a DataFrame of
sun times.
Parameters
----------
days : Sequence[date]
Dates which will become index for dataframe.
Returns
-------
sun_df : pd.DataFrame
DataFrame indexed by location and date, with columns
"astronomical_twilight_start", "nautical_twilight_start",
"civil_twilight_start", "sunrise", "sunset", "nautical_twilight_end",
"civil_twilight_end", "astronomical_twilight_end".
Examples
--------
>>> location = Location(
... name="canberra",
... lat=-35.293056,
... lon=149.126944,
... elevation_m=578,
... tz=timezone(timedelta(hours=10)),
... )
>>> days = [date(2021, 7, 23), date(2021, 7, 24), date(2021, 7, 25)]
>>> sun_df = location.get_sun_time_dataframe(days)
>>> np.all(sun_df["sunset"] > sun_df["sunrise"])
True
>>> sun_df
astronomical_twilight_start ... astronomical_twilight_end
location date ...
canberra 2021-07-23 2021-07-23 05:36:52.178788+10:00 ... 2021-07-23 18:43:25.223475+10:00
2021-07-24 2021-07-24 05:36:20.903963+10:00 ... 2021-07-24 18:43:59.629041+10:00
2021-07-25 2021-07-25 05:35:48.170485+10:00 ... 2021-07-25 18:44:34.315154+10:00
<BLANKLINE>
[3 rows x 8 columns]
"""
sun_times: dict[str, list[pd.Timestamp]] = {
"astronomical_twilight_start": [],
"nautical_twilight_start": [],
"civil_twilight_start": [],
"sunrise": [],
"sunset": [],
"nautical_twilight_end": [],
"civil_twilight_end": [],
"astronomical_twilight_end": [],
}
for day in days:
sun_time_dict = self.search_sun_times(day)
for key in sun_times.keys():
sun_times[key].append(pd.Timestamp(sun_time_dict[key]))
sun_times["date"] = [pd.Timestamp(day) for day in days]
sun_times["location"] = [self.name for _ in range(len(days))]
sun_df = pd.DataFrame(data=sun_times)
sun_df.set_index(["location", "date"], inplace=True)
return sun_df
class WeatherStation(BaseModel):
"""Contains information on a weather station.
Parameters
----------
location : Location
Location of weather station.
data_file : FilePath
Path to csv file containing weather data from weather station.
"""
location: Location = Field(..., description="Location of weather station.")
data_file: FilePath = Field(
...,
description=(
"Path to csv file containing weather data from weather station. "
"The firt 6 lines of the file are skipped, "
"and the 7th should contain column headers. "
"Should have one line per date. "
"Minimally, the first column should be date, in YYYY-mm-dd format. "
),
)
class Config:
schema_extra = {"description": "Contains information on a weather station."}
def load_dataframe(self):
"""Loads weather data from self.data_file into a pd.DataFrame
Returns
-------
weather_df : pd.DataFrame
DataFrame with daily weather data, indexed by "weather_station" and "date".
"""
weather_df = pd.read_csv(
self.data_file, skiprows=5, header=0, parse_dates=["date"]
)
weather_df["weather_station"] = self.location.name
weather_df.set_index(["weather_station", "date"], inplace=True)
return weather_df
class LocationWeatherStationCollector(BaseModel):
"""Contains lists of Locations and Weather stations, and a mapping between them.
Parameters
----------
locations : list[Location]
list of locations where cameras have been placed.
weather_stations : list[WeatherStation]
list of weather stations.
location_weather_station_mapping : dict[str, str]
A mapping between location names and weather_station names.
"""
locations: list[Location] = Field(
..., description="list of locations where cameras have been placed."
)
weather_stations: list[WeatherStation] = Field(
..., description="list of weather stations."
)
location_weather_station_mapping: dict[str, str] = Field(
..., description="A mapping between location names and weather_station names."
)
class Config:
schema_extra = {"description": "Defines Locations and Weather stations."}
@validator("location_weather_station_mapping")
def mapping_contains_all_locations(cls, v, values):
if "locations" not in values:
return v
for location in values["locations"]:
assert (
location.name in v
), f"{location.name} missing from location_weather_station_mapping."
return v
@validator("location_weather_station_mapping")
def all_weather_stations_included(cls, v, values):
if "weather_stations" not in values:
return v
weather_station_location_names = set(
ws.location.name for ws in values["weather_stations"]
)
for val in v.values():
assert (
val in weather_station_location_names
), f"Undefined weather station {val}. Either remove from mapping or define."
return v
def get_sun_time_dataframe(self, days: dict[str, Sequence[date]]) -> pd.DataFrame:
"""Calls .get_sun_time_dataframe on each location in self.locations, and builds
a DataFrame of sun times.
Parameters
----------
days : dict[str, Sequence[date]]
Mapping from location name to sequences of dates which will become index for
the dataframe.
Returns
-------
sun_df : pd.DataFrame
DataFrame indexed by location and date, with columns
"astronomical_twilight_start", "nautical_twilight_start",
"civil_twilight_start", "sunrise", "sunset", "nautical_twilight_end",
"civil_twilight_end", "astronomical_twilight_end".
"""
sun_dfs = []
for location in self.locations:
dates = days[location.name]
sun_dfs.append(location.get_sun_time_dataframe(dates))
return pd.concat(sun_dfs)
def get_weather_dataframe(self) -> pd.DataFrame:
"""Calls .load_dataframe() on each WeatherStation in self.weather_stations, and
builds a DataFrame of weather data.
Returns
-------
weather_df : pd.DataFrame
DataFrame with daily weather data, indexed by "weather_station" and "date".
"""
weather_dfs = []
for weather_station in self.weather_stations:
weather_dfs.append(weather_station.load_dataframe())
return | pd.concat(weather_dfs) | pandas.concat |
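# --- Hedged illustration (not part of the original module) -------------------
# Day length per date from the sun-time dataframe, reusing the illustrative
# Canberra coordinates from the doctests above.
def _example_day_length():
    from datetime import date, timedelta, timezone

    loc = Location(
        name="canberra",
        lat=-35.293056,
        lon=149.126944,
        elevation_m=578,
        tz=timezone(timedelta(hours=10)),
    )
    sun_df = loc.get_sun_time_dataframe([date(2021, 7, 23), date(2021, 7, 24)])
    return sun_df["sunset"] - sun_df["sunrise"]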
import os
import pandas as pd
import yaml
import glob
from datetime import datetime, timedelta
from dateutil.relativedelta import relativedelta
from tqdm import tqdm
def load_raw_data(cfg, save_raw_df=True, rate_class='all'):
'''
Load all entries for water consumption and combine into a single dataframe
:param cfg: project config
:param save_raw_df: Flag indicating whether to save the accumulated raw dataset
:param rate_class: Rate class to filter raw data by
:return: a Pandas dataframe containing all water consumption records
'''
cat_feats = cfg['DATA']['CATEGORICAL_FEATS']
num_feats = cfg['DATA']['NUMERICAL_FEATS']
bool_feats = cfg['DATA']['BOOLEAN_FEATS']
feat_names = ['CONTRACT_ACCOUNT', 'EFFECTIVE_DATE', 'END_DATE', 'CONSUMPTION'] + num_feats + bool_feats + cat_feats
raw_data_filenames = glob.glob(cfg['PATHS']['RAW_DATA_DIR'] + "/*.csv")
rate_class_str = 'W&S_' + rate_class.upper()
print('Loading raw data from spreadsheets.')
raw_df = pd.DataFrame()
for filename in tqdm(raw_data_filenames):
df = pd.read_csv(filename, encoding='ISO-8859-1', low_memory=False, index_col=False) # Load a water demand CSV
if rate_class_str in df['RATE_CLASS'].unique().tolist():
df = df[df['RATE_CLASS'] == rate_class_str] # Filter by a rate class if desired
for f in df.columns:
            if ' ' in f or '"' in f:
df.rename(columns={f: f.replace(' ', '').replace('"', '')}, inplace=True)
for f in feat_names:
if f not in df.columns:
if f in cat_feats:
df[f] = 'Unknown'
else:
df[f] = 0.0
if f in num_feats and df[f].dtype == 'object':
try:
df[f] = pd.to_numeric(df[f], errors='coerce')
df[f].fillna(0, inplace=True)
except Exception as e:
print("Exception ", e, " in file ", filename, " feature ", f)
df = df[feat_names]
df['EFFECTIVE_DATE'] = pd.to_datetime(df['EFFECTIVE_DATE'], errors='coerce')
df['END_DATE'] = pd.to_datetime(df['END_DATE'], errors='coerce')
raw_df = pd.concat([raw_df, df], axis=0, ignore_index=True) # Concatenate next batch of data
shape1 = raw_df.shape
raw_df.drop_duplicates(['CONTRACT_ACCOUNT', 'EFFECTIVE_DATE', 'END_DATE'], keep='last', inplace=True) # Drop duplicate entries appearing in different data slices
print("Deduplication: ", shape1, "-->", raw_df.shape)
print('Consumption total: ', raw_df['CONSUMPTION'].sum())
print(raw_df.shape)
# Replace X's representing true for boolean feats with 1
print('Cleaning data.')
raw_df[bool_feats] = raw_df[bool_feats].replace({'X': 1, 'On': 1, 'Discon': 0, ' ': 0})
raw_df['EST_READ'] = raw_df['EST_READ'].astype('object')
# Fill in missing data
if 'EST_READ' in cat_feats:
raw_df['EST_READ'] = raw_df['EST_READ'].astype('str') + '_' # Force treatment as string
raw_df[['CONSUMPTION'] + num_feats + bool_feats] = raw_df[['CONSUMPTION'] + num_feats + bool_feats].fillna(0)
raw_df[cat_feats] = raw_df[cat_feats].fillna('MISSING')
if save_raw_df:
raw_df.to_csv(cfg['PATHS']['RAW_DATASET'], sep=',', header=True, index_label=False, index=False)
return raw_df
def calculate_ts_data(cfg, raw_df, start_date=None):
'''
Calculates estimates for daily water consumption based on provided historical data. Assumes each client consumes
water at a uniform rate over the billing period. Produces a time series dataset indexed by date.
:param cfg: project config
:param raw_df: A DataFrame containing raw water consumption data
:param start_date: The minimum date at which at which to create daily estimates for
:return: a Pandas dataframe containing estimated daily water consumption
'''
print('Calculating estimates for daily consumption and contextual features.')
raw_df.drop('CONTRACT_ACCOUNT', axis=1, inplace=True)
if start_date is None:
min_date = raw_df['EFFECTIVE_DATE'].min()
else:
min_date = start_date
max_date = raw_df['END_DATE'].max() - timedelta(days=1)
cat_feats = cfg['DATA']['CATEGORICAL_FEATS']
num_feats = cfg['DATA']['NUMERICAL_FEATS']
bool_feats = cfg['DATA']['BOOLEAN_FEATS']
# Determine feature names for preprocessed dataset
date_range = pd.date_range(start=min_date, end=max_date)
daily_df_feat_init = {'Date': date_range, 'Consumption': 0}
for f in num_feats:
daily_df_feat_init[f + '_avg'] = 0.0
daily_df_feat_init[f + '_std'] = 0.0
for f in bool_feats:
daily_df_feat_init[f] = 0.0
for f in cat_feats:
for val in raw_df[f].unique():
daily_df_feat_init[f + '_' + str(val)] = 0.0
daily_df = pd.DataFrame(daily_df_feat_init)
daily_df.set_index('Date', inplace=True)
def daily_consumption(cons, start_date, end_date):
bill_period = (end_date - start_date + timedelta(days=1)).days # Get length of billing period
if bill_period > 0:
return cons / bill_period # Estimate consumption per day over billing period
else:
return 0
# Populating features for daily prediction
for date in tqdm(date_range):
daily_snapshot = raw_df.loc[(raw_df['EFFECTIVE_DATE'] <= date) & (raw_df['END_DATE'] >= date)]
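        # all accounts whose billing period covers this calendar date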
for f in num_feats:
daily_df.loc[date, f + '_avg'] = daily_snapshot[f].mean()
daily_df.loc[date, f + '_std'] = daily_snapshot[f].std()
for f in bool_feats:
daily_df.loc[date, f] = daily_snapshot[f].mean()
for f in cat_feats:
fractions = daily_snapshot[f].value_counts(normalize=True)
for val, fraction in fractions.items():
daily_df.loc[date, f + '_' + str(val)] = fraction
try:
daily_df.loc[date, 'Consumption'] = (daily_snapshot.apply(lambda row : daily_consumption(row['CONSUMPTION'],
row['EFFECTIVE_DATE'], row['END_DATE']), axis=1)).sum()
except Exception as e:
print(date, e)
daily_df.loc[date, 'Consumption'] = 0.0
# TODO delete once we have no missing data
for missing_range_endpts in cfg['DATA']['MISSING_RANGES']:
missing_range = pd.date_range(pd.to_datetime(missing_range_endpts[0]), pd.to_datetime(missing_range_endpts[1]))
daily_df = daily_df[~daily_df.index.isin(missing_range)] # Remove noise from missing date ranges
return daily_df
def preprocess_ts(cfg=None, save_raw_df=True, save_prepr_df=True, rate_class='all', out_path=None):
'''
Transform raw water demand data into a time series dataset ready to be fed into a model.
:param cfg: project config
:param save_raw_df: Flag indicating whether to save intermediate raw data
:param save_prepr_df: Flag indicating whether to save the preprocessed data
:param rate_class: Rate class to filter by
:param out_path: Path to save updated preprocessed data
'''
run_start = datetime.today()
tqdm.pandas()
if cfg is None:
cfg = yaml.full_load(open("./config.yml", 'r')) # Load project config data
raw_df = load_raw_data(cfg, rate_class=rate_class, save_raw_df=save_raw_df)
preprocessed_df = calculate_ts_data(cfg, raw_df)
preprocessed_df = preprocessed_df[cfg['DATA']['START_TRIM']:-cfg['DATA']['END_TRIM']]
if save_prepr_df:
out_path = cfg['PATHS']['PREPROCESSED_DATA'] if out_path is None else out_path
preprocessed_df.to_csv(out_path, sep=',', header=True)
print("Done. Runtime = ", ((datetime.today() - run_start).seconds / 60), " min")
return preprocessed_df
def preprocess_new_data(cfg, save_raw_df=True, save_prepr_df=True, rate_class='all', out_path=None):
'''
Preprocess a new raw data file and merge it with preexisting preprocessed data.
:param cfg: Project config
    :param save_raw_df: Flag indicating whether to save the combined raw dataset
    :param save_prepr_df: Flag indicating whether to save the combined preprocessed dataset
:param rate_class: Rate class to filter raw data by
:param out_path: Path to save updated preprocessed data
'''
# Load new raw data and remove any rows that appear in old raw data
old_raw_df = pd.read_csv(cfg['PATHS']['RAW_DATASET'], low_memory=False)
old_raw_df['EFFECTIVE_DATE'] = pd.to_datetime(old_raw_df['EFFECTIVE_DATE'], errors='coerce')
min_preprocess_date = old_raw_df['EFFECTIVE_DATE'].max() - timedelta(days=183) # Latest date in old raw dataset minus 1/2 year, to be safe
new_raw_df = load_raw_data(cfg, rate_class=rate_class, save_raw_df=save_raw_df)
if new_raw_df.shape[1] > old_raw_df.shape[1]:
new_raw_df = new_raw_df[old_raw_df.columns] # If additional features added, remove them
# Preprocess new raw data
new_preprocessed_df = calculate_ts_data(cfg, new_raw_df, start_date=min_preprocess_date)
# Load old preprocessed data
old_preprocessed_df = pd.read_csv(cfg['PATHS']['PREPROCESSED_DATA'])
old_preprocessed_df['Date'] = pd.to_datetime(old_preprocessed_df['Date'], errors='coerce')
old_preprocessed_df = old_preprocessed_df[old_preprocessed_df['Date'] < min_preprocess_date]
old_preprocessed_df.set_index('Date', inplace=True)
# Combine old and new preprocessed data
preprocessed_df = pd.concat([old_preprocessed_df, new_preprocessed_df], axis=0)
preprocessed_df = preprocessed_df[:-cfg['DATA']['END_TRIM']]
if save_prepr_df:
out_path = cfg['PATHS']['PREPROCESSED_DATA'] if out_path is None else out_path
preprocessed_df.to_csv(out_path, sep=',', header=True)
return preprocessed_df
def merge_raw_data(cfg=None):
'''
Loads all raw water demand CSVs available and merges it into one dataset, keeping the latest consumption records
for each client if readings are duplicated.
:param cfg: Project config
'''
if cfg is None:
cfg = yaml.full_load(open("./config.yml", 'r')) # Load project config data
# Load old merged raw data file
merged_raw_df = pd.DataFrame()
# Loop through all raw data files and concatenate them with the old merged one, de-duplicating rows as needed
quarterly_raw_data_filenames = glob.glob(cfg['PATHS']['RAW_DATA_DIR'] + "/*.csv")
for filename in tqdm(quarterly_raw_data_filenames):
quarterly_raw_df = pd.read_csv(filename, encoding='ISO-8859-1', low_memory=False) # Load a water demand CSV
quarterly_raw_df['EFFECTIVE_DATE'] = pd.to_datetime(quarterly_raw_df['EFFECTIVE_DATE'], errors='coerce')
quarterly_raw_df['END_DATE'] = pd.to_datetime(quarterly_raw_df['END_DATE'], errors='coerce')
merged_raw_df = | pd.concat([merged_raw_df, quarterly_raw_df], axis=0, ignore_index=True) | pandas.concat |
import json
import plotly
import pandas as pd
import numpy as np
from ast import literal_eval
from nltk.stem import WordNetLemmatizer
from nltk.tokenize import word_tokenize
from flask import Flask
from flask import render_template, request, jsonify
from plotly.graph_objs import Bar
from sklearn.externals import joblib
from sqlalchemy import create_engine
app = Flask(__name__)
def tokenize(text):
tokens = word_tokenize(text)
lemmatizer = WordNetLemmatizer()
clean_tokens = []
for tok in tokens:
clean_tok = lemmatizer.lemmatize(tok).lower().strip()
clean_tokens.append(clean_tok)
return clean_tokens
def _count_words(X):
"""Transforms messages Series the number of words it contains"""
X = | pd.Series(X) | pandas.Series |
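    # Hedged guess at the missing remainder (an assumption, not in the original):
    # count tokens per message, e.g.
    #     return X.apply(lambda text: len(tokenize(text)))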
from typing import Dict, Optional, Any
import numpy as np
import scipy.stats
import pandas as pd
import threading
def black_scholes(S, K, T, rf: float, iv, option_type) -> pd.DataFrame:
"""
Black Scholes modeling function.
* https://www.newyorkfed.org/medialibrary/media/research/staff_reports/sr677.pdf
* https://github.com/hashABCD/opstrat/blob/main/opstrat/blackscholes.py
* https://www.smileofthales.com/computation/options-greeks-python/
* https://github.com/vpatel576/option_probabilites
* https://github.com/skp1999/McMillan-s-Profit-Probability-Calculator/blob/main/POP_Calculation.py
* https://option-price.com/index.php
Option Value: Theoretical premium value.
Delta : Measures Impact of a Change in the Price of Underlying
Gamma: Measures the Rate of Change of Delta
Theta: Measures Impact of a Change in Time Remaining
Vega: Measures Impact of a Change in Volatility
Rho: Measures the impact of changes in Interest rates
:param S: Underlying Asset or Stock Price ($).
    :param K: Strike or Exercise Price ($).
:param T: Expiry time of the option (days).
:param rf: Risk-free rate (decimal number range 0-1).
:param iv: Volatility (decimal).
:param option_type: Calls or Puts option type.
    :return: DataFrame with columns 'option_value_bs', 'intrinsic_value',
        'time_value' and the Greeks: 'delta', 'gamma', 'theta', 'vega', 'rho'.
"""
# Check inputs.
is_type = np.isin(option_type, ['calls', 'puts', 'call', 'put', 'c', 'p'])
assert np.all(is_type) == 1, "Enter Calls or Puts options only."
t = np.maximum(T / 365, 0.00001) # Avoid infinite when T = 0.
iv = np.maximum(iv, 0.00001) # Avoid infinite when iv = 0.
n1 = np.log(S / K)
n2 = (rf + iv ** 2 / 2) * t
d = iv * np.sqrt(t)
d1 = (n1 + n2) / d
d2 = d1 - d
f = np.where(np.isin(option_type, ['calls', 'call', 'c']), 1, -1)
N_d1 = scipy.stats.norm.cdf(f * d1)
N_d2 = scipy.stats.norm.cdf(f * d2)
A = S * N_d1
B = K * N_d2 * np.exp(-rf * t)
# Option pricing.
val = f * (A - B)
val_int = np.maximum(0.0, f * (S - K))
val_time = val - val_int
# Greeks.
delta = f * N_d1
gamma = np.exp((-d1 ** 2) / 2) / (S * iv * np.sqrt(2 * np.pi * t))
theta = (-S * iv * np.exp(-d1 ** 2 / 2) / np.sqrt(8 * np.pi * t)
- f * (N_d2 * rf * K * np.exp(-rf * t))) / 365
vega = ((S * np.sqrt(t) * np.exp((-d1 ** 2) / 2))
/ (np.sqrt(2 * np.pi) * 100))
rho = f * t * K * N_d2 * np.exp(-rf * t) / 100
# Returns Dataframe.
return pd.DataFrame({
'option_value_bs': val,
'intrinsic_value': val_int,
'time_value': val_time,
'delta': delta,
'gamma': gamma,
'theta': theta,
'vega': vega,
'rho': rho
})
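# --- Hedged usage sketch (illustrative numbers, not from the original) -------
# Prices a small strike ladder of 30-day calls at 20% IV and a 1% risk-free rate.
def _example_black_scholes():
    strikes = np.array([95.0, 100.0, 105.0])
    bs = black_scholes(S=100.0, K=strikes, T=30, rf=0.01, iv=0.20, option_type="call")
    return bs[["option_value_bs", "delta", "theta", "vega"]]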
def monte_carlo(
key: str,
S: float,
K: float,
T: int,
rf: float,
iv: float,
option_type: str,
n: int = 200,
rng: Any = None) -> Dict[str, Optional[float]]:
"""
Monte Carlo modeling function.
Monte Carlo allows us to simulate seemingly random events, and assess
risks (among other results, of course). It has been used to assess the
risk of a given trading strategy.
* https://python.plainenglish.io/monte-carlo-options-pricing-in-two-lines-of-python-cf3a39407010
* https://www.youtube.com/watch?v=sS7GtIFFr_Y
* https://pythonforfinance.net/2016/11/28/monte-carlo-simulation-in-python/
* https://aaaquants.com/2017/09/01/monte-carlo-options-pricing-in-two-lines-of-python/#page-content
This function is not fully vectorized. It needs to be in a loop, with
each row passed for processing. All results are summarized by a Numpy func.
Usage:
::
vector_profit_probability = np.vectorize(monte_carlo)
pop = vector_profit_probability(
S=curr_price,
K=opt['strike'].to_numpy(),
T=opt['dte'].to_numpy(),
rf=ten_yr,
iv=opt['volatility'].to_numpy(),
option_type=opt['option_type'].to_numpy(),
rng=rng,
n=1000
)
:param key: Key per result. Useful for future concatenation.
:param S: Underlying Asset or Stock Price ($).
    :param K: Strike or Exercise Price ($).
:param T: Expiry time of the option (days).
:param rf: Risk-free rate (decimal number range 0-1).
:param iv: Volatility (decimal).
:param option_type: Calls or Puts option type.
    :param n: Number of Monte Carlo iterations. A minimum of 100,000 is recommended.
:param rng: Random range generator, used when in loops.
    :return: Dictionary with keys 'symbol', 'option_value_mc',
        'probability_ITM' and 'probability_of_50'.
"""
# Check inputs.
assert option_type in ['calls', 'puts', 'call', 'put', 'c', 'p'], \
"Enter Calls or Puts options only."
# np.random.seed(25) # Use for consistent testing.
T = T if T != 0 else 1
D = np.exp(-rf * (T / 252))
# Randomized array of number of days x simulations, based of current price.
# P = np.cumprod(1 + np.random.randn(n, T) * iv / np.sqrt(252), axis=1) * S
# Generating random range is expensive, so doing it once.
rng = np.random.Generator(np.random.PCG64()) if rng is None else rng
rnd = rng.standard_normal((n, T), dtype=np.float32)
P = np.cumprod(1 + rnd * iv / np.sqrt(252), axis=1) * S
# Series on last day of simulation with premium difference.
p_last = P[:, -1] - K * D
# If calls, take only positive results. If puts, take negatives.
if option_type in ['calls', 'call', 'c']:
arr = np.where(p_last > 0, p_last, 0)
else:
arr = -np.where(p_last < 0, p_last, 0)
# Take the average values of all the iterations on the last day.
val = np.mean(arr)
# Probability of Profit.
pop_ITM = round(np.count_nonzero(arr) / p_last.size, 2)
# Probability of Making 50% Profit.
profit_req = 0.50
if option_type in ['calls', 'call', 'c']:
arr = np.where(p_last > profit_req * val, p_last, 0)
else:
arr = -np.where(p_last < profit_req * val, p_last, 0)
p50 = round(np.count_nonzero(arr) / p_last.size, 2)
# Returns Dictionary.
# Calculating quantiles is expensive, so only uncomment if necessary.
return {
'symbol': key,
'option_value_mc': val, # Average value. Near Black Scholes Value.
# 'value_quantile_5': np.percentile(p_last, 5), # 5% chance below X
# 'value_quantile_50': np.percentile(p_last, 50), # 50% chance lands here.
# 'value_quantile_95': np.percentile(p_last, 95), # 5% chance above X.
'probability_ITM': pop_ITM, # Probability of ending ITM.
'probability_of_50': p50, # Probability of makeing half profit.
}
def mc_numpy_vector(*args):
"""
Monte Carlo simulations vectorized so that arrays work in calculations
DEPRECATED: It's faster to multithread this operation.
"""
curr_price, opt, ten_yr, rng, montecarlo_iterations = args
vector_monte_carlo = np.vectorize(monte_carlo)
_pop = vector_monte_carlo(
key=opt['contractSymbol'],
S=curr_price,
K=opt['strikePrice'].to_numpy(),
T=opt['daysToExpiration'].to_numpy(),
rf=ten_yr,
iv=opt['volatility'].to_numpy(),
option_type=opt['option_type'].to_numpy(),
rng=rng,
n=montecarlo_iterations
)
return pd.DataFrame.from_records(_pop) # pd.json_normalize(_pop.T)
def mc_multi_threading(*args):
"""
Monte Carlo simulations vectorized so that arrays work in calculations.
Multithreaded, means one CPU works multiple I/O.
:param args: Passing all parameters from call.
:return: Dataframe with results. Including a key to join later.
"""
def threader(opt, ten_yr, rng, montecarlo_iterations):
_pop = vector_monte_carlo(
key=opt.index.get_level_values('symbol').to_numpy(),
S=opt['lastPrice'].to_numpy(),
K=opt['strikePrice'].to_numpy(),
T=opt['daysToExpiration'].to_numpy(),
rf=ten_yr,
iv=opt['volatility'].to_numpy(),
option_type=opt.index.get_level_values('option_type').to_numpy(),
rng=rng,
n=montecarlo_iterations
)
rez.append(_pop)
rez = [] # List of dictionaries
_opt, _ten_yr, _rng, _montecarlo_iterations, _chunks = args
vector_monte_carlo = np.vectorize(monte_carlo)
# Chunking tables in groups of 'chunks' values. Each a separate thread.
dtes = _opt['daysToExpiration'].unique() # List of DTE's
d_chunk = [dtes[i:i + _chunks] for i in range(0, len(dtes), _chunks)]
df_chunks = [_opt[(_opt['daysToExpiration'].isin(dte))] for dte in d_chunk]
# Multi-threading.
threads = []
for df in df_chunks:
arg = (df, _ten_yr, _rng, _montecarlo_iterations)
t = threading.Thread(target=threader, args=arg)
threads.append(t)
[thread.start() for thread in threads] # Kickoff threading.
[thread.join() for thread in threads] # Stop all threads.
# Flatten list
_result = []
for i in range(len(rez)):
for j in rez[i]:
_result.append(j)
return pd.DataFrame.from_records(_result)
class Modeling:
def __init__(self, con, option_df):
self.options = option_df.options # Options coming in.
self.quote = None # All quote data.
self.rf = 0 # Risk-free rate, i.e. 10-yr t-bill for modeling.
self.prepare_tables(con)
def get_quotes(self, con):
"""
Get current price data from TDA
Source: https://developer.tdameritrade.com/quotes/apis/get/marketdata/quotes
:return: Current underlying stock price merged into the options table.
"""
import httpx
tickers = self.options.index.get_level_values('stock').unique().to_list()
q = con.client.get_quotes(tickers)
assert q.status_code == httpx.codes.OK, q.raise_for_status()
prep = [v for k, v in q.json().items()]
self.quote = pd.DataFrame.from_dict(prep)
self.quote.rename(columns={'symbol': 'stock'}, inplace=True)
def get_last_price(self, con):
self.get_quotes(con)
last_price = self.quote[[
"stock",
# "description",
# "bidPrice",
# "bidSize",
# "bidId",
# "askPrice",
# "askSize",
# "askId",
"lastPrice",
# "lastSize",
# "lastId",
# "openPrice",
# "highPrice",
# "lowPrice",
# "closePrice",
# "netChange",
# "totalVolume",
# "quoteTimeInLong",
# "tradeTimeInLong",
# "mark",
# "exchange",
# "exchangeName",
# "marginable",
# "shortable",
# "volatility",
# "digits",
# "52WkHigh",
# "52WkLow",
# "peRatio",
# "divAmount",
# "divYield",
# "divDate",
# "securityStatus",
# "regularMarketLastPrice",
# "regularMarketLastSize",
# "regularMarketNetChange",
# "regularMarketTradeTimeInLong",
]]
last_price.set_index('stock', inplace=True)
# pd.merge(self.options, last_price, on='stock')
self.options = self.options.join(last_price, on='stock')
def get_risk_free_rate(self):
# Get 10-yr risk-free rate from FRED.
_url = "https://fred.stlouisfed.org/graph/fredgraph.csv?id=DGS10"
_csv = pd.read_csv(_url)
_value = _csv['DGS10'].values[-1]
self.rf = float(_value) / 100
def prepare_tables(self, con):
self.get_last_price(con)
self.get_risk_free_rate()
def black_scholes(self):
df = self.options.copy()
if all(i in df.columns for i in ['putCall', 'symbol']):
df = df.drop(columns=['putCall', 'symbol']).reset_index()
# Black Scholes data here. Option value, greeks.
bsch = black_scholes(
S=df['lastPrice'].to_numpy(),
K=df['strikePrice'].to_numpy(),
T=df['daysToExpiration'].to_numpy(),
rf=self.rf,
iv=df['volatility'].to_numpy(),
option_type=df['option_type'].to_numpy()
)
df = | pd.concat([df, bsch], axis='columns') | pandas.concat |
import pandas as pd
def gcp_image_classification_transformation(json_data: dict):
all_annotations = []
for obj in json_data:
cloud_uri = obj.get("data").get("image")
annotations = obj.get("annotations")[0]
results = annotations.get("result")
class_name = results[0].get("value").get("choices")[0]
all_annotations.append([cloud_uri, class_name])
annotations_df = | pd.DataFrame(all_annotations) | pandas.DataFrame |
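    # Hedged continuation sketch (an assumption, not in the original): the two
    # collected fields are the image URI and its class label, so downstream code
    # would typically name the columns, e.g.
    #     annotations_df.columns = ["image_uri", "class_name"]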
import math
import cv2
import numpy as np
import pandas as pd
##################################################################
## ##
## create a 3d skeleton video in a blank background ##
## ##
##################################################################
# {0, "Nose"}
# {1, "Neck"},
# {2, "RShoulder"},
# {3, "RElbow"},
# {4, "RWrist"},
# {5, "LShoulder"},
# {6, "LElbow"},
# {7, "LWrist"},
# {8, "REye"},
# {9, "LEye"},
# {10, "MidHip"},
PARTS = [
"Nose",
"Neck",
"RShoulder",
"RElbow",
"RWrist",
"LShoulder",
"LElbow",
"LWrist",
"REye",
"LEye",
"MidHip",
]
SKELETON_EDGES = np.array(
[
[0, 1],
[1, 2],
[2, 3],
[3, 4],
[1, 5],
[5, 6],
[6, 7],
[1, 10],
[8, 0],
[9, 0],
]
)
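# Each edge is a pair of indices into PARTS, e.g. [0, 1] draws the Nose-Neck
# bone and [1, 10] connects the Neck to the MidHip.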
# theta, phi = 3.1415/4, -3.1415/6
theta, phi = -0.3, 0.24
should_rotate = False
scale_dx = 800
scale_dy = 800
# plot 3d skeleton
class Plotter3d:
def __init__(
self,
canvas_size,
origin=(0.5, 0.5),
scale=1,
parts=PARTS,
skeleton_edges=SKELETON_EDGES,
):
self.origin = np.array(
[origin[1] * canvas_size[1], origin[0] * canvas_size[0]],
dtype=np.float32,
) # x, y
self.scale = np.float32(scale)
self.theta = 0
self.phi = 0
self.parts = parts
self.skeleton_edges = skeleton_edges
axis_length = 200
axes = [
np.array(
[
[-axis_length / 2, -axis_length / 2, 0],
[axis_length / 2, -axis_length / 2, 0],
],
dtype=np.float32,
),
np.array(
[
[-axis_length / 2, -axis_length / 2, 0],
[-axis_length / 2, axis_length / 2, 0],
],
dtype=np.float32,
),
np.array(
[
[-axis_length / 2, -axis_length / 2, 0],
[-axis_length / 2, -axis_length / 2, axis_length],
],
dtype=np.float32,
),
]
step = 20
for step_id in range(axis_length // step + 1): # add grid
axes.append(
np.array(
[
[
-axis_length / 2,
-axis_length / 2 + step_id * step,
0,
],
[
axis_length / 2,
-axis_length / 2 + step_id * step,
0,
],
],
dtype=np.float32,
)
)
axes.append(
np.array(
[
[
-axis_length / 2 + step_id * step,
-axis_length / 2,
0,
],
[
-axis_length / 2 + step_id * step,
axis_length / 2,
0,
],
],
dtype=np.float32,
)
)
self.axes = np.array(axes)
def plot(self, img, vertices, edges):
global theta, phi
img.fill(0)
R = self._get_rotation(theta, phi)
self._draw_axes(img, R)
if len(edges) != 0:
self._plot_edges(img, vertices, edges, R)
def _draw_axes(self, img, R):
axes_2d = np.dot(self.axes, R)
axes_2d = axes_2d * self.scale + self.origin
for axe in axes_2d:
axe = axe.astype(int)
cv2.line(
img,
tuple(axe[0]),
tuple(axe[1]),
(128, 128, 128),
1,
cv2.LINE_AA,
)
def _plot_edges(self, img, vertices, edges, R):
vertices_2d = np.dot(vertices, R)
edges_vertices = (
vertices_2d.reshape((-1, 2))[edges] * self.scale + self.origin
)
for edge_vertices in edges_vertices:
edge_vertices = edge_vertices.astype(int)
cv2.line(
img,
tuple(edge_vertices[0]),
tuple(edge_vertices[1]),
(255, 255, 255),
1,
cv2.LINE_AA,
)
def _get_rotation(self, theta, phi):
sin, cos = math.sin, math.cos
return np.array(
[
[cos(theta), sin(theta) * sin(phi)],
[-sin(theta), cos(theta) * sin(phi)],
[0, -cos(phi)],
],
dtype=np.float32,
) # transposed
@staticmethod
def mouse_callback(event, x, y, flags, params):
global previous_position, theta, phi, should_rotate, scale_dx, scale_dy
if event == cv2.EVENT_LBUTTONDOWN:
previous_position = [x, y]
should_rotate = True
if event == cv2.EVENT_MOUSEMOVE and should_rotate:
theta += (x - previous_position[0]) / scale_dx * 6.2831 # 360 deg
phi -= (
(y - previous_position[1]) / scale_dy * 6.2831 * 2
) # 360 deg
phi = max(min(3.1415 / 2, phi), -3.1415 / 2)
previous_position = [x, y]
if event == cv2.EVENT_LBUTTONUP:
should_rotate = False
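# --- Hedged usage sketch (not part of the original file) ---------------------
# Renders one frame onto a blank canvas; `pose_3d` is assumed to be an (11, 3)
# array of keypoint coordinates ordered like PARTS.
def _example_render_frame(pose_3d):
    canvas = np.zeros((720, 1280, 3), dtype=np.uint8)
    plotter = Plotter3d(canvas.shape[:2])
    plotter.plot(canvas, pose_3d, SKELETON_EDGES)
    return canvas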
# read skeleton data from the csv
def read_csv(filename):
"""
Parameters
----------
filename : str
Path to the CSV file.
Returns
-------
df_new : dataframe
Normalised coordinates of 3D pose.
"""
dataframe = | pd.read_csv(filename, index_col="Body Part") | pandas.read_csv |
# -*- coding: utf-8 -*-
"""
Created on Wed Apr 7 14:28:48 2021
@author: hqin
Description:
This script is mainly used to extract the intersecting sites with a minimum depth (e.g. totalRN >= 10) shared by all reps,
in order to verify the accuracy of our LSTM models among reps using the same sites at the same depth.
This avoids the effect of differing depth on predicted m6A sites among reps.
"""
import pandas as pd
import argparse, sys, os
import dataFrame_merge as dFm
import Python_matlibPlot_V2 as pmp
import matplotlib.pyplot as plt
import matplotlib_venn as mv
import venn
# import seaborn as sns
def get_path(flist):
"""
path/file1 name1 Chr,genoLoci,strand,geneid,...
path/file2 name2 Chr,genoLoci,strand,geneid,...
"""
files = list()
for i, line in enumerate(open(flist,'r',encoding="utf-8")):
file = line.strip().split("\t")[0]
files.append(file)
path = os.path.dirname(files[0])
return path
def get_label(df, labels):
"""
labels->label:
Chr,genoLoci,strand -> Chr_genoLoci_strand
"""
    labs = list()
    for item in df.iterrows():  # iterate over the df row by row
        item = item[1]  # item[1] holds all values of the row; item[0] is the row index
ele = "_".join([str(item[i]) for i in labels])
labs.append(ele)
return labs
def Extract_DepthSites(dfsList, labelList, sampleNameList, depth, ratio):
dfs = list()
label = labelList[0]
for i, df in enumerate(dfsList):
df["label"] = get_label(df, label)
#df["totalRN"] = df["totalRN"].astype("int64") #用于totalRN列数值直接就是整数的情况
df["totalRN"] = df["totalRN"].astype("float64").astype("int64") #用于totalRN列数值带小数的时候
df["ratio"] = df["ratio"].astype("float64")
# df = df.loc[df["totalRN"] >= df["totalRN"].mean() & df["ratio" >= 0.1]] #提取覆盖深度大于平均深度同时ratio大于等于0.1的行
df = df[(df["totalRN"] >= float(depth)) & (df["ratio"] >= float(ratio))]
sampleName = sampleNameList[i]
tmp = "LSTM_genoLoci_T%sR%s.txt"%(str(depth), str(ratio))
ouf = "_".join([sampleName, tmp])
oufPath = "/".join([path, ouf])
print(oufPath)
df.to_csv(oufPath, sep = "\t", index = False)
dfs.append(df)
return dfs
def extract_label(dfsList):
labelValList = list()
for i, df in enumerate(dfsList):
labelVal = df["label"].drop_duplicates().values.tolist()
labelValList.append(labelVal)
return labelValList
def Veen(dfsList, sampleNames):
ValueSet = list()
labelValList = extract_label(dfsList)
sampleNametup = tuple(sampleNames)
for i in range(len(labelValList)):
        ValueSet.append(set(labelValList[i]))  # convert each element of the value list from list() to set()
my_dpi=150
    # control the figure size while keeping a high-resolution (HD) rendering
plt.figure(figsize=(600/my_dpi, 600/my_dpi), dpi=my_dpi)
if len(labelValList) > 4:
sys.exit("Do not plot over 3 datasets")
elif len(labelValList) == 2:
g = mv.venn2(subsets= ValueSet,
                     set_labels = sampleNametup,  # set_labels expects a tuple rather than a list
set_colors = ('#3d9a9b', '#c59a38'),
alpha = 0.8,
normalize_to = 1.0
)
elif len(labelValList) == 3:
g = mv.venn3(subsets= ValueSet,
set_labels = sampleNametup,
set_colors = ('#3d9a9b', '#8d4c4e', '#c59a38'),
alpha = 0.8,
normalize_to = 1.0
)
elif len(labelValList) == 4:
labels = venn.get_labels(list(ValueSet), fill=['number','percent'])
g, ax = venn.venn4(labels, names=list(sampleNametup),fontsize=8)
else:
"""
            pyvenn can plot Venn diagrams of more than four sets; see
https://zhuanlan.zhihu.com/p/195541937
"""
pass
# for text in g.set_labels:
# text.set_fontsize(8)
# for text in g.subset_labels:
# text.set_fontsize(6)
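# Usage sketch (added, hedged): a caller would typically invoke something like
# Veen(dfs, ["rep1", "rep2", "rep3"]) on the output of Extract_DepthSites,
# letting matplotlib_venn draw the 2- or 3-set overlap and the pyvenn helpers
# the 4-set case, before saving or showing the figure via plt.savefig()/plt.show().
# ("rep1"..."rep3" are placeholder sample names for this sketch.)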
def Df_merge(dfs, labelList, sampleNames, depth, ratio):
label = labelList[0]
if len(dfs) <= 1:
sys.exit("Please check filelist contains two files at least.\n")
if len(dfs) == 2:
ins = pd.merge(dfs[0],dfs[1],on=label)
elif len(dfs) == 3:
ins1 = pd.merge(dfs[0],dfs[1],on=label)
ins = pd.merge(ins1,dfs[2],on=label)
elif len(dfs) == 4:
ins1 = pd.merge(dfs[0],dfs[1],on=label)
ins2 = | pd.merge(dfs[2],dfs[3],on=label) | pandas.merge |
from datetime import datetime, timedelta
import inspect
import numpy as np
import pytest
from pandas.core.dtypes.common import (
is_categorical_dtype,
is_interval_dtype,
is_object_dtype,
)
from pandas import (
Categorical,
DataFrame,
DatetimeIndex,
Index,
IntervalIndex,
MultiIndex,
RangeIndex,
Series,
Timestamp,
cut,
date_range,
to_datetime,
)
import pandas.util.testing as tm
@pytest.mark.filterwarnings("ignore:Sparse:FutureWarning")
class TestDataFrameAlterAxes:
def test_set_index_directly(self, float_string_frame):
df = float_string_frame
idx = Index(np.arange(len(df))[::-1])
df.index = idx
tm.assert_index_equal(df.index, idx)
with pytest.raises(ValueError, match="Length mismatch"):
df.index = idx[::2]
def test_set_index(self, float_string_frame):
df = float_string_frame
idx = Index(np.arange(len(df))[::-1])
df = df.set_index(idx)
tm.assert_index_equal(df.index, idx)
with pytest.raises(ValueError, match="Length mismatch"):
df.set_index(idx[::2])
def test_set_index_cast(self):
# issue casting an index then set_index
df = DataFrame(
{"A": [1.1, 2.2, 3.3], "B": [5.0, 6.1, 7.2]}, index=[2010, 2011, 2012]
)
df2 = df.set_index(df.index.astype(np.int32))
tm.assert_frame_equal(df, df2)
# A has duplicate values, C does not
@pytest.mark.parametrize("keys", ["A", "C", ["A", "B"], ("tuple", "as", "label")])
@pytest.mark.parametrize("inplace", [True, False])
@pytest.mark.parametrize("drop", [True, False])
def test_set_index_drop_inplace(self, frame_of_index_cols, drop, inplace, keys):
df = frame_of_index_cols
if isinstance(keys, list):
idx = MultiIndex.from_arrays([df[x] for x in keys], names=keys)
else:
idx = Index(df[keys], name=keys)
expected = df.drop(keys, axis=1) if drop else df
expected.index = idx
if inplace:
result = df.copy()
result.set_index(keys, drop=drop, inplace=True)
else:
result = df.set_index(keys, drop=drop)
tm.assert_frame_equal(result, expected)
# A has duplicate values, C does not
@pytest.mark.parametrize("keys", ["A", "C", ["A", "B"], ("tuple", "as", "label")])
@pytest.mark.parametrize("drop", [True, False])
def test_set_index_append(self, frame_of_index_cols, drop, keys):
df = frame_of_index_cols
keys = keys if isinstance(keys, list) else [keys]
idx = MultiIndex.from_arrays(
[df.index] + [df[x] for x in keys], names=[None] + keys
)
expected = df.drop(keys, axis=1) if drop else df.copy()
expected.index = idx
result = df.set_index(keys, drop=drop, append=True)
tm.assert_frame_equal(result, expected)
# A has duplicate values, C does not
@pytest.mark.parametrize("keys", ["A", "C", ["A", "B"], ("tuple", "as", "label")])
@pytest.mark.parametrize("drop", [True, False])
def test_set_index_append_to_multiindex(self, frame_of_index_cols, drop, keys):
# append to existing multiindex
df = frame_of_index_cols.set_index(["D"], drop=drop, append=True)
keys = keys if isinstance(keys, list) else [keys]
expected = frame_of_index_cols.set_index(["D"] + keys, drop=drop, append=True)
result = df.set_index(keys, drop=drop, append=True)
tm.assert_frame_equal(result, expected)
def test_set_index_after_mutation(self):
# GH1590
df = DataFrame({"val": [0, 1, 2], "key": ["<KEY>"]})
expected = DataFrame({"val": [1, 2]}, Index(["b", "c"], name="key"))
df2 = df.loc[df.index.map(lambda indx: indx >= 1)]
result = df2.set_index("key")
tm.assert_frame_equal(result, expected)
# MultiIndex constructor does not work directly on Series -> lambda
# Add list-of-list constructor because list is ambiguous -> lambda
# also test index name if append=True (name is duplicate here for B)
@pytest.mark.parametrize(
"box",
[
Series,
Index,
np.array,
list,
lambda x: [list(x)],
lambda x: MultiIndex.from_arrays([x]),
],
)
@pytest.mark.parametrize(
"append, index_name", [(True, None), (True, "B"), (True, "test"), (False, None)]
)
@pytest.mark.parametrize("drop", [True, False])
def test_set_index_pass_single_array(
self, frame_of_index_cols, drop, append, index_name, box
):
df = frame_of_index_cols
df.index.name = index_name
key = box(df["B"])
if box == list:
# list of strings gets interpreted as list of keys
msg = "['one', 'two', 'three', 'one', 'two']"
with pytest.raises(KeyError, match=msg):
df.set_index(key, drop=drop, append=append)
else:
# np.array/list-of-list "forget" the name of B
name_mi = getattr(key, "names", None)
name = [getattr(key, "name", None)] if name_mi is None else name_mi
result = df.set_index(key, drop=drop, append=append)
# only valid column keys are dropped
# since B is always passed as array above, nothing is dropped
expected = df.set_index(["B"], drop=False, append=append)
expected.index.names = [index_name] + name if append else name
tm.assert_frame_equal(result, expected)
# MultiIndex constructor does not work directly on Series -> lambda
# also test index name if append=True (name is duplicate here for A & B)
@pytest.mark.parametrize(
"box", [Series, Index, np.array, list, lambda x: MultiIndex.from_arrays([x])]
)
@pytest.mark.parametrize(
"append, index_name",
[(True, None), (True, "A"), (True, "B"), (True, "test"), (False, None)],
)
@pytest.mark.parametrize("drop", [True, False])
def test_set_index_pass_arrays(
self, frame_of_index_cols, drop, append, index_name, box
):
df = frame_of_index_cols
df.index.name = index_name
keys = ["A", box(df["B"])]
# np.array/list "forget" the name of B
names = ["A", None if box in [np.array, list, tuple, iter] else "B"]
result = df.set_index(keys, drop=drop, append=append)
# only valid column keys are dropped
# since B is always passed as array above, only A is dropped, if at all
expected = df.set_index(["A", "B"], drop=False, append=append)
expected = expected.drop("A", axis=1) if drop else expected
expected.index.names = [index_name] + names if append else names
tm.assert_frame_equal(result, expected)
# MultiIndex constructor does not work directly on Series -> lambda
# We also emulate a "constructor" for the label -> lambda
# also test index name if append=True (name is duplicate here for A)
@pytest.mark.parametrize(
"box2",
[
Series,
Index,
np.array,
list,
iter,
lambda x: MultiIndex.from_arrays([x]),
lambda x: x.name,
],
)
@pytest.mark.parametrize(
"box1",
[
Series,
Index,
np.array,
list,
iter,
lambda x: MultiIndex.from_arrays([x]),
lambda x: x.name,
],
)
@pytest.mark.parametrize(
"append, index_name", [(True, None), (True, "A"), (True, "test"), (False, None)]
)
@pytest.mark.parametrize("drop", [True, False])
def test_set_index_pass_arrays_duplicate(
self, frame_of_index_cols, drop, append, index_name, box1, box2
):
df = frame_of_index_cols
df.index.name = index_name
keys = [box1(df["A"]), box2(df["A"])]
result = df.set_index(keys, drop=drop, append=append)
# if either box is iter, it has been consumed; re-read
keys = [box1(df["A"]), box2(df["A"])]
# need to adapt first drop for case that both keys are 'A' --
# cannot drop the same column twice;
# use "is" because == would give ambiguous Boolean error for containers
first_drop = (
False if (keys[0] is "A" and keys[1] is "A") else drop # noqa: F632
)
# to test against already-tested behaviour, we add sequentially,
# hence second append always True; must wrap keys in list, otherwise
# box = list would be interpreted as keys
expected = df.set_index([keys[0]], drop=first_drop, append=append)
expected = expected.set_index([keys[1]], drop=drop, append=True)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("append", [True, False])
@pytest.mark.parametrize("drop", [True, False])
def test_set_index_pass_multiindex(self, frame_of_index_cols, drop, append):
df = frame_of_index_cols
keys = MultiIndex.from_arrays([df["A"], df["B"]], names=["A", "B"])
result = df.set_index(keys, drop=drop, append=append)
# setting with a MultiIndex will never drop columns
expected = df.set_index(["A", "B"], drop=False, append=append)
tm.assert_frame_equal(result, expected)
def test_set_index_verify_integrity(self, frame_of_index_cols):
df = frame_of_index_cols
with pytest.raises(ValueError, match="Index has duplicate keys"):
df.set_index("A", verify_integrity=True)
# with MultiIndex
with pytest.raises(ValueError, match="Index has duplicate keys"):
df.set_index([df["A"], df["A"]], verify_integrity=True)
@pytest.mark.parametrize("append", [True, False])
@pytest.mark.parametrize("drop", [True, False])
def test_set_index_raise_keys(self, frame_of_index_cols, drop, append):
df = frame_of_index_cols
with pytest.raises(KeyError, match="['foo', 'bar', 'baz']"):
# column names are A-E, as well as one tuple
df.set_index(["foo", "bar", "baz"], drop=drop, append=append)
# non-existent key in list with arrays
with pytest.raises(KeyError, match="X"):
df.set_index([df["A"], df["B"], "X"], drop=drop, append=append)
msg = "[('foo', 'foo', 'foo', 'bar', 'bar')]"
# tuples always raise KeyError
with pytest.raises(KeyError, match=msg):
df.set_index(tuple(df["A"]), drop=drop, append=append)
# also within a list
with pytest.raises(KeyError, match=msg):
df.set_index(["A", df["A"], tuple(df["A"])], drop=drop, append=append)
@pytest.mark.parametrize("append", [True, False])
@pytest.mark.parametrize("drop", [True, False])
@pytest.mark.parametrize("box", [set], ids=["set"])
def test_set_index_raise_on_type(self, frame_of_index_cols, box, drop, append):
df = frame_of_index_cols
msg = 'The parameter "keys" may be a column key, .*'
# forbidden type, e.g. set
with pytest.raises(TypeError, match=msg):
df.set_index(box(df["A"]), drop=drop, append=append)
# forbidden type in list, e.g. set
with pytest.raises(TypeError, match=msg):
df.set_index(["A", df["A"], box(df["A"])], drop=drop, append=append)
# MultiIndex constructor does not work directly on Series -> lambda
@pytest.mark.parametrize(
"box",
[Series, Index, np.array, iter, lambda x: MultiIndex.from_arrays([x])],
ids=["Series", "Index", "np.array", "iter", "MultiIndex"],
)
@pytest.mark.parametrize("length", [4, 6], ids=["too_short", "too_long"])
@pytest.mark.parametrize("append", [True, False])
@pytest.mark.parametrize("drop", [True, False])
def test_set_index_raise_on_len(
self, frame_of_index_cols, box, length, drop, append
):
# GH 24984
df = frame_of_index_cols # has length 5
values = np.random.randint(0, 10, (length,))
msg = "Length mismatch: Expected 5 rows, received array of length.*"
# wrong length directly
with pytest.raises(ValueError, match=msg):
df.set_index(box(values), drop=drop, append=append)
# wrong length in list
with pytest.raises(ValueError, match=msg):
df.set_index(["A", df.A, box(values)], drop=drop, append=append)
def test_set_index_custom_label_type(self):
# GH 24969
class Thing:
def __init__(self, name, color):
self.name = name
self.color = color
def __str__(self):
return "<Thing {self.name!r}>".format(self=self)
# necessary for pretty KeyError
__repr__ = __str__
thing1 = Thing("One", "red")
thing2 = Thing("Two", "blue")
df = DataFrame({thing1: [0, 1], thing2: [2, 3]})
expected = DataFrame({thing1: [0, 1]}, index=Index([2, 3], name=thing2))
# use custom label directly
result = df.set_index(thing2)
tm.assert_frame_equal(result, expected)
# custom label wrapped in list
result = df.set_index([thing2])
tm.assert_frame_equal(result, expected)
# missing key
thing3 = Thing("Three", "pink")
msg = "<Thing 'Three'>"
with pytest.raises(KeyError, match=msg):
# missing label directly
df.set_index(thing3)
with pytest.raises(KeyError, match=msg):
# missing label in list
df.set_index([thing3])
def test_set_index_custom_label_hashable_iterable(self):
# GH 24969
# actual example discussed in GH 24984 was e.g. for shapely.geometry
# objects (e.g. a collection of Points) that can be both hashable and
# iterable; using frozenset as a stand-in for testing here
class Thing(frozenset):
# need to stabilize repr for KeyError (due to random order in sets)
def __repr__(self):
tmp = sorted(list(self))
# double curly brace prints one brace in format string
return "frozenset({{{}}})".format(", ".join(map(repr, tmp)))
thing1 = Thing(["One", "red"])
thing2 = Thing(["Two", "blue"])
df = DataFrame({thing1: [0, 1], thing2: [2, 3]})
expected = DataFrame({thing1: [0, 1]}, index=Index([2, 3], name=thing2))
# use custom label directly
result = df.set_index(thing2)
tm.assert_frame_equal(result, expected)
# custom label wrapped in list
result = df.set_index([thing2])
tm.assert_frame_equal(result, expected)
# missing key
thing3 = Thing(["Three", "pink"])
msg = r"frozenset\(\{'Three', 'pink'\}\)"
with pytest.raises(KeyError, match=msg):
# missing label directly
df.set_index(thing3)
with pytest.raises(KeyError, match=msg):
# missing label in list
df.set_index([thing3])
def test_set_index_custom_label_type_raises(self):
# GH 24969
# purposefully inherit from something unhashable
class Thing(set):
def __init__(self, name, color):
self.name = name
self.color = color
def __str__(self):
return "<Thing {self.name!r}>".format(self=self)
thing1 = Thing("One", "red")
thing2 = Thing("Two", "blue")
df = DataFrame([[0, 2], [1, 3]], columns=[thing1, thing2])
msg = 'The parameter "keys" may be a column key, .*'
with pytest.raises(TypeError, match=msg):
# use custom label directly
df.set_index(thing2)
with pytest.raises(TypeError, match=msg):
# custom label wrapped in list
df.set_index([thing2])
def test_construction_with_categorical_index(self):
ci = tm.makeCategoricalIndex(10)
ci.name = "B"
# with Categorical
df = DataFrame({"A": np.random.randn(10), "B": ci.values})
idf = df.set_index("B")
tm.assert_index_equal(idf.index, ci)
# from a CategoricalIndex
df = DataFrame({"A": np.random.randn(10), "B": ci})
idf = df.set_index("B")
tm.assert_index_equal(idf.index, ci)
# round-trip
idf = idf.reset_index().set_index("B")
tm.assert_index_equal(idf.index, ci)
def test_set_index_cast_datetimeindex(self):
df = DataFrame(
{
"A": [datetime(2000, 1, 1) + timedelta(i) for i in range(1000)],
"B": np.random.randn(1000),
}
)
idf = df.set_index("A")
assert isinstance(idf.index, DatetimeIndex)
def test_convert_dti_to_series(self):
# don't cast a DatetimeIndex WITH a tz, leave as object
# GH 6032
idx = DatetimeIndex(
to_datetime(["2013-1-1 13:00", "2013-1-2 14:00"]), name="B"
).tz_localize("US/Pacific")
df = DataFrame(np.random.randn(2, 1), columns=["A"])
expected = Series(
np.array(
[
Timestamp("2013-01-01 13:00:00-0800", tz="US/Pacific"),
Timestamp("2013-01-02 14:00:00-0800", tz="US/Pacific"),
],
dtype="object",
),
name="B",
)
# convert index to series
result = Series(idx)
tm.assert_series_equal(result, expected)
# assign to frame
df["B"] = idx
result = df["B"]
tm.assert_series_equal(result, expected)
# convert to series while keeping the timezone
result = idx.to_series(keep_tz=True, index=[0, 1])
tm.assert_series_equal(result, expected)
# convert to utc
with tm.assert_produces_warning(FutureWarning):
df["B"] = idx.to_series(keep_tz=False, index=[0, 1])
result = df["B"]
comp = Series(DatetimeIndex(expected.values).tz_localize(None), name="B")
tm.assert_series_equal(result, comp)
with tm.assert_produces_warning(FutureWarning) as m:
result = idx.to_series(index=[0, 1])
tm.assert_series_equal(result, expected.dt.tz_convert(None))
msg = (
"The default of the 'keep_tz' keyword in "
"DatetimeIndex.to_series will change to True in a future "
"release."
)
assert msg in str(m[0].message)
with tm.assert_produces_warning(FutureWarning):
result = idx.to_series(keep_tz=False, index=[0, 1])
tm.assert_series_equal(result, expected.dt.tz_convert(None))
# list of datetimes with a tz
df["B"] = idx.to_pydatetime()
result = df["B"]
tm.assert_series_equal(result, expected)
# GH 6785
# set the index manually
import pytz
df = DataFrame([{"ts": datetime(2014, 4, 1, tzinfo=pytz.utc), "foo": 1}])
expected = df.set_index("ts")
df.index = df["ts"]
df.pop("ts")
tm.assert_frame_equal(df, expected)
def test_reset_index_tz(self, tz_aware_fixture):
# GH 3950
# reset_index with single level
tz = tz_aware_fixture
idx = date_range("1/1/2011", periods=5, freq="D", tz=tz, name="idx")
df = DataFrame({"a": range(5), "b": ["A", "B", "C", "D", "E"]}, index=idx)
expected = DataFrame(
{
"idx": [
datetime(2011, 1, 1),
datetime(2011, 1, 2),
datetime(2011, 1, 3),
datetime(2011, 1, 4),
datetime(2011, 1, 5),
],
"a": range(5),
"b": ["A", "B", "C", "D", "E"],
},
columns=["idx", "a", "b"],
)
expected["idx"] = expected["idx"].apply(lambda d: Timestamp(d, tz=tz))
tm.assert_frame_equal(df.reset_index(), expected)
def test_set_index_timezone(self):
# GH 12358
# tz-aware Series should retain the tz
idx = to_datetime(["2014-01-01 10:10:10"], utc=True).tz_convert("Europe/Rome")
df = DataFrame({"A": idx})
assert df.set_index(idx).index[0].hour == 11
assert DatetimeIndex(Series(df.A))[0].hour == 11
assert df.set_index(df.A).index[0].hour == 11
def test_set_index_dst(self):
di = date_range("2006-10-29 00:00:00", periods=3, freq="H", tz="US/Pacific")
df = DataFrame(data={"a": [0, 1, 2], "b": [3, 4, 5]}, index=di).reset_index()
# single level
res = df.set_index("index")
exp = DataFrame(
data={"a": [0, 1, 2], "b": [3, 4, 5]}, index=Index(di, name="index")
)
tm.assert_frame_equal(res, exp)
# GH 12920
res = df.set_index(["index", "a"])
exp_index = MultiIndex.from_arrays([di, [0, 1, 2]], names=["index", "a"])
exp = DataFrame({"b": [3, 4, 5]}, index=exp_index)
tm.assert_frame_equal(res, exp)
def test_reset_index_with_intervals(self):
idx = IntervalIndex.from_breaks(np.arange(11), name="x")
original = DataFrame({"x": idx, "y": np.arange(10)})[["x", "y"]]
result = original.set_index("x")
expected = DataFrame({"y": np.arange(10)}, index=idx)
tm.assert_frame_equal(result, expected)
result2 = result.reset_index()
tm.assert_frame_equal(result2, original)
def test_set_index_multiindexcolumns(self):
columns = MultiIndex.from_tuples([("foo", 1), ("foo", 2), ("bar", 1)])
df = DataFrame(np.random.randn(3, 3), columns=columns)
result = df.set_index(df.columns[0])
expected = df.iloc[:, 1:]
expected.index = df.iloc[:, 0].values
expected.index.names = [df.columns[0]]
tm.assert_frame_equal(result, expected)
def test_set_index_empty_column(self):
# GH 1971
df = DataFrame(
[
{"a": 1, "p": 0},
{"a": 2, "m": 10},
{"a": 3, "m": 11, "p": 20},
{"a": 4, "m": 12, "p": 21},
],
columns=("a", "m", "p", "x"),
)
result = df.set_index(["a", "x"])
expected = df[["m", "p"]]
expected.index = MultiIndex.from_arrays([df["a"], df["x"]], names=["a", "x"])
tm.assert_frame_equal(result, expected)
def test_set_columns(self, float_string_frame):
cols = Index(np.arange(len(float_string_frame.columns)))
float_string_frame.columns = cols
with pytest.raises(ValueError, match="Length mismatch"):
float_string_frame.columns = cols[::2]
def test_dti_set_index_reindex(self):
# GH 6631
df = DataFrame(np.random.random(6))
idx1 = date_range("2011/01/01", periods=6, freq="M", tz="US/Eastern")
idx2 = date_range("2013", periods=6, freq="A", tz="Asia/Tokyo")
df = df.set_index(idx1)
tm.assert_index_equal(df.index, idx1)
df = df.reindex(idx2)
tm.assert_index_equal(df.index, idx2)
# GH 11314
# with tz
index = date_range(
datetime(2015, 10, 1), datetime(2015, 10, 1, 23), freq="H", tz="US/Eastern"
)
df = DataFrame(np.random.randn(24, 1), columns=["a"], index=index)
new_index = date_range(
datetime(2015, 10, 2), datetime(2015, 10, 2, 23), freq="H", tz="US/Eastern"
)
result = df.set_index(new_index)
assert result.index.freq == index.freq
# Renaming
def test_rename(self, float_frame):
mapping = {"A": "a", "B": "b", "C": "c", "D": "d"}
renamed = float_frame.rename(columns=mapping)
renamed2 = float_frame.rename(columns=str.lower)
tm.assert_frame_equal(renamed, renamed2)
tm.assert_frame_equal(
renamed2.rename(columns=str.upper), float_frame, check_names=False
)
# index
data = {"A": {"foo": 0, "bar": 1}}
# gets sorted alphabetical
df = DataFrame(data)
renamed = df.rename(index={"foo": "bar", "bar": "foo"})
tm.assert_index_equal(renamed.index, Index(["foo", "bar"]))
renamed = df.rename(index=str.upper)
tm.assert_index_equal(renamed.index, Index(["BAR", "FOO"]))
# have to pass something
with pytest.raises(TypeError, match="must pass an index to rename"):
float_frame.rename()
# partial columns
renamed = float_frame.rename(columns={"C": "foo", "D": "bar"})
tm.assert_index_equal(renamed.columns, Index(["A", "B", "foo", "bar"]))
# other axis
renamed = float_frame.T.rename(index={"C": "foo", "D": "bar"})
tm.assert_index_equal(renamed.index, Index(["A", "B", "foo", "bar"]))
# index with name
index = Index(["foo", "bar"], name="name")
renamer = DataFrame(data, index=index)
renamed = renamer.rename(index={"foo": "bar", "bar": "foo"})
tm.assert_index_equal(renamed.index, Index(["bar", "foo"], name="name"))
assert renamed.index.name == renamer.index.name
def test_rename_axis_inplace(self, float_frame):
# GH 15704
expected = float_frame.rename_axis("foo")
result = float_frame.copy()
no_return = result.rename_axis("foo", inplace=True)
assert no_return is None
tm.assert_frame_equal(result, expected)
expected = float_frame.rename_axis("bar", axis=1)
result = float_frame.copy()
no_return = result.rename_axis("bar", axis=1, inplace=True)
assert no_return is None
tm.assert_frame_equal(result, expected)
def test_rename_axis_raises(self):
# https://github.com/pandas-dev/pandas/issues/17833
df = DataFrame({"A": [1, 2], "B": [1, 2]})
with pytest.raises(ValueError, match="Use `.rename`"):
df.rename_axis(id, axis=0)
with pytest.raises(ValueError, match="Use `.rename`"):
df.rename_axis({0: 10, 1: 20}, axis=0)
with pytest.raises(ValueError, match="Use `.rename`"):
df.rename_axis(id, axis=1)
with pytest.raises(ValueError, match="Use `.rename`"):
df["A"].rename_axis(id)
def test_rename_axis_mapper(self):
# GH 19978
mi = MultiIndex.from_product([["a", "b", "c"], [1, 2]], names=["ll", "nn"])
df = DataFrame(
{"x": [i for i in range(len(mi))], "y": [i * 10 for i in range(len(mi))]},
index=mi,
)
# Test for rename of the Index object of columns
result = df.rename_axis("cols", axis=1)
tm.assert_index_equal(result.columns, Index(["x", "y"], name="cols"))
# Test for rename of the Index object of columns using dict
result = result.rename_axis(columns={"cols": "new"}, axis=1)
tm.assert_index_equal(result.columns, Index(["x", "y"], name="new"))
# Test for renaming index using dict
result = df.rename_axis(index={"ll": "foo"})
assert result.index.names == ["foo", "nn"]
# Test for renaming index using a function
result = df.rename_axis(index=str.upper, axis=0)
assert result.index.names == ["LL", "NN"]
# Test for renaming index providing complete list
result = df.rename_axis(index=["foo", "goo"])
assert result.index.names == ["foo", "goo"]
# Test for changing index and columns at same time
sdf = df.reset_index().set_index("nn").drop(columns=["ll", "y"])
result = sdf.rename_axis(index="foo", columns="meh")
assert result.index.name == "foo"
assert result.columns.name == "meh"
# Test different error cases
with pytest.raises(TypeError, match="Must pass"):
df.rename_axis(index="wrong")
with pytest.raises(ValueError, match="Length of names"):
df.rename_axis(index=["wrong"])
with pytest.raises(TypeError, match="bogus"):
df.rename_axis(bogus=None)
@pytest.mark.parametrize(
"kwargs, rename_index, rename_columns",
[
({"mapper": None, "axis": 0}, True, False),
({"mapper": None, "axis": 1}, False, True),
({"index": None}, True, False),
({"columns": None}, False, True),
({"index": None, "columns": None}, True, True),
({}, False, False),
],
)
def test_rename_axis_none(self, kwargs, rename_index, rename_columns):
# GH 25034
index = Index(list("abc"), name="foo")
columns = Index(["col1", "col2"], name="bar")
data = np.arange(6).reshape(3, 2)
df = | DataFrame(data, index, columns) | pandas.DataFrame |
import os
import itertools
import numpy as np
import pandas as pd
import statsmodels.api as sm
import matplotlib.pyplot as plt
from datetime import datetime
def basic_prediction():
return
def forecasting(df_blocks_):
df_blocks_['date'] = | pd.to_datetime(df_blocks_['date']) | pandas.to_datetime |
"""
Test output formatting for Series/DataFrame, including to_string & reprs
"""
from datetime import datetime
from io import StringIO
import itertools
from operator import methodcaller
import os
from pathlib import Path
import re
from shutil import get_terminal_size
import sys
import textwrap
import dateutil
import numpy as np
import pytest
import pytz
from pandas.compat import (
IS64,
is_platform_windows,
)
import pandas.util._test_decorators as td
import pandas as pd
from pandas import (
DataFrame,
Index,
MultiIndex,
NaT,
Series,
Timestamp,
date_range,
get_option,
option_context,
read_csv,
reset_option,
set_option,
)
import pandas._testing as tm
import pandas.io.formats.format as fmt
import pandas.io.formats.printing as printing
use_32bit_repr = is_platform_windows() or not IS64
@pytest.fixture(params=["string", "pathlike", "buffer"])
def filepath_or_buffer_id(request):
"""
A fixture yielding test ids for filepath_or_buffer testing.
"""
return request.param
@pytest.fixture
def filepath_or_buffer(filepath_or_buffer_id, tmp_path):
"""
A fixture yielding a string representing a filepath, a path-like object
and a StringIO buffer. Also checks that buffer is not closed.
"""
if filepath_or_buffer_id == "buffer":
buf = StringIO()
yield buf
assert not buf.closed
else:
assert isinstance(tmp_path, Path)
if filepath_or_buffer_id == "pathlike":
yield tmp_path / "foo"
else:
yield str(tmp_path / "foo")
@pytest.fixture
def assert_filepath_or_buffer_equals(
filepath_or_buffer, filepath_or_buffer_id, encoding
):
"""
Assertion helper for checking filepath_or_buffer.
"""
def _assert_filepath_or_buffer_equals(expected):
if filepath_or_buffer_id == "string":
with open(filepath_or_buffer, encoding=encoding) as f:
result = f.read()
elif filepath_or_buffer_id == "pathlike":
result = filepath_or_buffer.read_text(encoding=encoding)
elif filepath_or_buffer_id == "buffer":
result = filepath_or_buffer.getvalue()
assert result == expected
return _assert_filepath_or_buffer_equals
def curpath():
pth, _ = os.path.split(os.path.abspath(__file__))
return pth
def has_info_repr(df):
r = repr(df)
c1 = r.split("\n")[0].startswith("<class")
c2 = r.split("\n")[0].startswith(r"<class") # _repr_html_
return c1 or c2
def has_non_verbose_info_repr(df):
has_info = has_info_repr(df)
r = repr(df)
# 1. <class>
# 2. Index
# 3. Columns
# 4. dtype
# 5. memory usage
# 6. trailing newline
nv = len(r.split("\n")) == 6
return has_info and nv
def has_horizontally_truncated_repr(df):
try: # Check header row
fst_line = np.array(repr(df).splitlines()[0].split())
cand_col = np.where(fst_line == "...")[0][0]
except IndexError:
return False
# Make sure each row has this ... in the same place
r = repr(df)
for ix, l in enumerate(r.splitlines()):
if not r.split()[cand_col] == "...":
return False
return True
def has_vertically_truncated_repr(df):
r = repr(df)
only_dot_row = False
for row in r.splitlines():
if re.match(r"^[\.\ ]+$", row):
only_dot_row = True
return only_dot_row
def has_truncated_repr(df):
return has_horizontally_truncated_repr(df) or has_vertically_truncated_repr(df)
def has_doubly_truncated_repr(df):
return has_horizontally_truncated_repr(df) and has_vertically_truncated_repr(df)
def has_expanded_repr(df):
r = repr(df)
for line in r.split("\n"):
if line.endswith("\\"):
return True
return False
@pytest.mark.filterwarnings("ignore::FutureWarning:.*format")
class TestDataFrameFormatting:
def test_eng_float_formatter(self, float_frame):
df = float_frame
df.loc[5] = 0
fmt.set_eng_float_format()
repr(df)
fmt.set_eng_float_format(use_eng_prefix=True)
repr(df)
fmt.set_eng_float_format(accuracy=0)
repr(df)
tm.reset_display_options()
def test_show_null_counts(self):
df = DataFrame(1, columns=range(10), index=range(10))
df.iloc[1, 1] = np.nan
def check(show_counts, result):
buf = StringIO()
df.info(buf=buf, show_counts=show_counts)
assert ("non-null" in buf.getvalue()) is result
with option_context(
"display.max_info_rows", 20, "display.max_info_columns", 20
):
check(None, True)
check(True, True)
check(False, False)
with option_context("display.max_info_rows", 5, "display.max_info_columns", 5):
check(None, False)
check(True, False)
check(False, False)
# GH37999
with tm.assert_produces_warning(
FutureWarning, match="null_counts is deprecated.+"
):
buf = StringIO()
df.info(buf=buf, null_counts=True)
assert "non-null" in buf.getvalue()
# GH37999
with pytest.raises(ValueError, match=r"null_counts used with show_counts.+"):
df.info(null_counts=True, show_counts=True)
def test_repr_truncation(self):
max_len = 20
with option_context("display.max_colwidth", max_len):
df = DataFrame(
{
"A": np.random.randn(10),
"B": [
tm.rands(np.random.randint(max_len - 1, max_len + 1))
for i in range(10)
],
}
)
r = repr(df)
r = r[r.find("\n") + 1 :]
adj = fmt.get_adjustment()
for line, value in zip(r.split("\n"), df["B"]):
if adj.len(value) + 1 > max_len:
assert "..." in line
else:
assert "..." not in line
with option_context("display.max_colwidth", 999999):
assert "..." not in repr(df)
with option_context("display.max_colwidth", max_len + 2):
assert "..." not in repr(df)
def test_repr_deprecation_negative_int(self):
# TODO(2.0): remove in future version after deprecation cycle
# Non-regression test for:
# https://github.com/pandas-dev/pandas/issues/31532
width = get_option("display.max_colwidth")
with tm.assert_produces_warning(FutureWarning):
set_option("display.max_colwidth", -1)
set_option("display.max_colwidth", width)
def test_repr_chop_threshold(self):
df = DataFrame([[0.1, 0.5], [0.5, -0.1]])
reset_option("display.chop_threshold") # default None
assert repr(df) == " 0 1\n0 0.1 0.5\n1 0.5 -0.1"
with option_context("display.chop_threshold", 0.2):
assert repr(df) == " 0 1\n0 0.0 0.5\n1 0.5 0.0"
with option_context("display.chop_threshold", 0.6):
assert repr(df) == " 0 1\n0 0.0 0.0\n1 0.0 0.0"
with option_context("display.chop_threshold", None):
assert repr(df) == " 0 1\n0 0.1 0.5\n1 0.5 -0.1"
def test_repr_chop_threshold_column_below(self):
# GH 6839: validation case
df = DataFrame([[10, 20, 30, 40], [8e-10, -1e-11, 2e-9, -2e-11]]).T
with option_context("display.chop_threshold", 0):
assert repr(df) == (
" 0 1\n"
"0 10.0 8.000000e-10\n"
"1 20.0 -1.000000e-11\n"
"2 30.0 2.000000e-09\n"
"3 40.0 -2.000000e-11"
)
with option_context("display.chop_threshold", 1e-8):
assert repr(df) == (
" 0 1\n"
"0 10.0 0.000000e+00\n"
"1 20.0 0.000000e+00\n"
"2 30.0 0.000000e+00\n"
"3 40.0 0.000000e+00"
)
with option_context("display.chop_threshold", 5e-11):
assert repr(df) == (
" 0 1\n"
"0 10.0 8.000000e-10\n"
"1 20.0 0.000000e+00\n"
"2 30.0 2.000000e-09\n"
"3 40.0 0.000000e+00"
)
def test_repr_obeys_max_seq_limit(self):
with option_context("display.max_seq_items", 2000):
assert len(printing.pprint_thing(list(range(1000)))) > 1000
with option_context("display.max_seq_items", 5):
assert len(printing.pprint_thing(list(range(1000)))) < 100
with option_context("display.max_seq_items", 1):
assert len(printing.pprint_thing(list(range(1000)))) < 9
def test_repr_set(self):
assert printing.pprint_thing({1}) == "{1}"
def test_repr_is_valid_construction_code(self):
# for the case of Index, where the repr is traditional rather than
# stylized
idx = Index(["a", "b"])
res = eval("pd." + repr(idx))
tm.assert_series_equal(Series(res), Series(idx))
def test_repr_should_return_str(self):
# https://docs.python.org/3/reference/datamodel.html#object.__repr__
# "...The return value must be a string object."
# (str on py2.x, str (unicode) on py3)
data = [8, 5, 3, 5]
index1 = ["\u03c3", "\u03c4", "\u03c5", "\u03c6"]
cols = ["\u03c8"]
df = DataFrame(data, columns=cols, index=index1)
assert type(df.__repr__()) == str # both py2 / 3
def test_repr_no_backslash(self):
with option_context("mode.sim_interactive", True):
df = DataFrame(np.random.randn(10, 4))
assert "\\" not in repr(df)
def test_expand_frame_repr(self):
df_small = DataFrame("hello", index=[0], columns=[0])
df_wide = DataFrame("hello", index=[0], columns=range(10))
df_tall = DataFrame("hello", index=range(30), columns=range(5))
with option_context("mode.sim_interactive", True):
with option_context(
"display.max_columns",
10,
"display.width",
20,
"display.max_rows",
20,
"display.show_dimensions",
True,
):
with option_context("display.expand_frame_repr", True):
assert not has_truncated_repr(df_small)
assert not has_expanded_repr(df_small)
assert not has_truncated_repr(df_wide)
assert has_expanded_repr(df_wide)
assert has_vertically_truncated_repr(df_tall)
assert has_expanded_repr(df_tall)
with option_context("display.expand_frame_repr", False):
assert not has_truncated_repr(df_small)
assert not has_expanded_repr(df_small)
assert not has_horizontally_truncated_repr(df_wide)
assert not has_expanded_repr(df_wide)
assert has_vertically_truncated_repr(df_tall)
assert not has_expanded_repr(df_tall)
def test_repr_non_interactive(self):
# in non interactive mode, there can be no dependency on the
# result of terminal auto size detection
df = DataFrame("hello", index=range(1000), columns=range(5))
with option_context(
"mode.sim_interactive", False, "display.width", 0, "display.max_rows", 5000
):
assert not has_truncated_repr(df)
assert not has_expanded_repr(df)
def test_repr_truncates_terminal_size(self, monkeypatch):
# see gh-21180
terminal_size = (118, 96)
monkeypatch.setattr(
"pandas.io.formats.format.get_terminal_size", lambda: terminal_size
)
index = range(5)
columns = MultiIndex.from_tuples(
[
("This is a long title with > 37 chars.", "cat"),
("This is a loooooonger title with > 43 chars.", "dog"),
]
)
df = DataFrame(1, index=index, columns=columns)
result = repr(df)
h1, h2 = result.split("\n")[:2]
assert "long" in h1
assert "loooooonger" in h1
assert "cat" in h2
assert "dog" in h2
# regular columns
df2 = DataFrame({"A" * 41: [1, 2], "B" * 41: [1, 2]})
result = repr(df2)
assert df2.columns[0] in result.split("\n")[0]
def test_repr_truncates_terminal_size_full(self, monkeypatch):
# GH 22984 ensure entire window is filled
terminal_size = (80, 24)
df = DataFrame(np.random.rand(1, 7))
monkeypatch.setattr(
"pandas.io.formats.format.get_terminal_size", lambda: terminal_size
)
assert "..." not in str(df)
def test_repr_truncation_column_size(self):
# dataframe with last column very wide -> check it is not used to
# determine size of truncation (...) column
df = DataFrame(
{
"a": [108480, 30830],
"b": [12345, 12345],
"c": [12345, 12345],
"d": [12345, 12345],
"e": ["a" * 50] * 2,
}
)
assert "..." in str(df)
assert " ... " not in str(df)
def test_repr_max_columns_max_rows(self):
term_width, term_height = get_terminal_size()
if term_width < 10 or term_height < 10:
pytest.skip(f"terminal size too small, {term_width} x {term_height}")
def mkframe(n):
index = [f"{i:05d}" for i in range(n)]
return DataFrame(0, index, index)
df6 = mkframe(6)
df10 = mkframe(10)
with option_context("mode.sim_interactive", True):
with option_context("display.width", term_width * 2):
with option_context("display.max_rows", 5, "display.max_columns", 5):
assert not has_expanded_repr(mkframe(4))
assert not has_expanded_repr(mkframe(5))
assert not has_expanded_repr(df6)
assert has_doubly_truncated_repr(df6)
with option_context("display.max_rows", 20, "display.max_columns", 10):
# Out off max_columns boundary, but no extending
# since not exceeding width
assert not has_expanded_repr(df6)
assert not has_truncated_repr(df6)
with option_context("display.max_rows", 9, "display.max_columns", 10):
# out vertical bounds can not result in expanded repr
assert not has_expanded_repr(df10)
assert has_vertically_truncated_repr(df10)
# width=None in terminal, auto detection
with option_context(
"display.max_columns",
100,
"display.max_rows",
term_width * 20,
"display.width",
None,
):
df = mkframe((term_width // 7) - 2)
assert not has_expanded_repr(df)
df = mkframe((term_width // 7) + 2)
printing.pprint_thing(df._repr_fits_horizontal_())
assert has_expanded_repr(df)
def test_repr_min_rows(self):
df = DataFrame({"a": range(20)})
# default setting no truncation even if above min_rows
assert ".." not in repr(df)
assert ".." not in df._repr_html_()
df = DataFrame({"a": range(61)})
# default of max_rows 60 triggers truncation if above
assert ".." in repr(df)
assert ".." in df._repr_html_()
with option_context("display.max_rows", 10, "display.min_rows", 4):
# truncated after first two rows
assert ".." in repr(df)
assert "2 " not in repr(df)
assert "..." in df._repr_html_()
assert "<td>2</td>" not in df._repr_html_()
with option_context("display.max_rows", 12, "display.min_rows", None):
# when set to None, follow value of max_rows
assert "5 5" in repr(df)
assert "<td>5</td>" in df._repr_html_()
with option_context("display.max_rows", 10, "display.min_rows", 12):
# when set value higher as max_rows, use the minimum
assert "5 5" not in repr(df)
assert "<td>5</td>" not in df._repr_html_()
with option_context("display.max_rows", None, "display.min_rows", 12):
# max_rows of None -> never truncate
assert ".." not in repr(df)
assert ".." not in df._repr_html_()
def test_str_max_colwidth(self):
# GH 7856
df = DataFrame(
[
{
"a": "foo",
"b": "bar",
"c": "uncomfortably long line with lots of stuff",
"d": 1,
},
{"a": "foo", "b": "bar", "c": "stuff", "d": 1},
]
)
df.set_index(["a", "b", "c"])
assert str(df) == (
" a b c d\n"
"0 foo bar uncomfortably long line with lots of stuff 1\n"
"1 foo bar stuff 1"
)
with option_context("max_colwidth", 20):
assert str(df) == (
" a b c d\n"
"0 foo bar uncomfortably lo... 1\n"
"1 foo bar stuff 1"
)
def test_auto_detect(self):
term_width, term_height = get_terminal_size()
fac = 1.05 # Arbitrary large factor to exceed term width
cols = range(int(term_width * fac))
index = range(10)
df = DataFrame(index=index, columns=cols)
with option_context("mode.sim_interactive", True):
with option_context("display.max_rows", None):
with option_context("display.max_columns", None):
# Wrap around with None
assert has_expanded_repr(df)
with option_context("display.max_rows", 0):
with option_context("display.max_columns", 0):
# Truncate with auto detection.
assert has_horizontally_truncated_repr(df)
index = range(int(term_height * fac))
df = DataFrame(index=index, columns=cols)
with option_context("display.max_rows", 0):
with option_context("display.max_columns", None):
# Wrap around with None
assert has_expanded_repr(df)
# Truncate vertically
assert has_vertically_truncated_repr(df)
with option_context("display.max_rows", None):
with option_context("display.max_columns", 0):
assert has_horizontally_truncated_repr(df)
def test_to_string_repr_unicode(self):
buf = StringIO()
unicode_values = ["\u03c3"] * 10
unicode_values = np.array(unicode_values, dtype=object)
df = DataFrame({"unicode": unicode_values})
df.to_string(col_space=10, buf=buf)
# it works!
repr(df)
idx = Index(["abc", "\u03c3a", "aegdvg"])
ser = Series(np.random.randn(len(idx)), idx)
rs = repr(ser).split("\n")
line_len = len(rs[0])
for line in rs[1:]:
try:
line = line.decode(get_option("display.encoding"))
except AttributeError:
pass
if not line.startswith("dtype:"):
assert len(line) == line_len
# it works even if sys.stdin in None
_stdin = sys.stdin
try:
sys.stdin = None
repr(df)
finally:
sys.stdin = _stdin
def test_east_asian_unicode_false(self):
# not aligned properly because of east asian width
# mid col
df = DataFrame(
{"a": ["あ", "いいい", "う", "ええええええ"], "b": [1, 222, 33333, 4]},
index=["a", "bb", "c", "ddd"],
)
expected = (
" a b\na あ 1\n"
"bb いいい 222\nc う 33333\n"
"ddd ええええええ 4"
)
assert repr(df) == expected
# last col
df = DataFrame(
{"a": [1, 222, 33333, 4], "b": ["あ", "いいい", "う", "ええええええ"]},
index=["a", "bb", "c", "ddd"],
)
expected = (
" a b\na 1 あ\n"
"bb 222 いいい\nc 33333 う\n"
"ddd 4 ええええええ"
)
assert repr(df) == expected
# all col
df = DataFrame(
{"a": ["あああああ", "い", "う", "えええ"], "b": ["あ", "いいい", "う", "ええええええ"]},
index=["a", "bb", "c", "ddd"],
)
expected = (
" a b\na あああああ あ\n"
"bb い いいい\nc う う\n"
"ddd えええ ええええええ"
)
assert repr(df) == expected
# column name
df = DataFrame(
{"b": ["あ", "いいい", "う", "ええええええ"], "あああああ": [1, 222, 33333, 4]},
index=["a", "bb", "c", "ddd"],
)
expected = (
" b あああああ\na あ 1\n"
"bb いいい 222\nc う 33333\n"
"ddd ええええええ 4"
)
assert repr(df) == expected
# index
df = DataFrame(
{"a": ["あああああ", "い", "う", "えええ"], "b": ["あ", "いいい", "う", "ええええええ"]},
index=["あああ", "いいいいいい", "うう", "え"],
)
expected = (
" a b\nあああ あああああ あ\n"
"いいいいいい い いいい\nうう う う\n"
"え えええ ええええええ"
)
assert repr(df) == expected
# index name
df = DataFrame(
{"a": ["あああああ", "い", "う", "えええ"], "b": ["あ", "いいい", "う", "ええええええ"]},
index=Index(["あ", "い", "うう", "え"], name="おおおお"),
)
expected = (
" a b\n"
"おおおお \n"
"あ あああああ あ\n"
"い い いいい\n"
"うう う う\n"
"え えええ ええええええ"
)
assert repr(df) == expected
# all
df = DataFrame(
{"あああ": ["あああ", "い", "う", "えええええ"], "いいいいい": ["あ", "いいい", "う", "ええ"]},
index=Index(["あ", "いいい", "うう", "え"], name="お"),
)
expected = (
" あああ いいいいい\n"
"お \n"
"あ あああ あ\n"
"いいい い いいい\n"
"うう う う\n"
"え えええええ ええ"
)
assert repr(df) == expected
# MultiIndex
idx = MultiIndex.from_tuples(
[("あ", "いい"), ("う", "え"), ("おおお", "かかかか"), ("き", "くく")]
)
df = DataFrame(
{"a": ["あああああ", "い", "う", "えええ"], "b": ["あ", "いいい", "う", "ええええええ"]},
index=idx,
)
expected = (
" a b\n"
"あ いい あああああ あ\n"
"う え い いいい\n"
"おおお かかかか う う\n"
"き くく えええ ええええええ"
)
assert repr(df) == expected
# truncate
with option_context("display.max_rows", 3, "display.max_columns", 3):
df = DataFrame(
{
"a": ["あああああ", "い", "う", "えええ"],
"b": ["あ", "いいい", "う", "ええええええ"],
"c": ["お", "か", "ききき", "くくくくくく"],
"ああああ": ["さ", "し", "す", "せ"],
},
columns=["a", "b", "c", "ああああ"],
)
expected = (
" a ... ああああ\n0 あああああ ... さ\n"
".. ... ... ...\n3 えええ ... せ\n"
"\n[4 rows x 4 columns]"
)
assert repr(df) == expected
df.index = ["あああ", "いいいい", "う", "aaa"]
expected = (
" a ... ああああ\nあああ あああああ ... さ\n"
".. ... ... ...\naaa えええ ... せ\n"
"\n[4 rows x 4 columns]"
)
assert repr(df) == expected
def test_east_asian_unicode_true(self):
# Enable Unicode option -----------------------------------------
with option_context("display.unicode.east_asian_width", True):
# mid col
df = DataFrame(
{"a": ["あ", "いいい", "う", "ええええええ"], "b": [1, 222, 33333, 4]},
index=["a", "bb", "c", "ddd"],
)
expected = (
" a b\na あ 1\n"
"bb いいい 222\nc う 33333\n"
"ddd ええええええ 4"
)
assert repr(df) == expected
# last col
df = DataFrame(
{"a": [1, 222, 33333, 4], "b": ["あ", "いいい", "う", "ええええええ"]},
index=["a", "bb", "c", "ddd"],
)
expected = (
" a b\na 1 あ\n"
"bb 222 いいい\nc 33333 う\n"
"ddd 4 ええええええ"
)
assert repr(df) == expected
# all col
df = DataFrame(
{"a": ["あああああ", "い", "う", "えええ"], "b": ["あ", "いいい", "う", "ええええええ"]},
index=["a", "bb", "c", "ddd"],
)
expected = (
" a b\n"
"a あああああ あ\n"
"bb い いいい\n"
"c う う\n"
"ddd えええ ええええええ"
)
assert repr(df) == expected
# column name
df = DataFrame(
{"b": ["あ", "いいい", "う", "ええええええ"], "あああああ": [1, 222, 33333, 4]},
index=["a", "bb", "c", "ddd"],
)
expected = (
" b あああああ\n"
"a あ 1\n"
"bb いいい 222\n"
"c う 33333\n"
"ddd ええええええ 4"
)
assert repr(df) == expected
# index
df = DataFrame(
{"a": ["あああああ", "い", "う", "えええ"], "b": ["あ", "いいい", "う", "ええええええ"]},
index=["あああ", "いいいいいい", "うう", "え"],
)
expected = (
" a b\n"
"あああ あああああ あ\n"
"いいいいいい い いいい\n"
"うう う う\n"
"え えええ ええええええ"
)
assert repr(df) == expected
# index name
df = DataFrame(
{"a": ["あああああ", "い", "う", "えええ"], "b": ["あ", "いいい", "う", "ええええええ"]},
index=Index(["あ", "い", "うう", "え"], name="おおおお"),
)
expected = (
" a b\n"
"おおおお \n"
"あ あああああ あ\n"
"い い いいい\n"
"うう う う\n"
"え えええ ええええええ"
)
assert repr(df) == expected
# all
df = DataFrame(
{"あああ": ["あああ", "い", "う", "えええええ"], "いいいいい": ["あ", "いいい", "う", "ええ"]},
index=Index(["あ", "いいい", "うう", "え"], name="お"),
)
expected = (
" あああ いいいいい\n"
"お \n"
"あ あああ あ\n"
"いいい い いいい\n"
"うう う う\n"
"え えええええ ええ"
)
assert repr(df) == expected
# MultiIndex
idx = MultiIndex.from_tuples(
[("あ", "いい"), ("う", "え"), ("おおお", "かかかか"), ("き", "くく")]
)
df = DataFrame(
{"a": ["あああああ", "い", "う", "えええ"], "b": ["あ", "いいい", "う", "ええええええ"]},
index=idx,
)
expected = (
" a b\n"
"あ いい あああああ あ\n"
"う え い いいい\n"
"おおお かかかか う う\n"
"き くく えええ ええええええ"
)
assert repr(df) == expected
# truncate
with option_context("display.max_rows", 3, "display.max_columns", 3):
df = DataFrame(
{
"a": ["あああああ", "い", "う", "えええ"],
"b": ["あ", "いいい", "う", "ええええええ"],
"c": ["お", "か", "ききき", "くくくくくく"],
"ああああ": ["さ", "し", "す", "せ"],
},
columns=["a", "b", "c", "ああああ"],
)
expected = (
" a ... ああああ\n"
"0 あああああ ... さ\n"
".. ... ... ...\n"
"3 えええ ... せ\n"
"\n[4 rows x 4 columns]"
)
assert repr(df) == expected
df.index = ["あああ", "いいいい", "う", "aaa"]
expected = (
" a ... ああああ\n"
"あああ あああああ ... さ\n"
"... ... ... ...\n"
"aaa えええ ... せ\n"
"\n[4 rows x 4 columns]"
)
assert repr(df) == expected
# ambiguous unicode
df = DataFrame(
{"b": ["あ", "いいい", "¡¡", "ええええええ"], "あああああ": [1, 222, 33333, 4]},
index=["a", "bb", "c", "¡¡¡"],
)
expected = (
" b あああああ\n"
"a あ 1\n"
"bb いいい 222\n"
"c ¡¡ 33333\n"
"¡¡¡ ええええええ 4"
)
assert repr(df) == expected
def test_to_string_buffer_all_unicode(self):
buf = StringIO()
empty = DataFrame({"c/\u03c3": Series(dtype=object)})
nonempty = DataFrame({"c/\u03c3": Series([1, 2, 3])})
print(empty, file=buf)
print(nonempty, file=buf)
# this should work
buf.getvalue()
def test_to_string_with_col_space(self):
df = DataFrame(np.random.random(size=(1, 3)))
c10 = len(df.to_string(col_space=10).split("\n")[1])
c20 = len(df.to_string(col_space=20).split("\n")[1])
c30 = len(df.to_string(col_space=30).split("\n")[1])
assert c10 < c20 < c30
# GH 8230
# col_space wasn't being applied with header=False
with_header = df.to_string(col_space=20)
with_header_row1 = with_header.splitlines()[1]
no_header = df.to_string(col_space=20, header=False)
assert len(with_header_row1) == len(no_header)
def test_to_string_with_column_specific_col_space_raises(self):
df = DataFrame(np.random.random(size=(3, 3)), columns=["a", "b", "c"])
msg = (
"Col_space length\\(\\d+\\) should match "
"DataFrame number of columns\\(\\d+\\)"
)
with pytest.raises(ValueError, match=msg):
df.to_string(col_space=[30, 40])
with pytest.raises(ValueError, match=msg):
df.to_string(col_space=[30, 40, 50, 60])
msg = "unknown column"
with pytest.raises(ValueError, match=msg):
df.to_string(col_space={"a": "foo", "b": 23, "d": 34})
def test_to_string_with_column_specific_col_space(self):
df = DataFrame(np.random.random(size=(3, 3)), columns=["a", "b", "c"])
result = df.to_string(col_space={"a": 10, "b": 11, "c": 12})
# 3 separating space + each col_space for (id, a, b, c)
assert len(result.split("\n")[1]) == (3 + 1 + 10 + 11 + 12)
result = df.to_string(col_space=[10, 11, 12])
assert len(result.split("\n")[1]) == (3 + 1 + 10 + 11 + 12)
def test_to_string_truncate_indices(self):
for index in [
tm.makeStringIndex,
tm.makeUnicodeIndex,
tm.makeIntIndex,
tm.makeDateIndex,
tm.makePeriodIndex,
]:
for column in [tm.makeStringIndex]:
for h in [10, 20]:
for w in [10, 20]:
with option_context("display.expand_frame_repr", False):
df = DataFrame(index=index(h), columns=column(w))
with option_context("display.max_rows", 15):
if h == 20:
assert has_vertically_truncated_repr(df)
else:
assert not has_vertically_truncated_repr(df)
with option_context("display.max_columns", 15):
if w == 20:
assert has_horizontally_truncated_repr(df)
else:
assert not (has_horizontally_truncated_repr(df))
with option_context(
"display.max_rows", 15, "display.max_columns", 15
):
if h == 20 and w == 20:
assert has_doubly_truncated_repr(df)
else:
assert not has_doubly_truncated_repr(df)
def test_to_string_truncate_multilevel(self):
arrays = [
["bar", "bar", "baz", "baz", "foo", "foo", "qux", "qux"],
["one", "two", "one", "two", "one", "two", "one", "two"],
]
df = DataFrame(index=arrays, columns=arrays)
with option_context("display.max_rows", 7, "display.max_columns", 7):
assert has_doubly_truncated_repr(df)
def test_truncate_with_different_dtypes(self):
# 11594, 12045
# when truncated the dtypes of the splits can differ
# 11594
import datetime
s = Series(
[datetime.datetime(2012, 1, 1)] * 10
+ [datetime.datetime(1012, 1, 2)]
+ [datetime.datetime(2012, 1, 3)] * 10
)
with option_context("display.max_rows", 8):
result = str(s)
assert "object" in result
# 12045
df = DataFrame({"text": ["some words"] + [None] * 9})
with option_context("display.max_rows", 8, "display.max_columns", 3):
result = str(df)
assert "None" in result
assert "NaN" not in result
def test_truncate_with_different_dtypes_multiindex(self):
# GH#13000
df = DataFrame({"Vals": range(100)})
frame = pd.concat([df], keys=["Sweep"], names=["Sweep", "Index"])
result = repr(frame)
result2 = repr(frame.iloc[:5])
assert result.startswith(result2)
def test_datetimelike_frame(self):
# GH 12211
df = DataFrame({"date": [Timestamp("20130101").tz_localize("UTC")] + [NaT] * 5})
with option_context("display.max_rows", 5):
result = str(df)
assert "2013-01-01 00:00:00+00:00" in result
assert "NaT" in result
assert "..." in result
assert "[6 rows x 1 columns]" in result
dts = [Timestamp("2011-01-01", tz="US/Eastern")] * 5 + [NaT] * 5
df = DataFrame({"dt": dts, "x": [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]})
with option_context("display.max_rows", 5):
expected = (
" dt x\n"
"0 2011-01-01 00:00:00-05:00 1\n"
"1 2011-01-01 00:00:00-05:00 2\n"
".. ... ..\n"
"8 NaT 9\n"
"9 NaT 10\n\n"
"[10 rows x 2 columns]"
)
assert repr(df) == expected
dts = [NaT] * 5 + [Timestamp("2011-01-01", tz="US/Eastern")] * 5
df = DataFrame({"dt": dts, "x": [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]})
with option_context("display.max_rows", 5):
expected = (
" dt x\n"
"0 NaT 1\n"
"1 NaT 2\n"
".. ... ..\n"
"8 2011-01-01 00:00:00-05:00 9\n"
"9 2011-01-01 00:00:00-05:00 10\n\n"
"[10 rows x 2 columns]"
)
assert repr(df) == expected
dts = [Timestamp("2011-01-01", tz="Asia/Tokyo")] * 5 + [
Timestamp("2011-01-01", tz="US/Eastern")
] * 5
df = DataFrame({"dt": dts, "x": [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]})
with option_context("display.max_rows", 5):
expected = (
" dt x\n"
"0 2011-01-01 00:00:00+09:00 1\n"
"1 2011-01-01 00:00:00+09:00 2\n"
".. ... ..\n"
"8 2011-01-01 00:00:00-05:00 9\n"
"9 2011-01-01 00:00:00-05:00 10\n\n"
"[10 rows x 2 columns]"
)
assert repr(df) == expected
@pytest.mark.parametrize(
"start_date",
[
"2017-01-01 23:59:59.999999999",
"2017-01-01 23:59:59.99999999",
"2017-01-01 23:59:59.9999999",
"2017-01-01 23:59:59.999999",
"2017-01-01 23:59:59.99999",
"2017-01-01 23:59:59.9999",
],
)
def test_datetimeindex_highprecision(self, start_date):
# GH19030
# Check that high-precision time values for the end of day are
# included in repr for DatetimeIndex
df = DataFrame({"A": date_range(start=start_date, freq="D", periods=5)})
result = str(df)
assert start_date in result
dti = date_range(start=start_date, freq="D", periods=5)
df = DataFrame({"A": range(5)}, index=dti)
result = str(df.index)
assert start_date in result
def test_nonunicode_nonascii_alignment(self):
df = DataFrame([["aa\xc3\xa4\xc3\xa4", 1], ["bbbb", 2]])
rep_str = df.to_string()
lines = rep_str.split("\n")
assert len(lines[1]) == len(lines[2])
def test_unicode_problem_decoding_as_ascii(self):
dm = DataFrame({"c/\u03c3": Series({"test": np.nan})})
str(dm.to_string())
def test_string_repr_encoding(self, datapath):
filepath = datapath("io", "parser", "data", "unicode_series.csv")
df = read_csv(filepath, header=None, encoding="latin1")
repr(df)
repr(df[1])
def test_repr_corner(self):
# representing infs poses no problems
df = DataFrame({"foo": [-np.inf, np.inf]})
repr(df)
def test_frame_info_encoding(self):
index = ["'Til There Was You (1997)", "ldum klaka (Cold Fever) (1994)"]
fmt.set_option("display.max_rows", 1)
df = DataFrame(columns=["a", "b", "c"], index=index)
repr(df)
repr(df.T)
fmt.set_option("display.max_rows", 200)
def test_wide_repr(self):
with option_context(
"mode.sim_interactive",
True,
"display.show_dimensions",
True,
"display.max_columns",
20,
):
max_cols = get_option("display.max_columns")
df = DataFrame(tm.rands_array(25, size=(10, max_cols - 1)))
set_option("display.expand_frame_repr", False)
rep_str = repr(df)
assert f"10 rows x {max_cols - 1} columns" in rep_str
set_option("display.expand_frame_repr", True)
wide_repr = repr(df)
assert rep_str != wide_repr
with option_context("display.width", 120):
wider_repr = repr(df)
assert len(wider_repr) < len(wide_repr)
reset_option("display.expand_frame_repr")
def test_wide_repr_wide_columns(self):
with option_context("mode.sim_interactive", True, "display.max_columns", 20):
df = DataFrame(
np.random.randn(5, 3), columns=["a" * 90, "b" * 90, "c" * 90]
)
rep_str = repr(df)
assert len(rep_str.splitlines()) == 20
def test_wide_repr_named(self):
with option_context("mode.sim_interactive", True, "display.max_columns", 20):
max_cols = get_option("display.max_columns")
df = DataFrame(tm.rands_array(25, size=(10, max_cols - 1)))
df.index.name = "DataFrame Index"
set_option("display.expand_frame_repr", False)
rep_str = repr(df)
set_option("display.expand_frame_repr", True)
wide_repr = repr(df)
assert rep_str != wide_repr
with option_context("display.width", 150):
wider_repr = repr(df)
assert len(wider_repr) < len(wide_repr)
for line in wide_repr.splitlines()[1::13]:
assert "DataFrame Index" in line
reset_option("display.expand_frame_repr")
def test_wide_repr_multiindex(self):
with option_context("mode.sim_interactive", True, "display.max_columns", 20):
midx = MultiIndex.from_arrays(tm.rands_array(5, size=(2, 10)))
max_cols = get_option("display.max_columns")
df = DataFrame(tm.rands_array(25, size=(10, max_cols - 1)), index=midx)
df.index.names = ["Level 0", "Level 1"]
set_option("display.expand_frame_repr", False)
rep_str = repr(df)
set_option("display.expand_frame_repr", True)
wide_repr = repr(df)
assert rep_str != wide_repr
with option_context("display.width", 150):
wider_repr = repr(df)
assert len(wider_repr) < len(wide_repr)
for line in wide_repr.splitlines()[1::13]:
assert "Level 0 Level 1" in line
reset_option("display.expand_frame_repr")
def test_wide_repr_multiindex_cols(self):
with option_context("mode.sim_interactive", True, "display.max_columns", 20):
max_cols = get_option("display.max_columns")
midx = MultiIndex.from_arrays(tm.rands_array(5, size=(2, 10)))
mcols = MultiIndex.from_arrays(tm.rands_array(3, size=(2, max_cols - 1)))
df = DataFrame(
tm.rands_array(25, (10, max_cols - 1)), index=midx, columns=mcols
)
df.index.names = ["Level 0", "Level 1"]
set_option("display.expand_frame_repr", False)
rep_str = repr(df)
set_option("display.expand_frame_repr", True)
wide_repr = repr(df)
assert rep_str != wide_repr
with option_context("display.width", 150, "display.max_columns", 20):
wider_repr = repr(df)
assert len(wider_repr) < len(wide_repr)
reset_option("display.expand_frame_repr")
def test_wide_repr_unicode(self):
with option_context("mode.sim_interactive", True, "display.max_columns", 20):
max_cols = 20
df = DataFrame(tm.rands_array(25, size=(10, max_cols - 1)))
set_option("display.expand_frame_repr", False)
rep_str = repr(df)
set_option("display.expand_frame_repr", True)
wide_repr = repr(df)
assert rep_str != wide_repr
with option_context("display.width", 150):
wider_repr = repr(df)
assert len(wider_repr) < len(wide_repr)
reset_option("display.expand_frame_repr")
def test_wide_repr_wide_long_columns(self):
with option_context("mode.sim_interactive", True):
df = DataFrame({"a": ["a" * 30, "b" * 30], "b": ["c" * 70, "d" * 80]})
result = repr(df)
assert "ccccc" in result
assert "ddddd" in result
def test_long_series(self):
n = 1000
s = Series(
np.random.randint(-50, 50, n),
index=[f"s{x:04d}" for x in range(n)],
dtype="int64",
)
import re
str_rep = str(s)
nmatches = len(re.findall("dtype", str_rep))
assert nmatches == 1
def test_index_with_nan(self):
# GH 2850
df = DataFrame(
{
"id1": {0: "1a3", 1: "9h4"},
"id2": {0: np.nan, 1: "d67"},
"id3": {0: "78d", 1: "79d"},
"value": {0: 123, 1: 64},
}
)
# multi-index
y = df.set_index(["id1", "id2", "id3"])
result = y.to_string()
expected = (
" value\nid1 id2 id3 \n"
"1a3 NaN 78d 123\n9h4 d67 79d 64"
)
assert result == expected
# index
y = df.set_index("id2")
result = y.to_string()
expected = (
" id1 id3 value\nid2 \n"
"NaN 1a3 78d 123\nd67 9h4 79d 64"
)
assert result == expected
# with append (this failed in 0.12)
y = df.set_index(["id1", "id2"]).set_index("id3", append=True)
result = y.to_string()
expected = (
" value\nid1 id2 id3 \n"
"1a3 NaN 78d 123\n9h4 d67 79d 64"
)
assert result == expected
# all-nan in mi
df2 = df.copy()
df2.loc[:, "id2"] = np.nan
y = df2.set_index("id2")
result = y.to_string()
expected = (
" id1 id3 value\nid2 \n"
"NaN 1a3 78d 123\nNaN 9h4 79d 64"
)
assert result == expected
# partial nan in mi
df2 = df.copy()
df2.loc[:, "id2"] = np.nan
y = df2.set_index(["id2", "id3"])
result = y.to_string()
expected = (
" id1 value\nid2 id3 \n"
"NaN 78d 1a3 123\n 79d 9h4 64"
)
assert result == expected
df = DataFrame(
{
"id1": {0: np.nan, 1: "9h4"},
"id2": {0: np.nan, 1: "d67"},
"id3": {0: np.nan, 1: "79d"},
"value": {0: 123, 1: 64},
}
)
y = df.set_index(["id1", "id2", "id3"])
result = y.to_string()
expected = (
" value\nid1 id2 id3 \n"
"NaN NaN NaN 123\n9h4 d67 79d 64"
)
assert result == expected
def test_to_string(self):
# big mixed
biggie = DataFrame(
{"A": np.random.randn(200), "B": tm.makeStringIndex(200)},
index=np.arange(200),
)
biggie.loc[:20, "A"] = np.nan
biggie.loc[:20, "B"] = np.nan
s = biggie.to_string()
buf = StringIO()
retval = biggie.to_string(buf=buf)
assert retval is None
assert buf.getvalue() == s
assert isinstance(s, str)
# print in right order
result = biggie.to_string(
columns=["B", "A"], col_space=17, float_format="%.5f".__mod__
)
lines = result.split("\n")
header = lines[0].strip().split()
joined = "\n".join([re.sub(r"\s+", " ", x).strip() for x in lines[1:]])
recons = read_csv(StringIO(joined), names=header, header=None, sep=" ")
tm.assert_series_equal(recons["B"], biggie["B"])
assert recons["A"].count() == biggie["A"].count()
assert (np.abs(recons["A"].dropna() - biggie["A"].dropna()) < 0.1).all()
# expected = ['B', 'A']
# assert header == expected
result = biggie.to_string(columns=["A"], col_space=17)
header = result.split("\n")[0].strip().split()
expected = ["A"]
assert header == expected
biggie.to_string(columns=["B", "A"], formatters={"A": lambda x: f"{x:.1f}"})
biggie.to_string(columns=["B", "A"], float_format=str)
biggie.to_string(columns=["B", "A"], col_space=12, float_format=str)
frame = DataFrame(index=np.arange(200))
frame.to_string()
def test_to_string_no_header(self):
df = DataFrame({"x": [1, 2, 3], "y": [4, 5, 6]})
df_s = df.to_string(header=False)
expected = "0 1 4\n1 2 5\n2 3 6"
assert df_s == expected
def test_to_string_specified_header(self):
df = DataFrame({"x": [1, 2, 3], "y": [4, 5, 6]})
df_s = df.to_string(header=["X", "Y"])
expected = " X Y\n0 1 4\n1 2 5\n2 3 6"
assert df_s == expected
msg = "Writing 2 cols but got 1 aliases"
with pytest.raises(ValueError, match=msg):
df.to_string(header=["X"])
def test_to_string_no_index(self):
# GH 16839, GH 13032
df = DataFrame({"x": [11, 22], "y": [33, -44], "z": ["AAA", " "]})
df_s = df.to_string(index=False)
# Leading space is expected for positive numbers.
expected = " x y z\n11 33 AAA\n22 -44 "
assert df_s == expected
df_s = df[["y", "x", "z"]].to_string(index=False)
expected = " y x z\n 33 11 AAA\n-44 22 "
assert df_s == expected
def test_to_string_line_width_no_index(self):
# GH 13998, GH 22505
df = DataFrame({"x": [1, 2, 3], "y": [4, 5, 6]})
df_s = df.to_string(line_width=1, index=False)
expected = " x \\\n 1 \n 2 \n 3 \n\n y \n 4 \n 5 \n 6 "
assert df_s == expected
df = DataFrame({"x": [11, 22, 33], "y": [4, 5, 6]})
df_s = df.to_string(line_width=1, index=False)
expected = " x \\\n11 \n22 \n33 \n\n y \n 4 \n 5 \n 6 "
assert df_s == expected
df = DataFrame({"x": [11, 22, -33], "y": [4, 5, -6]})
df_s = df.to_string(line_width=1, index=False)
expected = " x \\\n 11 \n 22 \n-33 \n\n y \n 4 \n 5 \n-6 "
assert df_s == expected
def test_to_string_float_formatting(self):
tm.reset_display_options()
fmt.set_option(
"display.precision",
5,
"display.column_space",
12,
"display.notebook_repr_html",
False,
)
df = DataFrame(
{"x": [0, 0.25, 3456.000, 12e45, 1.64e6, 1.7e8, 1.253456, np.pi, -1e6]}
)
df_s = df.to_string()
if _three_digit_exp():
expected = (
" x\n0 0.00000e+000\n1 2.50000e-001\n"
"2 3.45600e+003\n3 1.20000e+046\n4 1.64000e+006\n"
"5 1.70000e+008\n6 1.25346e+000\n7 3.14159e+000\n"
"8 -1.00000e+006"
)
else:
expected = (
" x\n0 0.00000e+00\n1 2.50000e-01\n"
"2 3.45600e+03\n3 1.20000e+46\n4 1.64000e+06\n"
"5 1.70000e+08\n6 1.25346e+00\n7 3.14159e+00\n"
"8 -1.00000e+06"
)
assert df_s == expected
df = DataFrame({"x": [3234, 0.253]})
df_s = df.to_string()
expected = " x\n0 3234.000\n1 0.253"
assert df_s == expected
tm.reset_display_options()
assert get_option("display.precision") == 6
df = DataFrame({"x": [1e9, 0.2512]})
df_s = df.to_string()
if _three_digit_exp():
expected = " x\n0 1.000000e+009\n1 2.512000e-001"
else:
expected = " x\n0 1.000000e+09\n1 2.512000e-01"
assert df_s == expected
def test_to_string_float_format_no_fixed_width(self):
# GH 21625
df = DataFrame({"x": [0.19999]})
expected = " x\n0 0.200"
assert df.to_string(float_format="%.3f") == expected
# GH 22270
df = DataFrame({"x": [100.0]})
expected = " x\n0 100"
assert df.to_string(float_format="%.0f") == expected
def test_to_string_small_float_values(self):
df = DataFrame({"a": [1.5, 1e-17, -5.5e-7]})
result = df.to_string()
# sadness per above
if _three_digit_exp():
expected = (
" a\n"
"0 1.500000e+000\n"
"1 1.000000e-017\n"
"2 -5.500000e-007"
)
else:
expected = (
" a\n"
"0 1.500000e+00\n"
"1 1.000000e-17\n"
"2 -5.500000e-07"
)
assert result == expected
# but not all exactly zero
df = df * 0
result = df.to_string()
expected = " 0\n0 0\n1 0\n2 -0"
def test_to_string_float_index(self):
index = Index([1.5, 2, 3, 4, 5])
df = DataFrame(np.arange(5), index=index)
result = df.to_string()
expected = " 0\n1.5 0\n2.0 1\n3.0 2\n4.0 3\n5.0 4"
assert result == expected
def test_to_string_complex_float_formatting(self):
# GH #25514, 25745
with option_context("display.precision", 5):
df = DataFrame(
{
"x": [
(0.4467846931321966 + 0.0715185102060818j),
(0.2739442392974528 + 0.23515228785438969j),
(0.26974928742135185 + 0.3250604054898979j),
(-1j),
]
}
)
result = df.to_string()
expected = (
" x\n0 0.44678+0.07152j\n"
"1 0.27394+0.23515j\n"
"2 0.26975+0.32506j\n"
"3 -0.00000-1.00000j"
)
assert result == expected
def test_to_string_ascii_error(self):
data = [
(
"0 ",
" .gitignore ",
" 5 ",
" \xe2\x80\xa2\xe2\x80\xa2\xe2\x80\xa2\xe2\x80\xa2\xe2\x80\xa2",
)
]
df = DataFrame(data)
# it works!
repr(df)
def test_to_string_int_formatting(self):
df = DataFrame({"x": [-15, 20, 25, -35]})
assert issubclass(df["x"].dtype.type, np.integer)
output = df.to_string()
expected = " x\n0 -15\n1 20\n2 25\n3 -35"
assert output == expected
def test_to_string_index_formatter(self):
df = DataFrame([range(5), range(5, 10), range(10, 15)])
rs = df.to_string(formatters={"__index__": lambda x: "abc"[x]})
xp = """\
0 1 2 3 4
a 0 1 2 3 4
b 5 6 7 8 9
c 10 11 12 13 14\
"""
assert rs == xp
def test_to_string_left_justify_cols(self):
tm.reset_display_options()
df = DataFrame({"x": [3234, 0.253]})
df_s = df.to_string(justify="left")
expected = " x \n0 3234.000\n1 0.253"
assert df_s == expected
def test_to_string_format_na(self):
tm.reset_display_options()
df = DataFrame(
{
"A": [np.nan, -1, -2.1234, 3, 4],
"B": [np.nan, "foo", "foooo", "fooooo", "bar"],
}
)
result = df.to_string()
expected = (
" A B\n"
"0 NaN NaN\n"
"1 -1.0000 foo\n"
"2 -2.1234 foooo\n"
"3 3.0000 fooooo\n"
"4 4.0000 bar"
)
assert result == expected
df = DataFrame(
{
"A": [np.nan, -1.0, -2.0, 3.0, 4.0],
"B": [np.nan, "foo", "foooo", "fooooo", "bar"],
}
)
result = df.to_string()
expected = (
" A B\n"
"0 NaN NaN\n"
"1 -1.0 foo\n"
"2 -2.0 foooo\n"
"3 3.0 fooooo\n"
"4 4.0 bar"
)
assert result == expected
def test_to_string_format_inf(self):
# Issue #24861
tm.reset_display_options()
df = DataFrame(
{
"A": [-np.inf, np.inf, -1, -2.1234, 3, 4],
"B": [-np.inf, np.inf, "foo", "foooo", "fooooo", "bar"],
}
)
result = df.to_string()
expected = (
" A B\n"
"0 -inf -inf\n"
"1 inf inf\n"
"2 -1.0000 foo\n"
"3 -2.1234 foooo\n"
"4 3.0000 fooooo\n"
"5 4.0000 bar"
)
assert result == expected
df = DataFrame(
{
"A": [-np.inf, np.inf, -1.0, -2.0, 3.0, 4.0],
"B": [-np.inf, np.inf, "foo", "foooo", "fooooo", "bar"],
}
)
result = df.to_string()
expected = (
" A B\n"
"0 -inf -inf\n"
"1 inf inf\n"
"2 -1.0 foo\n"
"3 -2.0 foooo\n"
"4 3.0 fooooo\n"
"5 4.0 bar"
)
assert result == expected
def test_to_string_decimal(self):
# Issue #23614
df = DataFrame({"A": [6.0, 3.1, 2.2]})
expected = " A\n0 6,0\n1 3,1\n2 2,2"
assert df.to_string(decimal=",") == expected
def test_to_string_line_width(self):
df = DataFrame(123, index=range(10, 15), columns=range(30))
s = df.to_string(line_width=80)
assert max(len(line) for line in s.split("\n")) == 80
def test_show_dimensions(self):
df = DataFrame(123, index=range(10, 15), columns=range(30))
with option_context(
"display.max_rows",
10,
"display.max_columns",
40,
"display.width",
500,
"display.expand_frame_repr",
"info",
"display.show_dimensions",
True,
):
assert "5 rows" in str(df)
assert "5 rows" in df._repr_html_()
with option_context(
"display.max_rows",
10,
"display.max_columns",
40,
"display.width",
500,
"display.expand_frame_repr",
"info",
"display.show_dimensions",
False,
):
assert "5 rows" not in str(df)
assert "5 rows" not in df._repr_html_()
with option_context(
"display.max_rows",
2,
"display.max_columns",
2,
"display.width",
500,
"display.expand_frame_repr",
"info",
"display.show_dimensions",
"truncate",
):
assert "5 rows" in str(df)
assert "5 rows" in df._repr_html_()
with option_context(
"display.max_rows",
10,
"display.max_columns",
40,
"display.width",
500,
"display.expand_frame_repr",
"info",
"display.show_dimensions",
"truncate",
):
assert "5 rows" not in str(df)
assert "5 rows" not in df._repr_html_()
def test_repr_html(self, float_frame):
df = float_frame
df._repr_html_()
fmt.set_option("display.max_rows", 1, "display.max_columns", 1)
df._repr_html_()
fmt.set_option("display.notebook_repr_html", False)
df._repr_html_()
tm.reset_display_options()
df = DataFrame([[1, 2], [3, 4]])
fmt.set_option("display.show_dimensions", True)
assert "2 rows" in df._repr_html_()
fmt.set_option("display.show_dimensions", False)
assert "2 rows" not in df._repr_html_()
tm.reset_display_options()
def test_repr_html_mathjax(self):
df = DataFrame([[1, 2], [3, 4]])
assert "tex2jax_ignore" not in df._repr_html_()
with option_context("display.html.use_mathjax", False):
assert "tex2jax_ignore" in df._repr_html_()
def test_repr_html_wide(self):
max_cols = 20
df = DataFrame(tm.rands_array(25, size=(10, max_cols - 1)))
with option_context("display.max_rows", 60, "display.max_columns", 20):
assert "..." not in df._repr_html_()
wide_df = DataFrame(tm.rands_array(25, size=(10, max_cols + 1)))
with option_context("display.max_rows", 60, "display.max_columns", 20):
assert "..." in wide_df._repr_html_()
def test_repr_html_wide_multiindex_cols(self):
max_cols = 20
mcols = MultiIndex.from_product(
[np.arange(max_cols // 2), ["foo", "bar"]], names=["first", "second"]
)
df = DataFrame(tm.rands_array(25, size=(10, len(mcols))), columns=mcols)
reg_repr = df._repr_html_()
assert "..." not in reg_repr
mcols = MultiIndex.from_product(
(np.arange(1 + (max_cols // 2)), ["foo", "bar"]), names=["first", "second"]
)
df = DataFrame(tm.rands_array(25, size=(10, len(mcols))), columns=mcols)
with option_context("display.max_rows", 60, "display.max_columns", 20):
assert "..." in df._repr_html_()
def test_repr_html_long(self):
with option_context("display.max_rows", 60):
max_rows = get_option("display.max_rows")
h = max_rows - 1
df = DataFrame({"A": np.arange(1, 1 + h), "B": np.arange(41, 41 + h)})
reg_repr = df._repr_html_()
assert ".." not in reg_repr
assert str(41 + max_rows // 2) in reg_repr
h = max_rows + 1
df = DataFrame({"A": np.arange(1, 1 + h), "B": np.arange(41, 41 + h)})
long_repr = df._repr_html_()
assert ".." in long_repr
assert str(41 + max_rows // 2) not in long_repr
assert f"{h} rows " in long_repr
assert "2 columns" in long_repr
def test_repr_html_float(self):
with option_context("display.max_rows", 60):
max_rows = get_option("display.max_rows")
h = max_rows - 1
df = DataFrame(
{
"idx": np.linspace(-10, 10, h),
"A": np.arange(1, 1 + h),
"B": np.arange(41, 41 + h),
}
).set_index("idx")
reg_repr = df._repr_html_()
assert ".." not in reg_repr
assert f"<td>{40 + h}</td>" in reg_repr
h = max_rows + 1
df = DataFrame(
{
"idx": np.linspace(-10, 10, h),
"A": np.arange(1, 1 + h),
"B": np.arange(41, 41 + h),
}
).set_index("idx")
long_repr = df._repr_html_()
assert ".." in long_repr
assert "<td>31</td>" not in long_repr
assert f"{h} rows " in long_repr
assert "2 columns" in long_repr
def test_repr_html_long_multiindex(self):
max_rows = 60
max_L1 = max_rows // 2
tuples = list(itertools.product(np.arange(max_L1), ["foo", "bar"]))
idx = MultiIndex.from_tuples(tuples, names=["first", "second"])
df = DataFrame(np.random.randn(max_L1 * 2, 2), index=idx, columns=["A", "B"])
with option_context("display.max_rows", 60, "display.max_columns", 20):
reg_repr = df._repr_html_()
assert "..." not in reg_repr
tuples = list(itertools.product(np.arange(max_L1 + 1), ["foo", "bar"]))
idx = MultiIndex.from_tuples(tuples, names=["first", "second"])
df = DataFrame(
np.random.randn((max_L1 + 1) * 2, 2), index=idx, columns=["A", "B"]
)
long_repr = df._repr_html_()
assert "..." in long_repr
def test_repr_html_long_and_wide(self):
max_cols = 20
max_rows = 60
h, w = max_rows - 1, max_cols - 1
df = DataFrame({k: np.arange(1, 1 + h) for k in np.arange(w)})
with option_context("display.max_rows", 60, "display.max_columns", 20):
assert "..." not in df._repr_html_()
h, w = max_rows + 1, max_cols + 1
df = DataFrame({k: np.arange(1, 1 + h) for k in np.arange(w)})
with option_context("display.max_rows", 60, "display.max_columns", 20):
assert "..." in df._repr_html_()
def test_info_repr(self):
# GH#21746 For tests inside a terminal (i.e. not CI) we need to detect
# the terminal size to ensure that we try to print something "too big"
term_width, term_height = get_terminal_size()
max_rows = 60
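        # start from 20 columns and allow one extra column per 4 terminal columns beyond 80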
max_cols = 20 + (max(term_width, 80) - 80) // 4
# Long
h, w = max_rows + 1, max_cols - 1
df = DataFrame({k: np.arange(1, 1 + h) for k in np.arange(w)})
assert has_vertically_truncated_repr(df)
with option_context("display.large_repr", "info"):
assert has_info_repr(df)
# Wide
h, w = max_rows - 1, max_cols + 1
df = DataFrame({k: np.arange(1, 1 + h) for k in np.arange(w)})
assert has_horizontally_truncated_repr(df)
with option_context(
"display.large_repr", "info", "display.max_columns", max_cols
):
assert has_info_repr(df)
def test_info_repr_max_cols(self):
# GH #6939
df = DataFrame(np.random.randn(10, 5))
with option_context(
"display.large_repr",
"info",
"display.max_columns",
1,
"display.max_info_columns",
4,
):
assert has_non_verbose_info_repr(df)
with option_context(
"display.large_repr",
"info",
"display.max_columns",
1,
"display.max_info_columns",
5,
):
assert not has_non_verbose_info_repr(df)
# test verbose overrides
# fmt.set_option('display.max_info_columns', 4) # exceeded
def test_info_repr_html(self):
max_rows = 60
max_cols = 20
# Long
h, w = max_rows + 1, max_cols - 1
df = DataFrame({k: np.arange(1, 1 + h) for k in np.arange(w)})
assert r"<class" not in df._repr_html_()
with option_context("display.large_repr", "info"):
assert r"<class" in df._repr_html_()
# Wide
h, w = max_rows - 1, max_cols + 1
df = DataFrame({k: np.arange(1, 1 + h) for k in np.arange(w)})
assert "<class" not in df._repr_html_()
with option_context(
"display.large_repr", "info", "display.max_columns", max_cols
):
assert "<class" in df._repr_html_()
def test_fake_qtconsole_repr_html(self, float_frame):
df = float_frame
def get_ipython():
return {"config": {"KernelApp": {"parent_appname": "ipython-qtconsole"}}}
repstr = df._repr_html_()
assert repstr is not None
fmt.set_option("display.max_rows", 5, "display.max_columns", 2)
repstr = df._repr_html_()
assert "class" in repstr # info fallback
tm.reset_display_options()
def test_pprint_pathological_object(self):
"""
If the test fails, it at least won't hang.
"""
class A:
def __getitem__(self, key):
return 3 # obviously simplified
df = DataFrame([A()])
repr(df) # just don't die
def test_float_trim_zeros(self):
vals = [
2.08430917305e10,
3.52205017305e10,
2.30674817305e10,
2.03954217305e10,
5.59897817305e10,
]
skip = True
for line in repr(DataFrame({"A": vals})).split("\n")[:-2]:
if line.startswith("dtype:"):
continue
if _three_digit_exp():
assert ("+010" in line) or skip
else:
assert ("+10" in line) or skip
skip = False
@pytest.mark.parametrize(
"data, expected",
[
(["3.50"], "0 3.50\ndtype: object"),
([1.20, "1.00"], "0 1.2\n1 1.00\ndtype: object"),
([np.nan], "0 NaN\ndtype: float64"),
([None], "0 None\ndtype: object"),
(["3.50", np.nan], "0 3.50\n1 NaN\ndtype: object"),
([3.50, np.nan], "0 3.5\n1 NaN\ndtype: float64"),
([3.50, np.nan, "3.50"], "0 3.5\n1 NaN\n2 3.50\ndtype: object"),
([3.50, None, "3.50"], "0 3.5\n1 None\n2 3.50\ndtype: object"),
],
)
def test_repr_str_float_truncation(self, data, expected):
# GH#38708
series = Series(data)
result = repr(series)
assert result == expected
@pytest.mark.parametrize(
"float_format,expected",
[
("{:,.0f}".format, "0 1,000\n1 test\ndtype: object"),
("{:.4f}".format, "0 1000.0000\n1 test\ndtype: object"),
],
)
def test_repr_float_format_in_object_col(self, float_format, expected):
# GH#40024
df = Series([1000.0, "test"])
with option_context("display.float_format", float_format):
result = repr(df)
assert result == expected
def test_dict_entries(self):
df = DataFrame({"A": [{"a": 1, "b": 2}]})
val = df.to_string()
assert "'a': 1" in val
assert "'b': 2" in val
def test_categorical_columns(self):
# GH35439
data = [[4, 2], [3, 2], [4, 3]]
cols = ["aaaaaaaaa", "b"]
df = DataFrame(data, columns=cols)
df_cat_cols = DataFrame(data, columns=pd.CategoricalIndex(cols))
assert df.to_string() == df_cat_cols.to_string()
def test_period(self):
# GH 12615
df = DataFrame(
{
"A": pd.period_range("2013-01", periods=4, freq="M"),
"B": [
pd.Period("2011-01", freq="M"),
pd.Period("2011-02-01", freq="D"),
pd.Period("2011-03-01 09:00", freq="H"),
pd.Period("2011-04", freq="M"),
],
"C": list("abcd"),
}
)
exp = (
" A B C\n"
"0 2013-01 2011-01 a\n"
"1 2013-02 2011-02-01 b\n"
"2 2013-03 2011-03-01 09:00 c\n"
"3 2013-04 2011-04 d"
)
assert str(df) == exp
@pytest.mark.parametrize(
"length, max_rows, min_rows, expected",
[
(10, 10, 10, 10),
(10, 10, None, 10),
(10, 8, None, 8),
(20, 30, 10, 30), # max_rows > len(frame), hence max_rows
(50, 30, 10, 10), # max_rows < len(frame), hence min_rows
(100, 60, 10, 10), # same
(60, 60, 10, 60), # edge case
(61, 60, 10, 10), # edge case
],
)
def test_max_rows_fitted(self, length, min_rows, max_rows, expected):
"""Check that display logic is correct.
GH #37359
See description here:
https://pandas.pydata.org/docs/dev/user_guide/options.html#frequently-used-options
"""
formatter = fmt.DataFrameFormatter(
DataFrame(np.random.rand(length, 3)),
max_rows=max_rows,
min_rows=min_rows,
)
result = formatter.max_rows_fitted
assert result == expected
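# Illustrative sketch (not a collected test): the fallback described above can be seen
# directly with option_context -- once the frame is longer than max_rows, the repr is
# truncated to min_rows instead of max_rows. The helper name below is ours, not pandas API.
def _demo_min_rows_fallback():
    df = DataFrame(np.random.rand(50, 3))
    with option_context("display.max_rows", 30, "display.min_rows", 10):
        # 50 > 30, so only min_rows rows (5 head + 5 tail) plus the "..." marker are shown
        return repr(df)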
def gen_series_formatting():
s1 = Series(["a"] * 100)
s2 = Series(["ab"] * 100)
s3 = Series(["a", "ab", "abc", "abcd", "abcde", "abcdef"])
s4 = s3[::-1]
test_sers = {"onel": s1, "twol": s2, "asc": s3, "desc": s4}
return test_sers
class TestSeriesFormatting:
def setup_method(self, method):
self.ts = tm.makeTimeSeries()
def test_repr_unicode(self):
s = Series(["\u03c3"] * 10)
repr(s)
a = Series(["\u05d0"] * 1000)
a.name = "title1"
repr(a)
def test_to_string(self):
buf = StringIO()
s = self.ts.to_string()
retval = self.ts.to_string(buf=buf)
assert retval is None
assert buf.getvalue().strip() == s
# pass float_format
format = "%.4f".__mod__
result = self.ts.to_string(float_format=format)
result = [x.split()[1] for x in result.split("\n")[:-1]]
expected = [format(x) for x in self.ts]
assert result == expected
# empty string
result = self.ts[:0].to_string()
assert result == "Series([], Freq: B)"
result = self.ts[:0].to_string(length=0)
assert result == "Series([], Freq: B)"
# name and length
cp = self.ts.copy()
cp.name = "foo"
result = cp.to_string(length=True, name=True, dtype=True)
last_line = result.split("\n")[-1].strip()
assert last_line == (f"Freq: B, Name: foo, Length: {len(cp)}, dtype: float64")
def test_freq_name_separation(self):
s = Series(
np.random.randn(10), index=date_range("1/1/2000", periods=10), name=0
)
result = repr(s)
assert "Freq: D, Name: 0" in result
def test_to_string_mixed(self):
s = Series(["foo", np.nan, -1.23, 4.56])
result = s.to_string()
expected = "0 foo\n" + "1 NaN\n" + "2 -1.23\n" + "3 4.56"
assert result == expected
# but don't count NAs as floats
s = Series(["foo", np.nan, "bar", "baz"])
result = s.to_string()
expected = "0 foo\n" + "1 NaN\n" + "2 bar\n" + "3 baz"
assert result == expected
s = Series(["foo", 5, "bar", "baz"])
result = s.to_string()
expected = "0 foo\n" + "1 5\n" + "2 bar\n" + "3 baz"
assert result == expected
def test_to_string_float_na_spacing(self):
s = Series([0.0, 1.5678, 2.0, -3.0, 4.0])
s[::2] = np.nan
result = s.to_string()
expected = (
"0 NaN\n"
+ "1 1.5678\n"
+ "2 NaN\n"
+ "3 -3.0000\n"
+ "4 NaN"
)
assert result == expected
def test_to_string_without_index(self):
# GH 11729 Test index=False option
s = Series([1, 2, 3, 4])
result = s.to_string(index=False)
expected = "1\n" + "2\n" + "3\n" + "4"
assert result == expected
def test_unicode_name_in_footer(self):
s = Series([1, 2], name="\u05e2\u05d1\u05e8\u05d9\u05ea")
sf = fmt.SeriesFormatter(s, name="\u05e2\u05d1\u05e8\u05d9\u05ea")
sf._get_footer() # should not raise exception
def test_east_asian_unicode_series(self):
# not aligned properly because of east asian width
# unicode index
s = Series(["a", "bb", "CCC", "D"], index=["あ", "いい", "ううう", "ええええ"])
expected = "あ a\nいい bb\nううう CCC\nええええ D\ndtype: object"
assert repr(s) == expected
# unicode values
s = Series(["あ", "いい", "ううう", "ええええ"], index=["a", "bb", "c", "ddd"])
expected = "a あ\nbb いい\nc ううう\nddd ええええ\ndtype: object"
assert repr(s) == expected
# both
s = Series(["あ", "いい", "ううう", "ええええ"], index=["ああ", "いいいい", "う", "えええ"])
expected = (
"ああ あ\nいいいい いい\nう ううう\nえええ ええええ\ndtype: object"
)
assert repr(s) == expected
# unicode footer
s = Series(
["あ", "いい", "ううう", "ええええ"], index=["ああ", "いいいい", "う", "えええ"], name="おおおおおおお"
)
expected = (
"ああ あ\nいいいい いい\nう ううう\n"
"えええ ええええ\nName: おおおおおおお, dtype: object"
)
assert repr(s) == expected
# MultiIndex
idx = MultiIndex.from_tuples(
[("あ", "いい"), ("う", "え"), ("おおお", "かかかか"), ("き", "くく")]
)
s = Series([1, 22, 3333, 44444], index=idx)
expected = (
"あ いい 1\n"
"う え 22\n"
"おおお かかかか 3333\n"
"き くく 44444\ndtype: int64"
)
assert repr(s) == expected
# object dtype, shorter than unicode repr
s = Series([1, 22, 3333, 44444], index=[1, "AB", np.nan, "あああ"])
expected = (
"1 1\nAB 22\nNaN 3333\nあああ 44444\ndtype: int64"
)
assert repr(s) == expected
# object dtype, longer than unicode repr
s = Series(
[1, 22, 3333, 44444], index=[1, "AB", Timestamp("2011-01-01"), "あああ"]
)
expected = (
"1 1\n"
"AB 22\n"
"2011-01-01 00:00:00 3333\n"
"あああ 44444\ndtype: int64"
)
assert repr(s) == expected
# truncate
with option_context("display.max_rows", 3):
s = Series(["あ", "いい", "ううう", "ええええ"], name="おおおおおおお")
expected = (
"0 あ\n ... \n"
"3 ええええ\n"
"Name: おおおおおおお, Length: 4, dtype: object"
)
assert repr(s) == expected
s.index = ["ああ", "いいいい", "う", "えええ"]
expected = (
"ああ あ\n ... \n"
"えええ ええええ\n"
"Name: おおおおおおお, Length: 4, dtype: object"
)
assert repr(s) == expected
# Enable Unicode option -----------------------------------------
with option_context("display.unicode.east_asian_width", True):
# unicode index
s = Series(["a", "bb", "CCC", "D"], index=["あ", "いい", "ううう", "ええええ"])
expected = (
"あ a\nいい bb\nううう CCC\n"
"ええええ D\ndtype: object"
)
assert repr(s) == expected
# unicode values
s = Series(["あ", "いい", "ううう", "ええええ"], index=["a", "bb", "c", "ddd"])
expected = (
"a あ\nbb いい\nc ううう\n"
"ddd ええええ\ndtype: object"
)
assert repr(s) == expected
# both
s = Series(["あ", "いい", "ううう", "ええええ"], index=["ああ", "いいいい", "う", "えええ"])
expected = (
"ああ あ\n"
"いいいい いい\n"
"う ううう\n"
"えええ ええええ\ndtype: object"
)
assert repr(s) == expected
# unicode footer
s = Series(
["あ", "いい", "ううう", "ええええ"],
index=["ああ", "いいいい", "う", "えええ"],
name="おおおおおおお",
)
expected = (
"ああ あ\n"
"いいいい いい\n"
"う ううう\n"
"えええ ええええ\n"
"Name: おおおおおおお, dtype: object"
)
assert repr(s) == expected
# MultiIndex
idx = MultiIndex.from_tuples(
[("あ", "いい"), ("う", "え"), ("おおお", "かかかか"), ("き", "くく")]
)
s = Series([1, 22, 3333, 44444], index=idx)
expected = (
"あ いい 1\n"
"う え 22\n"
"おおお かかかか 3333\n"
"き くく 44444\n"
"dtype: int64"
)
assert repr(s) == expected
# object dtype, shorter than unicode repr
s = Series([1, 22, 3333, 44444], index=[1, "AB", np.nan, "あああ"])
expected = (
"1 1\nAB 22\nNaN 3333\n"
"あああ 44444\ndtype: int64"
)
assert repr(s) == expected
# object dtype, longer than unicode repr
s = Series(
[1, 22, 3333, 44444],
index=[1, "AB", Timestamp("2011-01-01"), "あああ"],
)
expected = (
"1 1\n"
"AB 22\n"
"2011-01-01 00:00:00 3333\n"
"あああ 44444\ndtype: int64"
)
assert repr(s) == expected
# truncate
with option_context("display.max_rows", 3):
s = Series(["あ", "いい", "ううう", "ええええ"], name="おおおおおおお")
expected = (
"0 あ\n ... \n"
"3 ええええ\n"
"Name: おおおおおおお, Length: 4, dtype: object"
)
assert repr(s) == expected
s.index = ["ああ", "いいいい", "う", "えええ"]
expected = (
"ああ あ\n"
" ... \n"
"えええ ええええ\n"
"Name: おおおおおおお, Length: 4, dtype: object"
)
assert repr(s) == expected
# ambiguous unicode
s = Series(
["¡¡", "い¡¡", "ううう", "ええええ"], index=["ああ", "¡¡¡¡いい", "¡¡", "えええ"]
)
expected = (
"ああ ¡¡\n"
"¡¡¡¡いい い¡¡\n"
"¡¡ ううう\n"
"えええ ええええ\ndtype: object"
)
assert repr(s) == expected
def test_float_trim_zeros(self):
vals = [
2.08430917305e10,
3.52205017305e10,
2.30674817305e10,
2.03954217305e10,
5.59897817305e10,
]
for line in repr(Series(vals)).split("\n"):
if line.startswith("dtype:"):
continue
if _three_digit_exp():
assert "+010" in line
else:
assert "+10" in line
def test_datetimeindex(self):
index = date_range("20130102", periods=6)
s = Series(1, index=index)
result = s.to_string()
assert "2013-01-02" in result
# nat in index
s2 = Series(2, index=[Timestamp("20130111"), NaT])
s = s2.append(s)
result = s.to_string()
assert "NaT" in result
# nat in summary
result = str(s2.index)
assert "NaT" in result
@pytest.mark.parametrize(
"start_date",
[
"2017-01-01 23:59:59.999999999",
"2017-01-01 23:59:59.99999999",
"2017-01-01 23:59:59.9999999",
"2017-01-01 23:59:59.999999",
"2017-01-01 23:59:59.99999",
"2017-01-01 23:59:59.9999",
],
)
def test_datetimeindex_highprecision(self, start_date):
# GH19030
# Check that high-precision time values for the end of day are
# included in repr for DatetimeIndex
s1 = Series(date_range(start=start_date, freq="D", periods=5))
result = str(s1)
assert start_date in result
dti = date_range(start=start_date, freq="D", periods=5)
s2 = Series(3, index=dti)
result = str(s2.index)
assert start_date in result
def test_timedelta64(self):
from datetime import (
datetime,
timedelta,
)
Series(np.array([1100, 20], dtype="timedelta64[ns]")).to_string()
s = Series(date_range("2012-1-1", periods=3, freq="D"))
# GH2146
# adding NaTs
y = s - s.shift(1)
result = y.to_string()
assert "1 days" in result
assert "00:00:00" not in result
assert "NaT" in result
# with frac seconds
o = Series([datetime(2012, 1, 1, microsecond=150)] * 3)
y = s - o
result = y.to_string()
assert "-1 days +23:59:59.999850" in result
# rounding?
o = Series([datetime(2012, 1, 1, 1)] * 3)
y = s - o
result = y.to_string()
assert "-1 days +23:00:00" in result
assert "1 days 23:00:00" in result
o = Series([datetime(2012, 1, 1, 1, 1)] * 3)
y = s - o
result = y.to_string()
assert "-1 days +22:59:00" in result
assert "1 days 22:59:00" in result
o = Series([datetime(2012, 1, 1, 1, 1, microsecond=150)] * 3)
y = s - o
result = y.to_string()
assert "-1 days +22:58:59.999850" in result
assert "0 days 22:58:59.999850" in result
# neg time
td = timedelta(minutes=5, seconds=3)
s2 = Series(date_range("2012-1-1", periods=3, freq="D")) + td
y = s - s2
result = y.to_string()
assert "-1 days +23:54:57" in result
td = timedelta(microseconds=550)
s2 = Series(date_range("2012-1-1", periods=3, freq="D")) + td
y = s - td
result = y.to_string()
assert "2012-01-01 23:59:59.999450" in result
# no boxing of the actual elements
td = Series(pd.timedelta_range("1 days", periods=3))
result = td.to_string()
assert result == "0 1 days\n1 2 days\n2 3 days"
def test_mixed_datetime64(self):
df = DataFrame({"A": [1, 2], "B": ["2012-01-01", "2012-01-02"]})
df["B"] = pd.to_datetime(df.B)
result = repr(df.loc[0])
assert "2012-01-01" in result
def test_period(self):
# GH 12615
index = pd.period_range("2013-01", periods=6, freq="M")
s = Series(np.arange(6, dtype="int64"), index=index)
exp = (
"2013-01 0\n"
"2013-02 1\n"
"2013-03 2\n"
"2013-04 3\n"
"2013-05 4\n"
"2013-06 5\n"
"Freq: M, dtype: int64"
)
assert str(s) == exp
s = Series(index)
exp = (
"0 2013-01\n"
"1 2013-02\n"
"2 2013-03\n"
"3 2013-04\n"
"4 2013-05\n"
"5 2013-06\n"
"dtype: period[M]"
)
assert str(s) == exp
# periods with mixed freq
s = Series(
[
pd.Period("2011-01", freq="M"),
pd.Period("2011-02-01", freq="D"),
pd.Period("2011-03-01 09:00", freq="H"),
]
)
exp = (
"0 2011-01\n1 2011-02-01\n"
"2 2011-03-01 09:00\ndtype: object"
)
assert str(s) == exp
def test_max_multi_index_display(self):
# GH 7101
# doc example (indexing.rst)
# multi-index
arrays = [
["bar", "bar", "baz", "baz", "foo", "foo", "qux", "qux"],
["one", "two", "one", "two", "one", "two", "one", "two"],
]
tuples = list(zip(*arrays))
index = MultiIndex.from_tuples(tuples, names=["first", "second"])
s = Series(np.random.randn(8), index=index)
with option_context("display.max_rows", 10):
assert len(str(s).split("\n")) == 10
with option_context("display.max_rows", 3):
assert len(str(s).split("\n")) == 5
with option_context("display.max_rows", 2):
assert len(str(s).split("\n")) == 5
with option_context("display.max_rows", 1):
assert len(str(s).split("\n")) == 4
with option_context("display.max_rows", 0):
assert len(str(s).split("\n")) == 10
# index
s = Series(np.random.randn(8), None)
with option_context("display.max_rows", 10):
assert len(str(s).split("\n")) == 9
with option_context("display.max_rows", 3):
assert len(str(s).split("\n")) == 4
with option_context("display.max_rows", 2):
assert len(str(s).split("\n")) == 4
with option_context("display.max_rows", 1):
assert len(str(s).split("\n")) == 3
with option_context("display.max_rows", 0):
assert len(str(s).split("\n")) == 9
# Make sure #8532 is fixed
def test_consistent_format(self):
s = Series([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0.9999, 1, 1] * 10)
with option_context("display.max_rows", 10, "display.show_dimensions", False):
res = repr(s)
exp = (
"0 1.0000\n1 1.0000\n2 1.0000\n3 "
"1.0000\n4 1.0000\n ... \n125 "
"1.0000\n126 1.0000\n127 0.9999\n128 "
"1.0000\n129 1.0000\ndtype: float64"
)
assert res == exp
def chck_ncols(self, s):
with option_context("display.max_rows", 10):
res = repr(s)
lines = res.split("\n")
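            # drop the "..." truncation row (any line containing dots) and the trailing
            # dtype line, then check that all remaining rows render to the same width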
lines = [
line for line in repr(s).split("\n") if not re.match(r"[^\.]*\.+", line)
][:-1]
ncolsizes = len({len(line.strip()) for line in lines})
assert ncolsizes == 1
def test_format_explicit(self):
test_sers = gen_series_formatting()
with option_context("display.max_rows", 4, "display.show_dimensions", False):
res = repr(test_sers["onel"])
exp = "0 a\n1 a\n ..\n98 a\n99 a\ndtype: object"
assert exp == res
res = repr(test_sers["twol"])
exp = "0 ab\n1 ab\n ..\n98 ab\n99 ab\ndtype: object"
assert exp == res
res = repr(test_sers["asc"])
exp = (
"0 a\n1 ab\n ... \n4 abcde\n5 "
"abcdef\ndtype: object"
)
assert exp == res
res = repr(test_sers["desc"])
exp = (
"5 abcdef\n4 abcde\n ... \n1 ab\n0 "
"a\ndtype: object"
)
assert exp == res
def test_ncols(self):
test_sers = gen_series_formatting()
for s in test_sers.values():
self.chck_ncols(s)
def test_max_rows_eq_one(self):
s = Series(range(10), dtype="int64")
with option_context("display.max_rows", 1):
strrepr = repr(s).split("\n")
exp1 = ["0", "0"]
res1 = strrepr[0].split()
assert exp1 == res1
exp2 = [".."]
res2 = strrepr[1].split()
assert exp2 == res2
def test_truncate_ndots(self):
def getndots(s):
return len(re.match(r"[^\.]*(\.*)", s).groups()[0])
s = Series([0, 2, 3, 6])
with option_context("display.max_rows", 2):
strrepr = repr(s).replace("\n", "")
assert getndots(strrepr) == 2
s = Series([0, 100, 200, 400])
with option_context("display.max_rows", 2):
strrepr = repr(s).replace("\n", "")
assert getndots(strrepr) == 3
def test_show_dimensions(self):
# gh-7117
s = Series(range(5))
assert "Length" not in repr(s)
with option_context("display.max_rows", 4):
assert "Length" in repr(s)
with option_context("display.show_dimensions", True):
assert "Length" in repr(s)
with option_context("display.max_rows", 4, "display.show_dimensions", False):
assert "Length" not in repr(s)
def test_repr_min_rows(self):
s = Series(range(20))
        # with default settings there is no truncation, even when the length exceeds min_rows
assert ".." not in repr(s)
s = Series(range(61))
# default of max_rows 60 triggers truncation if above
assert ".." in repr(s)
with option_context("display.max_rows", 10, "display.min_rows", 4):
# truncated after first two rows
assert ".." in repr(s)
assert "2 " not in repr(s)
with option_context("display.max_rows", 12, "display.min_rows", None):
# when set to None, follow value of max_rows
assert "5 5" in repr(s)
with option_context("display.max_rows", 10, "display.min_rows", 12):
            # when min_rows is set higher than max_rows, the smaller of the two is used
assert "5 5" not in repr(s)
with option_context("display.max_rows", None, "display.min_rows", 12):
# max_rows of None -> never truncate
assert ".." not in repr(s)
def test_to_string_name(self):
s = Series(range(100), dtype="int64")
s.name = "myser"
res = s.to_string(max_rows=2, name=True)
exp = "0 0\n ..\n99 99\nName: myser"
assert res == exp
res = s.to_string(max_rows=2, name=False)
exp = "0 0\n ..\n99 99"
assert res == exp
def test_to_string_dtype(self):
s = Series(range(100), dtype="int64")
res = s.to_string(max_rows=2, dtype=True)
exp = "0 0\n ..\n99 99\ndtype: int64"
assert res == exp
res = s.to_string(max_rows=2, dtype=False)
exp = "0 0\n ..\n99 99"
assert res == exp
def test_to_string_length(self):
s = Series(range(100), dtype="int64")
res = s.to_string(max_rows=2, length=True)
exp = "0 0\n ..\n99 99\nLength: 100"
assert res == exp
def test_to_string_na_rep(self):
s = Series(index=range(100), dtype=np.float64)
res = s.to_string(na_rep="foo", max_rows=2)
exp = "0 foo\n ..\n99 foo"
assert res == exp
def test_to_string_float_format(self):
s = Series(range(10), dtype="float64")
res = s.to_string(float_format=lambda x: f"{x:2.1f}", max_rows=2)
exp = "0 0.0\n ..\n9 9.0"
assert res == exp
def test_to_string_header(self):
s = Series(range(10), dtype="int64")
s.index.name = "foo"
res = s.to_string(header=True, max_rows=2)
exp = "foo\n0 0\n ..\n9 9"
assert res == exp
res = s.to_string(header=False, max_rows=2)
exp = "0 0\n ..\n9 9"
assert res == exp
def test_to_string_multindex_header(self):
# GH 16718
df = DataFrame({"a": [0], "b": [1], "c": [2], "d": [3]}).set_index(["a", "b"])
res = df.to_string(header=["r1", "r2"])
exp = " r1 r2\na b \n0 1 2 3"
assert res == exp
def test_to_string_empty_col(self):
# GH 13653
s = Series(["", "Hello", "World", "", "", "Mooooo", "", ""])
res = s.to_string(index=False)
exp = " \n Hello\n World\n \n \nMooooo\n \n "
assert re.match(exp, res)
class TestGenericArrayFormatter:
def test_1d_array(self):
# GenericArrayFormatter is used on types for which there isn't a dedicated
# formatter. np.bool_ is one of those types.
obj = fmt.GenericArrayFormatter(np.array([True, False]))
res = obj.get_result()
assert len(res) == 2
# Results should be right-justified.
assert res[0] == " True"
assert res[1] == " False"
def test_2d_array(self):
obj = fmt.GenericArrayFormatter(np.array([[True, False], [False, True]]))
res = obj.get_result()
assert len(res) == 2
assert res[0] == " [True, False]"
assert res[1] == " [False, True]"
def test_3d_array(self):
obj = fmt.GenericArrayFormatter(
np.array([[[True, True], [False, False]], [[False, True], [True, False]]])
)
res = obj.get_result()
assert len(res) == 2
assert res[0] == " [[True, True], [False, False]]"
assert res[1] == " [[False, True], [True, False]]"
def test_2d_extension_type(self):
# GH 33770
# Define a stub extension type with just enough code to run Series.__repr__()
class DtypeStub(pd.api.extensions.ExtensionDtype):
@property
def type(self):
return np.ndarray
@property
def name(self):
return "DtypeStub"
class ExtTypeStub(pd.api.extensions.ExtensionArray):
def __len__(self):
return 2
def __getitem__(self, ix):
return [ix == 1, ix == 0]
@property
def dtype(self):
return DtypeStub()
series = Series(ExtTypeStub())
res = repr(series) # This line crashed before #33770 was fixed.
expected = "0 [False True]\n" + "1 [ True False]\n" + "dtype: DtypeStub"
assert res == expected
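# Some platforms' C runtimes (e.g. older Windows builds) print exponents with three
# digits, so 1.7e8 renders as "1.7e+008"; the expected strings above and below branch on this.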
def _three_digit_exp():
return f"{1.7e8:.4g}" == "1.7e+008"
class TestFloatArrayFormatter:
def test_misc(self):
obj = fmt.FloatArrayFormatter(np.array([], dtype=np.float64))
result = obj.get_result()
assert len(result) == 0
def test_format(self):
obj = fmt.FloatArrayFormatter(np.array([12, 0], dtype=np.float64))
result = obj.get_result()
assert result[0] == " 12.0"
assert result[1] == " 0.0"
def test_output_display_precision_trailing_zeroes(self):
# Issue #20359: trimming zeros while there is no decimal point
# Happens when display precision is set to zero
with option_context("display.precision", 0):
s = | Series([840.0, 4200.0]) | pandas.Series |
#!/usr/bin/env python
# coding: utf-8
from collections import OrderedDict
import os
import sys
import math
import functools
import cv2
import nltk
import pandas
import numpy as np
from PIL import Image, ImageEnhance
import torch
import matplotlib
import seaborn as sns
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
from matplotlib.colors import Normalize, LinearSegmentedColormap
# +
# plotting
imageDims = (14, 14)
figureImageDims = (2, 3)
figureTableDims = (5, 4)
fontScale = 1
# set transparent mask for low attention areas
# cdict = plt.get_cmap("gnuplot2")._segmentdata
cdict = {
"red": ((0.0, 0.0, 0.0), (0.6, 0.8, 0.8), (1.0, 1, 1)),
"green": ((0.0, 0.0, 0.0), (0.6, 0.8, 0.8), (1.0, 1, 1)),
"blue": ((0.0, 0.0, 0.0), (0.6, 0.8, 0.8), (1.0, 1, 1))
}
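# alpha ramps from 0.35 at the low end to 0.65 at the high end, so weakly attended
# regions of the overlay stay mostly transparent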
cdict["alpha"] = ((0.0, 0.35, 0.35), (1.0, 0.65, 0.65))
plt.register_cmap(name="custom", data=cdict)
def showTableAtt(table, words, tax=None):
'''
    Plot the question-attention table (words x steps) as a seaborn heatmap
'''
if tax is None:
fig2, bx = plt.subplots(1, 1)
bx.cla()
else:
bx = tax
sns.set(font_scale=fontScale)
steps = len(table)
    # transpose table
table = np.transpose(table)
tableMap = pandas.DataFrame(data=table,
columns=[i for i in range(1, steps + 1)],
index=words)
bx = sns.heatmap(tableMap,
cmap="Purples",
cbar=False,
linewidths=.5,
linecolor="gray",
square=True,
# ax=bx,
)
# # x ticks
bx.xaxis.tick_top()
locs, labels = plt.xticks()
plt.setp(labels, rotation=0)
# y ticks
locs, labels = plt.yticks()
plt.setp(labels, rotation=0)
# ### Visualizing Image Atts
def sigmoid(x):
return 1 / (1 + np.exp(-x))
dx, dy = 0.05, 0.05
x = np.arange(-1.5, 1.5, dx)
y = np.arange(-1.0, 1.0, dy)
X, Y = np.meshgrid(x, y)
extent = np.min(x), np.max(x), np.min(y), np.max(y)
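# passing this same extent to both imshow calls below aligns the low-resolution
# attention grid with the underlying image in data coordinates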
def show_img_att(img, att, ax, dim=None):
ax.cla()
if dim is None:
dim = int(math.sqrt(len(att)))
ax.imshow(img, interpolation="nearest", extent=extent)
ax.imshow(att.reshape((dim, dim)),
cmap=plt.get_cmap('custom'),
interpolation="bicubic",
extent=extent,
)
ax.set_axis_off()
plt.axis("off")
ax.set_aspect("auto")
def showImgAtt(img, atts, step, ax):
ax.cla()
dim = int(math.sqrt(len(atts[0][0])))
img1 = ax.imshow(img, interpolation="nearest", extent=extent)
att = atts[step][0]
low = att.min().item()
high = att.max().item()
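    # min-max normalise the attention to [0, 1], then sharpen the contrast with a
    # steep sigmoid over [-10, 10]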
att = sigmoid(((att - low) / (high - low)) * 20 - 10)
ax.imshow(att.reshape((dim, dim)),
cmap=plt.get_cmap('custom'),
interpolation="bicubic",
extent=extent,
)
ax.set_axis_off()
plt.axis("off")
ax.set_aspect("auto")
def showImgAtts(atts, impath):
    img = plt.imread(impath)
length = len(atts)
# show images
for j in range(length):
fig, ax = plt.subplots()
fig.set_figheight(figureImageDims[0])
fig.set_figwidth(figureImageDims[1])
showImgAtt(img, atts, j, ax)
plt.subplots_adjust(bottom=0, top=1, left=0, right=1)
def setlabel(ax, label, loc=2, borderpad=0.6, **kwargs):
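    # place a text label on the axes by adding a throwaway legend with an invisible
    # handle, then re-attaching it as a separate artist so any existing legend is preserved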
legend = ax.get_legend()
if legend:
ax.add_artist(legend)
line, = ax.plot(np.NaN, np.NaN, color='none', label=label)
label_legend = ax.legend(
handles=[line],
loc=loc,
handlelength=0,
handleheight=0,
handletextpad=0,
borderaxespad=0,
borderpad=borderpad,
frameon=False,
prop={
'size': 22,
'weight': 'bold',
},
**kwargs,
)
for text in label_legend.get_texts():
plt.setp(text, color=(1, 0, 1))
label_legend.remove()
ax.add_artist(label_legend)
line.remove()
def get_image(image_path, enhance=True):
image = Image.open(image_path).convert('RGB')
if enhance:
enhancer = ImageEnhance.Brightness(image)
image = enhancer.enhance(0.5)
enhancer = ImageEnhance.Contrast(image)
image = enhancer.enhance(1.6)
return image
def plot_table_attn(
ax, data, columns, index,
vmin=0, vmax=None, tick_position='top',
):
df = | pandas.DataFrame(data=data, columns=columns, index=index) | pandas.DataFrame |
"""
Tests for the utils module from pandas tools
"""
import pytest
import numpy as np
import pandas as pd
from itertools import product
from ..utils import *
def test_pd_to_tensor():
content = dict(
country=['UK', 'RUS'],
year=[2005, 2015, 2010],
month=['Jan', 'Feb', 'Mar', 'Apr'],
day=['Mon', 'Wed', 'Fri']
)
data = list(product(*content.values()))
columns = list(content.keys())
df = pd.DataFrame(data=data, columns=columns)
df['population'] = np.arange(df.shape[0])
df_mi = df.set_index(columns)
shape_labels = df_mi.index.names
true_shape = tuple([len(content[label]) for label in shape_labels])
true_mode_names = columns
tensor = pd_to_tensor(df=df_mi, keep_index=True)
assert tensor.shape == true_shape
assert tensor.mode_names == true_mode_names
for mode in tensor.modes:
name = mode.name
assert mode.index == content[name]
tensor = pd_to_tensor(df_mi, keep_index=False)
assert all([mode.index is None for mode in tensor.modes])
def test_tensor_to_pd():
content = dict(
country=['UK', 'RUS'],
year=[2005, 2015, 2010],
month=['Jan', 'Feb', 'Mar', 'Apr'],
day=['Mon', 'Wed', 'Fri']
)
data = list(product(*content.values()))
columns = list(content.keys())
multi_index = columns
df_base = pd.DataFrame(data=data, columns=columns)
values = np.arange(df_base.shape[0])
#----- test for default column name
value_column_name = "Values"
df_data = | pd.DataFrame(data=values, columns=[value_column_name]) | pandas.DataFrame |
# -*- coding: utf-8 -*-
"""
Created on Sat Mar 4 18:50:16 2017
@author: <NAME>
@version: 1.2
@license: MIT License
"""
import urllib.request
import time
from datetime import datetime
from bs4 import BeautifulSoup
import pandas as pd
import numpy as np
visaURL = 'https://www.checkee.info/main.php?dispdate={0:04d}-{1:02d}'
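# e.g. visaURL.format(2017, 3) -> 'https://www.checkee.info/main.php?dispdate=2017-03'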
# Get current time
timestamp = time.time()
yearNow = datetime.fromtimestamp(timestamp).year
monthNow = datetime.fromtimestamp(timestamp).month
currentTime = datetime.fromtimestamp(timestamp).strftime('%Y-%m-%d %H:%M:%S')
# File handle
time_string = datetime.fromtimestamp(timestamp).strftime('%Y-%m-%d')
filename = f'VISA-Data-{time_string}.csv'
# Initialize dataframe
df = pd.DataFrame(
columns=[
'UserName',
'VisaType',
'VisaEntry',
'City',
'Major',
'VisaStatus',
'CheckDate',
'CompleteDate',
'WaitDays'
]
)
# set column datatypes
df['UserName'] = pd.Series([], dtype=str)
df['VisaType'] = | pd.Categorical([]) | pandas.Categorical |
import pytest
import pandas as pd
import pandas.testing as pdt
from aneris import utils
def test_remove_emissions_prefix():
assert 'foo' == utils.remove_emissions_prefix('foo')
assert 'foo' == utils.remove_emissions_prefix('Emissions|XXX|foo')
assert 'Emissions|bar|foo' == \
utils.remove_emissions_prefix('Emissions|bar|foo')
assert 'foo' == \
utils.remove_emissions_prefix('Emissions|bar|foo', gas='bar')
def test_region_agg_funky_name():
df = pd.DataFrame({
'sector': ['foo', 'foo'],
'region': ['a', 'b'],
'2010': [1.0, 4.0],
'units': ['Mt'] * 2,
'gas': ['BC'] * 2,
}).set_index(utils.df_idx).sort_index()
mapping = pd.DataFrame(
[['fOO_Bar', 'a'], ['fOO_Bar', 'b']], columns=['x', 'y'])
exp = pd.DataFrame({
'sector': ['foo'],
'region': ['fOO_Bar'],
'2010': [5.0],
'units': ['Mt'],
'gas': ['BC'],
}).set_index(utils.df_idx).sort_index()
obs = utils.agg_regions(df, rfrom='y', rto='x', mapping=mapping)
pdt.assert_frame_equal(obs, exp)
def test_no_repeat_gases():
gases = utils.all_gases
assert len(gases) == len(set(gases))
def test_gases():
var_col = pd.Series(['foo|Emissions|CH4|bar', 'Emissions|N2O|baz|zing'])
exp = pd.Series(['CH4', 'N2O'])
obs = utils.gases(var_col)
pdt.assert_series_equal(obs, exp)
def test_units():
var_col = pd.Series(['foo|Emissions|CH4|bar', 'Emissions|N2O|baz|zing'])
exp = pd.Series(['Mt CH4/yr', 'kt N2O/yr'])
obs = utils.units(var_col)
pdt.assert_series_equal(obs, exp)
def test_formatter_to_std():
df = pd.DataFrame({
'Variable': [
'CEDS+|9+ Sectors|Emissions|BC|foo|Unharmonized',
'Emissions|BC|bar|baz',
],
'Region': ['a', 'b'],
'2010': [5.0, 2.0],
'2020': [-1.0, 3.0],
'Unit': ['Mt foo/yr'] * 2,
'Model': ['foo'] * 2,
'Scenario': ['foo'] * 2,
})
fmt = utils.FormatTranslator(df.copy())
obs = fmt.to_std()
exp = pd.DataFrame({
'sector': [
'CEDS+|9+ Sectors|foo|Unharmonized',
'bar|baz',
],
'region': ['a', 'b'],
'2010': [5000.0, 2000.0],
'2020': [-1000.0, 3000.0],
'units': ['kt'] * 2,
'gas': ['BC'] * 2,
})
pdt.assert_frame_equal(obs.set_index(utils.df_idx),
exp.set_index(utils.df_idx))
def test_formatter_to_template():
df = pd.DataFrame({
'Variable': [
'CEDS+|9+ Sectors|Emissions|BC|foo|Unharmonized',
'CEDS+|9+ Sectors|Emissions|BC|bar|Unharmonized',
],
'Region': ['a', 'b'],
'2010': [5.0, 2.0],
'2020': [-1.0, 3.0],
'Unit': ['Mt BC/yr'] * 2,
'Model': ['foo'] * 2,
'Scenario': ['foo'] * 2,
}).set_index(utils.iamc_idx)
fmt = utils.FormatTranslator(df, prefix='CEDS+|9+ Sectors',
suffix='Unharmonized')
fmt.to_std()
obs = fmt.to_template()
exp = df.reindex(columns=obs.columns)
pdt.assert_frame_equal(obs, exp)
def combine_rows_df():
df = pd.DataFrame({
'sector': [
'sector1',
'sector2',
'sector1',
'extra_b',
'sector1',
],
'region': ['a', 'a', 'b', 'b', 'c'],
'2010': [1.0, 4.0, 2.0, 21, 42],
'foo': [-1.0, -4.0, 2.0, 21, 42],
'units': ['Mt'] * 5,
'gas': ['BC'] * 5,
}).set_index(utils.df_idx)
return df
def test_combine_rows_default():
df = combine_rows_df()
exp = pd.DataFrame({
'sector': [
'sector1',
'sector2',
'extra_b',
'sector1',
],
'region': ['a', 'a', 'a', 'c'],
'2010': [3.0, 4.0, 21, 42],
'foo': [1.0, -4.0, 21, 42],
'units': ['Mt'] * 4,
'gas': ['BC'] * 4,
}).set_index(utils.df_idx)
obs = utils.combine_rows(df, 'region', 'a', ['b'])
exp = exp.reindex(columns=obs.columns)
clean = lambda df: df.sort_index().reset_index()
pdt.assert_frame_equal(clean(obs), clean(exp))
def test_combine_rows_dropothers():
df = combine_rows_df()
exp = pd.DataFrame({
'sector': [
'sector1',
'sector2',
'extra_b',
'sector1',
'extra_b',
'sector1',
],
'region': ['a', 'a', 'a', 'b', 'b', 'c'],
'2010': [3.0, 4.0, 21, 2.0, 21, 42],
'foo': [1.0, -4.0, 21, 2.0, 21, 42],
'units': ['Mt'] * 6,
'gas': ['BC'] * 6,
}).set_index(utils.df_idx)
obs = utils.combine_rows(df, 'region', 'a', ['b'], dropothers=False)
exp = exp.reindex(columns=obs.columns)
clean = lambda df: df.sort_index().reset_index()
pdt.assert_frame_equal(clean(obs), clean(exp))
def test_combine_rows_sumall():
df = combine_rows_df()
exp = pd.DataFrame({
'sector': [
'sector1',
'extra_b',
'sector1',
],
'region': ['a', 'a', 'c'],
'2010': [2.0, 21, 42],
'foo': [2.0, 21, 42],
'units': ['Mt'] * 3,
'gas': ['BC'] * 3,
}).set_index(utils.df_idx)
obs = utils.combine_rows(df, 'region', 'a', ['b'], sumall=False)
exp = exp.reindex(columns=obs.columns)
clean = lambda df: df.sort_index().reset_index()
pdt.assert_frame_equal(clean(obs), clean(exp))
def test_isin():
df = combine_rows_df()
exp = pd.DataFrame({
'sector': [
'sector1',
'sector2',
'sector1',
],
'region': ['a', 'a', 'b'],
'2010': [1.0, 4.0, 2.0],
'foo': [-1.0, -4.0, 2.0],
'units': ['Mt'] * 3,
'gas': ['BC'] * 3,
}).set_index(utils.df_idx)
obs = exp.loc[
utils.isin(sector=["sector1", "sector2"], region=["a", "b", "non-existent"])
]
| pdt.assert_frame_equal(obs, exp) | pandas.testing.assert_frame_equal |
import collections
from collections import OrderedDict
import numpy as np
import pandas as pd
from pandas import Panel4D
from pandas import Panel, MultiIndex, Series
from pandas.core.groupby import DataFrameGroupBy, PanelGroupBy, BinGrouper, SeriesGroupBy
from trtools.monkey import patch, patch_prop
from trtools.core.column_panel import PanelDict, ColumnPanel
from trtools.tools.boxer import box_data
class PanelGroupByMap(object):
"""
    All DataFrames in a Panel share the same index. If you have a groupby
    that applies to all dataframes in a panel, e.g. date groupings, then
    this class will reuse the grouper indices, saving you from re-running
the groupby for each dataframe.
"""
def __init__(self, groupby):
self.groupby = groupby
self.grouper = groupby.grouper
self.obj = groupby.obj
# create the first df groupby so we can delegate from __getattr__
f_ind = groupby.obj.items[0]
self.first = DataFrameGroupBy(groupby.obj.ix[f_ind], grouper=self.grouper)
def __getattr__(self, attr):
if hasattr(self.first, attr):
return self._map_wrapper(attr)
def _map_wrapper(self, attr):
def mapper(*args, **kwargs):
return self.apply(attr, *args, **kwargs)
return mapper
def apply(self, func, *args, **kwargs):
result = OrderedDict()
for key, df in self.obj.items():
grp = DataFrameGroupBy(df, grouper=self.grouper)
f = func
            if not callable(func):
f = getattr(grp, func)
res = f(*args, **kwargs)
else:
# call the grouper.apply cuz we will box our own data
keys, data, mutated = grp.grouper.apply(f, df, grp.axis)
res = box_data(keys, data)
result[key] = res
return box_data(result)
def __call__(self, func, *args, **kwargs):
"""
shortcut for agg. kept on forgetting that
df_map.agg(lambda x: x) would work
"""
args = list(args)
args.insert(0, func)
return self.apply('apply', *args, **kwargs)
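# Minimal standalone sketch of the idea above (the helper name and data are ours, not
# part of trtools): because every frame shares one index, the group positions are
# computed once and re-used with take() instead of re-running groupby() per frame.
def _example_shared_group_indices():
    idx = pd.date_range("2000-01-03", periods=10, freq="B")
    frames = {"close": pd.DataFrame(np.arange(20.0).reshape(10, 2), index=idx),
              "volume": pd.DataFrame(np.ones((10, 2)), index=idx)}
    # label -> integer positions, computed from a single groupby
    positions = frames["close"].groupby(idx.to_period("W")).indices
    return {name: {label: df.take(locs).sum() for label, locs in positions.items()}
            for name, df in frames.items()}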
@patch_prop([PanelGroupBy], 'df_map')
def df_map(self):
return PanelGroupByMap(self)
@patch([PanelGroupBy, DataFrameGroupBy], 'foreach')
def foreach_panelgroupby(self, func, *args, **kwargs):
"""
    Run func on each group's sub-DataFrame, for each item in the panel.
"""
keys = []
values = []
indices = self.grouper.indices
if isinstance(self.obj, Panel):
items = iter(self.obj.items())
else:
items = [(None, self.obj)]
results = PanelDict()
for key, df in items:
sub_results = PanelDict()
results[key] = sub_results
for date, idx in indices.items():
sub_df = df.take(idx)
res = func(df, sub_df)
keys.append((key, date))
values.append(res)
if len(results) == 1:
return list(results.values())[0]
return results
def filter_by_grouped(grouped, by, obj=None):
if obj is None:
obj = grouped.obj
index = by
if isinstance(by, np.ndarray) and by.dtype == bool:
index = by[by].index
if isinstance(grouped.grouper, BinGrouper):
return filter_bingroup_index(grouped, index, obj)
return filter_grouper_index(grouped, index, obj)
def _reverse_flatten(mapping):
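    # invert {group_key: [members]} into {member: group_key}, the dict form that
    # DataFrame.groupby() accepts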
rev_map = dict()
for k, vals in mapping.items():
for v in vals:
rev_map[v] = k
return rev_map
# TODO These are mixed. bingroup returns the original obj sans the bad groups
# regular groupby returns a filtered groupby object
def filter_grouper_index(grouped, index, obj):
old_groups = grouped.groups
groups = {k:v for k, v in old_groups.items() if k in index}
rmap = _reverse_flatten(groups)
return obj.groupby(rmap)
def filter_bingroup_index(grouped, index, obj):
# http://stackoverflow.com/questions/13446480/python-pandas-remove-entries-based-on-the-number-of-occurrences
# I think that overrides what i was doing...
groups = list(_bingroup_groups(grouped))
groups = collections.OrderedDict(groups)
filtered_groups = [(i, groups[i]) for i in index]
parts = []
for i, slc in filtered_groups:
# TODO: concat blocks instead of DataFrames, faster
#bit = obj._data.get_slice(slice(slc[0], slc[1]), 1)
bit = obj[slc[0]:slc[1]]
parts.append(bit)
res = pd.concat(parts)
return res
def _bingroup_groups(grouped):
grouper = grouped.grouper
data = grouped.obj
start = 0
for edge, label in zip(grouper.bins, grouper.binlabels):
yield label, (start, edge)
start = edge
if edge < len(data):
yield grouper.binlabels[-1], (edge, len(data))
@patch([PanelGroupBy, DataFrameGroupBy], 'filter_grouped')
def filter_grouped_monkey(self, by):
return filter_by_grouped(self, by)
@patch([PanelGroupBy, DataFrameGroupBy], 'process')
def panel_process(self, func, *args, **kwargs):
"""
Essentially just subsets the dataframe, runs func, and aggregates them back
"""
import collections
parts = collections.OrderedDict()
grouped = self
bins = grouped.grouper.bins
binlabels = grouped.grouper.binlabels
axis = self.axis
start = 0
for i, x in enumerate(bins):
label = binlabels[i]
sub = self.obj.ix._getitem_axis(slice(start, x), axis=axis)
res = func(sub)
parts[label] = res
start = x
return _wrap_parts(parts)
def _wrap_parts(parts):
"""
parts should be a dict where the keys are the index
"""
test = next(iter(parts.values()))
if np.isscalar(test):
return pd.Series(parts)
if isinstance(test, pd.Series):
return pd.DataFrame(list(parts.values()), index=list(parts.keys()))
if isinstance(test, dict):
# assumption is that dict is like a series
res = pd.DataFrame(list(parts.values()), index=list(parts.keys()))
if isinstance(test, collections.OrderedDict):
res = res.reindex(columns=list(test.keys()))
return res
if isinstance(test, pd.DataFrame):
return | pd.Panel(parts) | pandas.Panel |
from __future__ import division
import copy
import bt
from bt.core import Node, StrategyBase, SecurityBase, AlgoStack, Strategy
from bt.core import FixedIncomeStrategy, HedgeSecurity, FixedIncomeSecurity
from bt.core import CouponPayingSecurity, CouponPayingHedgeSecurity
from bt.core import is_zero
import pandas as pd
import numpy as np
from nose.tools import assert_almost_equal as aae
import sys
if sys.version_info < (3, 3):
import mock
else:
from unittest import mock
def test_node_tree1():
# Create a regular strategy
c1 = Node('c1')
c2 = Node('c2')
p = Node('p', children=[c1, c2, 'c3', 'c4'])
assert 'c1' in p.children
assert 'c2' in p.children
assert p['c1'] != c1
assert p['c1'] != c2
c1 = p['c1']
c2 = p['c2']
assert len(p.children) == 2
assert p == c1.parent
assert p == c2.parent
assert p == c1.root
assert p == c2.root
# Create a new parent strategy with a child sub-strategy
m = Node('m', children=[p, c1])
p = m['p']
mc1 = m['c1']
c1 = p['c1']
c2 = p['c2']
assert len(m.children) == 2
assert 'p' in m.children
assert 'c1' in m.children
assert mc1 != c1
assert p.parent == m
assert len(p.children) == 2
assert 'c1' in p.children
assert 'c2' in p.children
assert p == c1.parent
assert p == c2.parent
assert m == p.root
assert m == c1.root
assert m == c2.root
# Add a new node into the strategy
c0 = Node('c0', parent=p)
c0 = p['c0']
assert 'c0' in p.children
assert p == c0.parent
assert m == c0.root
assert len(p.children) == 3
# Add a new sub-strategy into the parent strategy
p2 = Node( 'p2', children = [c0, c1], parent=m )
p2 = m['p2']
c0 = p2['c0']
c1 = p2['c1']
assert 'p2' in m.children
assert p2.parent == m
assert len(p2.children) == 2
assert 'c0' in p2.children
assert 'c1' in p2.children
assert c0 != p['c0']
assert c1 != p['c1']
assert p2 == c0.parent
assert p2 == c1.parent
assert m == p2.root
assert m == c0.root
assert m == c1.root
def test_node_tree2():
# Just like test_node_tree1, but using the dictionary constructor
c = Node('template')
p = Node('p', children={'c1':c, 'c2':c, 'c3':'', 'c4':''})
assert 'c1' in p.children
assert 'c2' in p.children
assert p['c1'] != c
assert p['c1'] != c
c1 = p['c1']
c2 = p['c2']
assert len(p.children) == 2
assert c1.name == 'c1'
assert c2.name == 'c2'
assert p == c1.parent
assert p == c2.parent
assert p == c1.root
assert p == c2.root
def test_node_tree3():
c1 = Node('c1')
c2 = Node('c1') # Same name!
raised = False
try:
p = Node('p', children=[c1, c2, 'c3', 'c4'])
except ValueError:
raised = True
assert raised
raised = False
try:
p = Node('p', children=['c1', 'c1'])
except ValueError:
raised = True
assert raised
c1 = Node('c1')
c2 = Node('c2')
p = Node('p', children=[c1, c2, 'c3', 'c4'])
raised = False
try:
Node('c1', parent = p )
except ValueError:
raised = True
assert raised
# This does not raise, as it's just providing an implementation of 'c3',
# which had been declared earlier
c3 = Node('c3', parent = p )
assert 'c3' in p.children
def test_integer_positions():
c1 = Node('c1')
c2 = Node('c2')
c1.integer_positions = False
p = Node('p', children=[c1, c2])
c1 = p['c1']
c2 = p['c2']
assert p.integer_positions
assert c1.integer_positions
assert c2.integer_positions
p.use_integer_positions(False)
assert not p.integer_positions
assert not c1.integer_positions
assert not c2.integer_positions
c3 = Node('c3', parent=p)
c3 = p['c3']
assert not c3.integer_positions
p2 = Node( 'p2', children = [p] )
p = p2['p']
c1 = p['c1']
c2 = p['c2']
assert p2.integer_positions
assert p.integer_positions
assert c1.integer_positions
assert c2.integer_positions
def test_strategybase_tree():
s1 = SecurityBase('s1')
s2 = SecurityBase('s2')
s = StrategyBase('p', [s1, s2])
s1 = s['s1']
s2 = s['s2']
assert len(s.children) == 2
assert 's1' in s.children
assert 's2' in s.children
assert s == s1.parent
assert s == s2.parent
def test_node_members():
s1 = SecurityBase('s1')
s2 = SecurityBase('s2')
s = StrategyBase('p', [s1, s2])
s1 = s['s1']
s2 = s['s2']
actual = s.members
assert len(actual) == 3
assert s1 in actual
assert s2 in actual
assert s in actual
actual = s1.members
assert len(actual) == 1
assert s1 in actual
actual = s2.members
assert len(actual) == 1
assert s2 in actual
def test_node_full_name():
s1 = SecurityBase('s1')
s2 = SecurityBase('s2')
s = StrategyBase('p', [s1, s2])
# we cannot access s1 and s2 directly since they are copied
# we must therefore access through s
assert s.full_name == 'p'
assert s['s1'].full_name == 'p>s1'
assert s['s2'].full_name == 'p>s2'
def test_security_setup_prices():
c1 = SecurityBase('c1')
c2 = SecurityBase('c2')
s = StrategyBase('p', [c1, c2])
c1 = s['c1']
c2 = s['c2']
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
data['c1'][dts[0]] = 105
data['c2'][dts[0]] = 95
s.setup(data)
i = 0
s.update(dts[i], data.loc[dts[i]])
assert c1.price == 105
assert len(c1.prices) == 1
assert c1.prices[0] == 105
assert c2.price == 95
assert len(c2.prices) == 1
assert c2.prices[0] == 95
# now with setup
c1 = SecurityBase('c1')
c2 = SecurityBase('c2')
s = StrategyBase('p', [c1, c2])
c1 = s['c1']
c2 = s['c2']
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
data['c1'][dts[0]] = 105
data['c2'][dts[0]] = 95
s.setup(data)
i = 0
s.update(dts[i], data.loc[dts[i]])
assert c1.price == 105
assert len(c1.prices) == 1
assert c1.prices[0] == 105
assert c2.price == 95
assert len(c2.prices) == 1
assert c2.prices[0] == 95
def test_strategybase_tree_setup():
c1 = SecurityBase('c1')
c2 = SecurityBase('c2')
s = StrategyBase('p', [c1, c2])
c1 = s['c1']
c2 = s['c2']
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
data['c1'][dts[1]] = 105
data['c2'][dts[1]] = 95
s.setup(data)
assert len(s.data) == 3
assert len(c1.data) == 3
assert len(c2.data) == 3
assert len(s._prices) == 3
assert len(c1._prices) == 3
assert len(c2._prices) == 3
assert len(s._values) == 3
assert len(c1._values) == 3
assert len(c2._values) == 3
def test_strategybase_tree_adjust():
c1 = SecurityBase('c1')
c2 = SecurityBase('c2')
s = StrategyBase('p', [c1, c2])
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
data['c1'][dts[1]] = 105
data['c2'][dts[1]] = 95
s.setup(data)
s.adjust(1000)
assert s.capital == 1000
assert s.value == 1000
assert c1.value == 0
assert c2.value == 0
assert c1.weight == 0
assert c2.weight == 0
s.update(dts[0])
assert s.flows[ dts[0] ] == 1000
def test_strategybase_tree_update():
c1 = SecurityBase('c1')
c2 = SecurityBase('c2')
s = StrategyBase('p', [c1, c2])
c1 = s['c1']
c2 = s['c2']
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
data['c1'][dts[1]] = 105
data['c2'][dts[1]] = 95
s.setup(data)
i = 0
s.update(dts[i], data.loc[dts[i]])
assert c1.price == 100
assert c2.price == 100
i = 1
s.update(dts[i], data.loc[dts[i]])
assert c1.price == 105
assert c2.price == 95
i = 2
s.update(dts[i], data.loc[dts[i]])
assert c1.price == 100
assert c2.price == 100
def test_update_fails_if_price_is_nan_and_position_open():
c1 = SecurityBase('c1')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1'], data=100)
data['c1'][dts[1]] = np.nan
c1.setup(data)
i = 0
# mock in position
c1._position = 100
c1.update(dts[i], data.loc[dts[i]])
# test normal case - position & non-nan price
assert c1._value == 100 * 100
i = 1
# this should fail, because we have non-zero position, and price is nan, so
# bt has no way of updating the _value
try:
c1.update(dts[i], data.loc[dts[i]])
assert False
except Exception as e:
assert str(e).startswith('Position is open')
# on the other hand, if position was 0, this should be fine, and update
# value to 0
c1._position = 0
c1.update(dts[i], data.loc[dts[i]])
assert c1._value == 0
def test_strategybase_tree_allocate():
c1 = SecurityBase('c1')
c2 = SecurityBase('c2')
s = StrategyBase('p', [c1, c2])
c1 = s['c1']
c2 = s['c2']
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
data['c1'][dts[1]] = 105
data['c2'][dts[1]] = 95
s.setup(data)
i = 0
s.update(dts[i], data.loc[dts[i]])
s.adjust(1000)
# since children have w == 0 this should stay in s
s.allocate(1000)
assert s.value == 1000
assert s.capital == 1000
assert c1.value == 0
assert c2.value == 0
# now allocate directly to child
c1.allocate(500)
assert c1.position == 5
assert c1.value == 500
assert s.capital == 1000 - 500
assert s.value == 1000
assert c1.weight == 500.0 / 1000
assert c2.weight == 0
def test_strategybase_tree_allocate_child_from_strategy():
c1 = SecurityBase('c1')
c2 = SecurityBase('c2')
s = StrategyBase('p', [c1, c2])
c1 = s['c1']
c2 = s['c2']
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
data['c1'][dts[1]] = 105
data['c2'][dts[1]] = 95
s.setup(data)
i = 0
s.update(dts[i], data.loc[dts[i]])
s.adjust(1000)
# since children have w == 0 this should stay in s
s.allocate(1000)
assert s.value == 1000
assert s.capital == 1000
assert c1.value == 0
assert c2.value == 0
# now allocate to c1
s.allocate(500, 'c1')
assert c1.position == 5
assert c1.value == 500
assert s.capital == 1000 - 500
assert s.value == 1000
assert c1.weight == 500.0 / 1000
assert c2.weight == 0
def test_strategybase_tree_allocate_level2():
c1 = SecurityBase('c1')
c12 = copy.deepcopy(c1)
c2 = SecurityBase('c2')
c22 = copy.deepcopy(c2)
s1 = StrategyBase('s1', [c1, c2])
s2 = StrategyBase('s2', [c12, c22])
m = StrategyBase('m', [s1, s2])
s1 = m['s1']
s2 = m['s2']
c1 = s1['c1']
c2 = s1['c2']
c12 = s2['c1']
c22 = s2['c2']
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
data['c1'][dts[1]] = 105
data['c2'][dts[1]] = 95
m.setup(data)
i = 0
m.update(dts[i], data.loc[dts[i]])
m.adjust(1000)
# since children have w == 0 this should stay in s
m.allocate(1000)
assert m.value == 1000
assert m.capital == 1000
assert s1.value == 0
assert s2.value == 0
assert c1.value == 0
assert c2.value == 0
# now allocate directly to child
s1.allocate(500)
assert s1.value == 500
assert m.capital == 1000 - 500
assert m.value == 1000
assert s1.weight == 500.0 / 1000
assert s2.weight == 0
# now allocate directly to child of child
c1.allocate(200)
assert s1.value == 500
assert s1.capital == 500 - 200
assert c1.value == 200
assert c1.weight == 200.0 / 500
assert c1.position == 2
assert m.capital == 1000 - 500
assert m.value == 1000
assert s1.weight == 500.0 / 1000
assert s2.weight == 0
assert c12.value == 0
def test_strategybase_tree_allocate_long_short():
c1 = SecurityBase('c1')
c2 = SecurityBase('c2')
s = StrategyBase('p', [c1, c2])
c1 = s['c1']
c2 = s['c2']
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
data['c1'][dts[1]] = 105
data['c2'][dts[1]] = 95
s.setup(data)
i = 0
s.update(dts[i], data.loc[dts[i]])
s.adjust(1000)
c1.allocate(500)
assert c1.position == 5
assert c1.value == 500
assert c1.weight == 500.0 / 1000
assert s.capital == 1000 - 500
assert s.value == 1000
c1.allocate(-200)
assert c1.position == 3
assert c1.value == 300
assert c1.weight == 300.0 / 1000
assert s.capital == 1000 - 500 + 200
assert s.value == 1000
c1.allocate(-400)
assert c1.position == -1
assert c1.value == -100
assert c1.weight == -100.0 / 1000
assert s.capital == 1000 - 500 + 200 + 400
assert s.value == 1000
# close up
c1.allocate(-c1.value)
assert c1.position == 0
assert c1.value == 0
assert c1.weight == 0
assert s.capital == 1000 - 500 + 200 + 400 - 100
assert s.value == 1000
def test_strategybase_tree_allocate_update():
c1 = SecurityBase('c1')
c2 = SecurityBase('c2')
s = StrategyBase('p', [c1, c2])
c1 = s['c1']
c2 = s['c2']
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
data['c1'][dts[1]] = 105
data['c2'][dts[1]] = 95
s.setup(data)
i = 0
s.update(dts[i], data.loc[dts[i]])
assert s.price == 100
s.adjust(1000)
assert s.price == 100
assert s.value == 1000
assert s._value == 1000
c1.allocate(500)
assert c1.position == 5
assert c1.value == 500
assert c1.weight == 500.0 / 1000
assert s.capital == 1000 - 500
assert s.value == 1000
assert s.price == 100
i = 1
s.update(dts[i], data.loc[dts[i]])
assert c1.position == 5
assert c1.value == 525
assert c1.weight == 525.0 / 1025
assert s.capital == 1000 - 500
assert s.value == 1025
assert np.allclose(s.price, 102.5)
def test_strategybase_universe():
s = StrategyBase('s')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
data['c1'][dts[0]] = 105
data['c2'][dts[0]] = 95
s.setup(data)
i = 0
s.update(dts[i])
assert len(s.universe) == 1
assert 'c1' in s.universe
assert 'c2' in s.universe
assert s.universe['c1'][dts[i]] == 105
assert s.universe['c2'][dts[i]] == 95
# should not have children unless allocated
assert len(s.children) == 0
def test_strategybase_allocate():
s = StrategyBase('s')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
data['c1'][dts[0]] = 100
data['c2'][dts[0]] = 95
s.setup(data)
i = 0
s.update(dts[i])
s.adjust(1000)
s.allocate(100, 'c1')
c1 = s['c1']
assert c1.position == 1
assert c1.value == 100
assert s.value == 1000
def test_strategybase_lazy():
# A mix of test_strategybase_universe and test_strategybase_allocate
# to make sure that assets with lazy_add work correctly.
c1 = SecurityBase('c1', multiplier=2, lazy_add=True, )
c2 = FixedIncomeSecurity('c2', lazy_add=True)
s = StrategyBase('s', [c1, c2])
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
data['c1'][dts[0]] = 105
data['c2'][dts[0]] = 95
s.setup(data)
i = 0
s.update(dts[i])
assert len(s.universe) == 1
assert 'c1' in s.universe
assert 'c2' in s.universe
assert s.universe['c1'][dts[i]] == 105
assert s.universe['c2'][dts[i]] == 95
# should not have children unless allocated
assert len(s.children) == 0
s.adjust(1000)
s.allocate(100, 'c1')
s.allocate(100, 'c2')
c1 = s['c1']
c2 = s['c2']
assert c1.multiplier == 2
assert isinstance( c2, FixedIncomeSecurity)
def test_strategybase_close():
s = StrategyBase('s')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
s.setup(data)
i = 0
s.update(dts[i])
s.adjust(1000)
s.allocate(100, 'c1')
c1 = s['c1']
assert c1.position == 1
assert c1.value == 100
assert s.value == 1000
s.close('c1')
assert c1.position == 0
assert c1.value == 0
assert s.value == 1000
def test_strategybase_flatten():
s = StrategyBase('s')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
s.setup(data)
i = 0
s.update(dts[i])
s.adjust(1000)
s.allocate(100, 'c1')
c1 = s['c1']
s.allocate(100, 'c2')
c2 = s['c2']
assert c1.position == 1
assert c1.value == 100
assert c2.position == 1
assert c2.value == 100
assert s.value == 1000
s.flatten()
assert c1.position == 0
assert c1.value == 0
assert s.value == 1000
def test_strategybase_multiple_calls():
s = StrategyBase('s')
dts = pd.date_range('2010-01-01', periods=5)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
data.c2[dts[0]] = 95
data.c1[dts[1]] = 95
data.c2[dts[2]] = 95
data.c2[dts[3]] = 95
data.c2[dts[4]] = 95
data.c1[dts[4]] = 105
s.setup(data)
# define strategy logic
def algo(target):
# close out any open positions
target.flatten()
# get stock w/ lowest price
c = target.universe.loc[target.now].idxmin()
# allocate all capital to that stock
target.allocate(target.value, c)
# replace run logic
s.run = algo
# start w/ 1000
s.adjust(1000)
# loop through dates manually
i = 0
# update t0
s.update(dts[i])
assert len(s.children) == 0
assert s.value == 1000
# run t0
s.run(s)
assert len(s.children) == 1
assert s.value == 1000
assert s.capital == 50
c2 = s['c2']
assert c2.value == 950
assert c2.weight == 950.0 / 1000
assert c2.price == 95
# update out t0
s.update(dts[i])
c2 = s['c2']
assert len(s.children) == 1
assert s.value == 1000
assert s.capital == 50
assert c2.value == 950
assert c2.weight == 950.0 / 1000
assert c2.price == 95
# update t1
i = 1
s.update(dts[i])
assert s.value == 1050
assert s.capital == 50
assert len(s.children) == 1
assert 'c2' in s.children
c2 = s['c2']
assert c2.value == 1000
assert c2.weight == 1000.0 / 1050.0
assert c2.price == 100
# run t1 - close out c2, open c1
s.run(s)
assert len(s.children) == 2
assert s.value == 1050
assert s.capital == 5
c1 = s['c1']
assert c1.value == 1045
assert c1.weight == 1045.0 / 1050
assert c1.price == 95
assert c2.value == 0
assert c2.weight == 0
assert c2.price == 100
# update out t1
s.update(dts[i])
assert len(s.children) == 2
assert s.value == 1050
assert s.capital == 5
assert c1 == s['c1']
assert c1.value == 1045
assert c1.weight == 1045.0 / 1050
assert c1.price == 95
assert c2.value == 0
assert c2.weight == 0
assert c2.price == 100
# update t2
i = 2
s.update(dts[i])
assert len(s.children) == 2
assert s.value == 1105
assert s.capital == 5
assert c1.value == 1100
assert c1.weight == 1100.0 / 1105
assert c1.price == 100
assert c2.value == 0
assert c2.weight == 0
assert c2.price == 95
# run t2
s.run(s)
assert len(s.children) == 2
assert s.value == 1105
assert s.capital == 60
assert c1.value == 0
assert c1.weight == 0
assert c1.price == 100
assert c2.value == 1045
assert c2.weight == 1045.0 / 1105
assert c2.price == 95
# update out t2
s.update(dts[i])
assert len(s.children) == 2
assert s.value == 1105
assert s.capital == 60
assert c1.value == 0
assert c1.weight == 0
assert c1.price == 100
assert c2.value == 1045
assert c2.weight == 1045.0 / 1105
assert c2.price == 95
# update t3
i = 3
s.update(dts[i])
assert len(s.children) == 2
assert s.value == 1105
assert s.capital == 60
assert c1.value == 0
assert c1.weight == 0
assert c1.price == 100
assert c2.value == 1045
assert c2.weight == 1045.0 / 1105
assert c2.price == 95
# run t3
s.run(s)
assert len(s.children) == 2
assert s.value == 1105
assert s.capital == 60
assert c1.value == 0
assert c1.weight == 0
assert c1.price == 100
assert c2.value == 1045
assert c2.weight == 1045.0 / 1105
assert c2.price == 95
# update out t3
s.update(dts[i])
assert len(s.children) == 2
assert s.value == 1105
assert s.capital == 60
assert c1.value == 0
assert c1.weight == 0
assert c1.price == 100
assert c2.value == 1045
assert c2.weight == 1045.0 / 1105
assert c2.price == 95
# update t4
i = 4
s.update(dts[i])
assert len(s.children) == 2
assert s.value == 1105
assert s.capital == 60
assert c1.value == 0
assert c1.weight == 0
    # accessing price should refresh - this child has been idle for a while -
    # must make sure we can still get fresh prices
assert c1.price == 105
assert len(c1.prices) == 5
assert c2.value == 1045
assert c2.weight == 1045.0 / 1105
assert c2.price == 95
# run t4
s.run(s)
assert len(s.children) == 2
assert s.value == 1105
assert s.capital == 60
assert c1.value == 0
assert c1.weight == 0
assert c1.price == 105
assert c2.value == 1045
assert c2.weight == 1045.0 / 1105
assert c2.price == 95
# update out t4
s.update(dts[i])
assert len(s.children) == 2
assert s.value == 1105
assert s.capital == 60
assert c1.value == 0
assert c1.weight == 0
assert c1.price == 105
assert c2.value == 1045
assert c2.weight == 1045.0 / 1105
assert c2.price == 95
def test_strategybase_multiple_calls_preset_secs():
c1 = SecurityBase('c1')
c2 = SecurityBase('c2')
s = StrategyBase('s', [c1, c2])
c1 = s['c1']
c2 = s['c2']
dts = pd.date_range('2010-01-01', periods=5)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
data.c2[dts[0]] = 95
data.c1[dts[1]] = 95
data.c2[dts[2]] = 95
data.c2[dts[3]] = 95
data.c2[dts[4]] = 95
data.c1[dts[4]] = 105
s.setup(data)
# define strategy logic
def algo(target):
# close out any open positions
target.flatten()
# get stock w/ lowest price
c = target.universe.loc[target.now].idxmin()
# allocate all capital to that stock
target.allocate(target.value, c)
# replace run logic
s.run = algo
# start w/ 1000
s.adjust(1000)
# loop through dates manually
i = 0
# update t0
s.update(dts[i])
assert len(s.children) == 2
assert s.value == 1000
# run t0
s.run(s)
assert len(s.children) == 2
assert s.value == 1000
assert s.capital == 50
assert c2.value == 950
assert c2.weight == 950.0 / 1000
assert c2.price == 95
# update out t0
s.update(dts[i])
assert len(s.children) == 2
assert s.value == 1000
assert s.capital == 50
assert c2.value == 950
assert c2.weight == 950.0 / 1000
assert c2.price == 95
# update t1
i = 1
s.update(dts[i])
assert s.value == 1050
assert s.capital == 50
assert len(s.children) == 2
assert c2.value == 1000
assert c2.weight == 1000.0 / 1050.
assert c2.price == 100
# run t1 - close out c2, open c1
s.run(s)
assert c1.value == 1045
assert c1.weight == 1045.0 / 1050
assert c1.price == 95
assert c2.value == 0
assert c2.weight == 0
assert c2.price == 100
assert len(s.children) == 2
assert s.value == 1050
assert s.capital == 5
# update out t1
s.update(dts[i])
assert len(s.children) == 2
assert s.value == 1050
assert s.capital == 5
assert c1.value == 1045
assert c1.weight == 1045.0 / 1050
assert c1.price == 95
assert c2.value == 0
assert c2.weight == 0
assert c2.price == 100
# update t2
i = 2
s.update(dts[i])
assert len(s.children) == 2
assert s.value == 1105
assert s.capital == 5
assert c1.value == 1100
assert c1.weight == 1100.0 / 1105
assert c1.price == 100
assert c2.value == 0
assert c2.weight == 0
assert c2.price == 95
# run t2
s.run(s)
assert len(s.children) == 2
assert s.value == 1105
assert s.capital == 60
assert c1.value == 0
assert c1.weight == 0
assert c1.price == 100
assert c2.value == 1045
assert c2.weight == 1045.0 / 1105
assert c2.price == 95
# update out t2
s.update(dts[i])
assert len(s.children) == 2
assert s.value == 1105
assert s.capital == 60
assert c1.value == 0
assert c1.weight == 0
assert c1.price == 100
assert c2.value == 1045
assert c2.weight == 1045.0 / 1105
assert c2.price == 95
# update t3
i = 3
s.update(dts[i])
assert len(s.children) == 2
assert s.value == 1105
assert s.capital == 60
assert c1.value == 0
assert c1.weight == 0
assert c1.price == 100
assert c2.value == 1045
assert c2.weight == 1045.0 / 1105
assert c2.price == 95
# run t3
s.run(s)
assert len(s.children) == 2
assert s.value == 1105
assert s.capital == 60
assert c1.value == 0
assert c1.weight == 0
assert c1.price == 100
assert c2.value == 1045
assert c2.weight == 1045.0 / 1105
assert c2.price == 95
# update out t3
s.update(dts[i])
assert len(s.children) == 2
assert s.value == 1105
assert s.capital == 60
assert c1.value == 0
assert c1.weight == 0
assert c1.price == 100
assert c2.value == 1045
assert c2.weight == 1045.0 / 1105
assert c2.price == 95
# update t4
i = 4
s.update(dts[i])
assert len(s.children) == 2
assert s.value == 1105
assert s.capital == 60
assert c1.value == 0
assert c1.weight == 0
    # accessing price should refresh - this child has been idle for a while -
    # must make sure we can still get fresh prices
assert c1.price == 105
assert len(c1.prices) == 5
assert c2.value == 1045
assert c2.weight == 1045.0 / 1105
assert c2.price == 95
# run t4
s.run(s)
assert len(s.children) == 2
assert s.value == 1105
assert s.capital == 60
assert c1.value == 0
assert c1.weight == 0
assert c1.price == 105
assert c2.value == 1045
assert c2.weight == 1045.0 / 1105
assert c2.price == 95
# update out t4
s.update(dts[i])
assert len(s.children) == 2
assert s.value == 1105
assert s.capital == 60
assert c1.value == 0
assert c1.weight == 0
assert c1.price == 105
assert c2.value == 1045
assert c2.weight == 1045.0 / 1105
assert c2.price == 95
def test_strategybase_multiple_calls_no_post_update():
s = StrategyBase('s')
s.set_commissions(lambda q, p: 1)
dts = pd.date_range('2010-01-01', periods=5)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
data.c2[dts[0]] = 95
data.c1[dts[1]] = 95
data.c2[dts[2]] = 95
data.c2[dts[3]] = 95
data.c2[dts[4]] = 95
data.c1[dts[4]] = 105
s.setup(data)
# define strategy logic
def algo(target):
# close out any open positions
target.flatten()
# get stock w/ lowest price
c = target.universe.loc[target.now].idxmin()
# allocate all capital to that stock
target.allocate(target.value, c)
# replace run logic
s.run = algo
# start w/ 1000
s.adjust(1000)
# loop through dates manually
i = 0
# update t0
s.update(dts[i])
assert len(s.children) == 0
assert s.value == 1000
# run t0
s.run(s)
assert len(s.children) == 1
assert s.value == 999
assert s.capital == 49
c2 = s['c2']
assert c2.value == 950
assert c2.weight == 950.0 / 999
assert c2.price == 95
# update t1
i = 1
s.update(dts[i])
assert s.value == 1049
assert s.capital == 49
assert len(s.children) == 1
assert 'c2' in s.children
c2 = s['c2']
assert c2.value == 1000
assert c2.weight == 1000.0 / 1049.0
assert c2.price == 100
# run t1 - close out c2, open c1
s.run(s)
assert len(s.children) == 2
assert s.value == 1047
assert s.capital == 2
c1 = s['c1']
assert c1.value == 1045
assert c1.weight == 1045.0 / 1047
assert c1.price == 95
assert c2.value == 0
assert c2.weight == 0
assert c2.price == 100
# update t2
i = 2
s.update(dts[i])
assert len(s.children) == 2
assert s.value == 1102
assert s.capital == 2
assert c1.value == 1100
assert c1.weight == 1100.0 / 1102
assert c1.price == 100
assert c2.value == 0
assert c2.weight == 0
assert c2.price == 95
# run t2
s.run(s)
assert len(s.children) == 2
assert s.value == 1100
assert s.capital == 55
assert c1.value == 0
assert c1.weight == 0
assert c1.price == 100
assert c2.value == 1045
assert c2.weight == 1045.0 / 1100
assert c2.price == 95
# update t3
i = 3
s.update(dts[i])
assert len(s.children) == 2
assert s.value == 1100
assert s.capital == 55
assert c1.value == 0
assert c1.weight == 0
assert c1.price == 100
assert c2.value == 1045
assert c2.weight == 1045.0 / 1100
assert c2.price == 95
# run t3
s.run(s)
assert len(s.children) == 2
assert s.value == 1098
assert s.capital == 53
assert c1.value == 0
assert c1.weight == 0
assert c1.price == 100
assert c2.value == 1045
assert c2.weight == 1045.0 / 1098
assert c2.price == 95
# update t4
i = 4
s.update(dts[i])
assert len(s.children) == 2
assert s.value == 1098
assert s.capital == 53
assert c1.value == 0
assert c1.weight == 0
    # accessing price should refresh - this child has been idle for a while -
    # must make sure we can still get fresh prices
assert c1.price == 105
assert len(c1.prices) == 5
assert c2.value == 1045
assert c2.weight == 1045.0 / 1098
assert c2.price == 95
# run t4
s.run(s)
assert len(s.children) == 2
assert s.value == 1096
assert s.capital == 51
assert c1.value == 0
assert c1.weight == 0
assert c1.price == 105
assert c2.value == 1045
assert c2.weight == 1045.0 / 1096
assert c2.price == 95
def test_strategybase_prices():
dts = pd.date_range('2010-01-01', periods=21)
rawd = [13.555, 13.75, 14.16, 13.915, 13.655,
13.765, 14.02, 13.465, 13.32, 14.65,
14.59, 14.175, 13.865, 13.865, 13.89,
13.85, 13.565, 13.47, 13.225, 13.385,
12.89]
data = pd.DataFrame(index=dts, data=rawd, columns=['a'])
s = StrategyBase('s')
s.set_commissions(lambda q, p: 1)
s.setup(data)
# buy 100 shares on day 1 - hold until end
# just enough to buy 100 shares + 1$ commission
s.adjust(1356.50)
s.update(dts[0])
# allocate all capital to child a
# a should be dynamically created and should have
# 100 shares allocated. s.capital should be 0
s.allocate(s.value, 'a')
assert s.capital == 0
assert s.value == 1355.50
assert len(s.children) == 1
aae(s.price, 99.92628, 5)
a = s['a']
assert a.position == 100
assert a.value == 1355.50
assert a.weight == 1
assert a.price == 13.555
assert len(a.prices) == 1
# update through all dates and make sure price is ok
s.update(dts[1])
aae(s.price, 101.3638, 4)
s.update(dts[2])
aae(s.price, 104.3863, 4)
s.update(dts[3])
aae(s.price, 102.5802, 4)
# finish updates and make sure ok at end
for i in range(4, 21):
s.update(dts[i])
assert len(s.prices) == 21
aae(s.prices[-1], 95.02396, 5)
aae(s.prices[-2], 98.67306, 5)
def test_fail_if_root_value_negative():
s = StrategyBase('s')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
data['c1'][dts[0]] = 100
data['c2'][dts[0]] = 95
s.setup(data)
s.adjust(-100)
# trigger update
s.update(dts[0])
assert s.bankrupt
# make sure only triggered if root negative
c1 = StrategyBase('c1')
s = StrategyBase('s', children=[c1])
c1 = s['c1']
s.setup(data)
s.adjust(1000)
c1.adjust(-100)
s.update(dts[0])
# now make it trigger
c1.adjust(-1000)
# trigger update
s.update(dts[0])
assert s.bankrupt
def test_fail_if_0_base_in_return_calc():
dts = pd.date_range('2010-01-01', periods=3)
    data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
#!/usr/bin/env python
# -- coding: utf-8 --
# PAQUETES PARA CORRER OP.
import netCDF4
import pandas as pd
import numpy as np
import datetime as dt
import json
import wmf.wmf as wmf
import hydroeval
import glob
import MySQLdb
#modulo pa correr modelo
import hidrologia
from sklearn.linear_model import LinearRegression
import math
import os
#spatial
import cartopy.crs as crs
import geopandas as gpd
import pyproj
from pyproj import transform
from cartopy.feature import ShapelyFeature
import cartopy.crs as ccrs
from cartopy.io.shapereader import Reader
from cartopy.mpl.ticker import LongitudeFormatter, LatitudeFormatter
from cartopy.mpl.gridliner import LONGITUDE_FORMATTER, LATITUDE_FORMATTER
import seaborn as sns
sns.set(style="whitegrid")
sns.set_context('notebook', font_scale=1.13)
#FORMATO
# fuente
import matplotlib
matplotlib.use('Agg')
import pylab as pl
#avoid warnings
import warnings
warnings.filterwarnings('ignore')
#---------------
#Funciones base.
#---------------
def get_rutesList(rutas):
    ''' Opens the text file at path `rutas` and returns a list with the lines of that file.
    Base function.
    #Arguments
    rutas: string, path to the file.
    '''
f = open(rutas,'r')
L = f.readlines()
f.close()
return L
def set_modelsettings(ConfigList):
ruta_modelset = get_ruta(ConfigList,'ruta_proj')+get_ruta(ConfigList,'ruta_modelset')
# model settings Json
with open(ruta_modelset, 'r') as f:
model_set = json.load(f)
# Model set
wmf.models.max_aquifer = wmf.models.max_gravita * 10
wmf.models.retorno = model_set['retorno']
wmf.models.show_storage = model_set['show_storage']
wmf.models.separate_fluxes = model_set['separate_fluxes']
wmf.models.dt = model_set['dt']
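# Hedged sketch of the model-settings JSON read by set_modelsettings, based only
# on the keys accessed above; the values below are placeholders, not the real setup.
_EXAMPLE_MODELSET = {
    "retorno": 0,
    "show_storage": 1,
    "separate_fluxes": 0,
    "dt": 300
}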
def round_time(date = dt.datetime.now(),round_mins=5):
'''
Rounds datetime object to nearest 'round_time' minutes.
    If 'dif' is < 'round_mins'/2 it takes the minute behind, else the minute ahead.
Parameters
----------
date : date to round
round_mins : round to this nearest minutes interval
Returns
----------
datetime object rounded, datetime object
'''
dif = date.minute % round_mins
if dif <= round_mins/2:
return dt.datetime(date.year, date.month, date.day, date.hour, date.minute - (date.minute % round_mins))
else:
return dt.datetime(date.year, date.month, date.day, date.hour, date.minute - (date.minute % round_mins)) + dt.timedelta(minutes=round_mins)
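# Illustrative check of round_time (not from the original module), using the
# default 5-minute interval: 10:07 rounds down to 10:05, 10:08 rounds up to 10:10.
def _demo_round_time():
    assert round_time(dt.datetime(2020, 1, 1, 10, 7)) == dt.datetime(2020, 1, 1, 10, 5)
    assert round_time(dt.datetime(2020, 1, 1, 10, 8)) == dt.datetime(2020, 1, 1, 10, 10)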
def get_credentials(ruta_credenciales):
credentials = json.load(open(ruta_credenciales))
#creds para consultas
mysqlServer = credentials['MySql_Siata']
for key in np.sort(list(credentials['MySql_Siata'].keys()))[::-1]: #1:hal, 2:sal
try:
connection = MySQLdb.connect(host=mysqlServer[key]['host'],
user=mysqlServer[key]['user'],
password=mysqlServer[key]['password'],
db=mysqlServer[key]['db'])
print('SERVER_CON: Succesful connection to %s'%(key))
host=mysqlServer[key]['host']
user=mysqlServer[key]['user']
password=mysqlServer[key]['password']
db=mysqlServer[key]['db']
break #si conecta bien a SAL para.
except:
print('SERVER_CON: No connection to %s'%(key))
pass
#creds para copiar a var
user2copy2var = credentials['cred_2copy2var']['user']; host2copy2var = credentials['cred_2copy2var']['host']
return host,user,password,db,user2copy2var,host2copy2var
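# Hedged sketch of the credentials JSON layout implied by the keys accessed in
# get_credentials; the server names and all values are placeholders.
_EXAMPLE_CREDENTIALS = {
    "MySql_Siata": {
        "1_server": {"host": "...", "user": "...", "password": "...", "db": "..."},
        "2_server": {"host": "...", "user": "...", "password": "...", "db": "..."}
    },
    "cred_2copy2var": {"user": "...", "host": "..."}
}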
def coord2hillID(ruta_nc, df_coordxy):
    #read the simubasin to associate reaches and get the basic topology
cu = wmf.SimuBasin(rute= ruta_nc)
cu.GetGeo_Cell_Basics()
cu.GetGeo_Parameters()
    #get the coordinates of the whole simubasin and the distances between them
coordsX = wmf.cu.basin_coordxy(cu.structure,cu.ncells)[0]
coordsY = wmf.cu.basin_coordxy(cu.structure,cu.ncells)[1]
disty = np.unique(np.diff(np.unique(np.sort(coordsY))))
distx = np.unique(np.diff(np.unique(np.sort(coordsX))))
df_ids = pd.DataFrame(index = df_coordxy.index,columns=['id'])
    #identify the id of the hillslope where each point falls
for index in df_coordxy.index:
df_ids.loc[index]=cu.hills_own[np.where((coordsY+disty[0]/2>df_coordxy.loc[index].values[1]) & (coordsY-disty[0]/2<df_coordxy.loc[index].values[1]) & (coordsX+distx[0]/2>df_coordxy.loc[index].values[0]) & (coordsX-distx[0]/2<df_coordxy.loc[index].values[0]))[0]].data
return df_ids
#-----------------------------------
#-----------------------------------
#Funciones de lectura del configfile
#-----------------------------------
#-----------------------------------
def get_ruta(RutesList, key):
    ''' Searches the list 'RutesList' for the line that starts with the given key and returns the path it holds.
    Base function.
    #Arguments
    RutesList: list returned by get_rutesList() in this script.
    key: string, key used to find which line in the list starts with it.
    '''
if any(i.startswith('- **'+key+'**') for i in RutesList):
for i in RutesList:
if i.startswith('- **'+key+'**'):
return i.split(' ')[-1][:-1]
else:
return 'Aviso: no existe linea con el key especificado'
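# Hedged sketch (not from the original module) of the configfile lines that
# get_ruta/get_line parse: markdown-style bullets of the form "- **key** value".
# The path below is invented.
def _demo_get_ruta():
    lines = ['- **ruta_proj** /home/user/project/\n']
    return get_ruta(lines, 'ruta_proj')  # -> '/home/user/project/'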
def get_line(RutesList, key):
    ''' Searches the list 'RutesList' for the line that starts with the given key and returns the line tokens.
    Base function.
    #Arguments
    RutesList: list returned by get_rutesList() in this script.
    key: string, key used to find which line in the list starts with it.
    '''
if any(i.startswith('- **'+key+'**') for i in RutesList):
for i in RutesList:
if i.startswith('- **'+key+'**'):
return i[:-1].split(' ')[2:]
else:
return 'Aviso: no existe linea con el key especificado'
def get_modelPlot(RutesList, PlotType = 'Qsim_map'):
    ''' #Returns the list of executions defined for the given plot type in the Plot table of the configfile.
    #Operational function.
    #Arguments:
    - RutesList= list, result of reading the configfile with al.get_rutesList.
    - PlotType= string, type of plot to look up. Default= 'Qsim_map'.
    '''
for l in RutesList:
key = l.split('|')[1].rstrip().lstrip()
if key[3:] == PlotType:
EjecsList = [i.rstrip().lstrip() for i in l.split('|')[2].split(',')]
return EjecsList
return key
def get_modelPars(RutesList):
    ''' #Returns a dictionary with the information of the Calib table in the configfile.
    #Operational function.
    #Arguments:
    - RutesList= list, result of reading the configfile with al.get_rutesList.
    '''
DCalib = {}
for l in RutesList:
c = [float(i) for i in l.split('|')[3:-1]]
name = l.split('|')[2]
DCalib.update({name.rstrip().lstrip(): c})
return DCalib
def get_modelPaths(List):
    ''' #Returns a dictionary with the information of the Paths table in the configfile.
    #Operational function.
    #Arguments:
    - RutesList= list, result of reading the configfile with al.get_rutesList.
    '''
DCalib = {}
for l in List:
c = [i for i in l.split('|')[3:-1]]
name = l.split('|')[2]
DCalib.update({name.rstrip().lstrip(): c[0]})
return DCalib
def get_modelStore(RutesList):
    ''' #Returns a dictionary with the information of the Store table in the configfile.
    #Operational function.
    #Arguments:
    - RutesList= list, result of reading the configfile with al.get_rutesList.
    '''
DStore = {}
for l in RutesList:
l = l.split('|')
DStore.update({l[1].rstrip().lstrip():
{'Nombre': l[2].rstrip().lstrip(),
'Actualizar': l[3].rstrip().lstrip(),
'Tiempo': float(l[4].rstrip().lstrip()),
'Condition': l[5].rstrip().lstrip(),
'Calib': l[6].rstrip().lstrip(),
'BackSto': l[7].rstrip().lstrip(),
'Slides': l[8].rstrip().lstrip()}})
return DStore
def get_modelStoreLastUpdate(RutesList):
    ''' #Returns a dictionary with the information of the Update table in the configfile.
    #Operational function.
    #Arguments:
    - RutesList= list, result of reading the configfile with al.get_rutesList.
    '''
DStoreUpdate = {}
for l in RutesList:
l = l.split('|')
DStoreUpdate.update({l[1].rstrip().lstrip():
{'Nombre': l[2].rstrip().lstrip(),
'LastUpdate': l[3].rstrip().lstrip()}})
return DStoreUpdate
def get_ConfigLines(RutesList, key, keyTable = None, PlotType = None):
    ''' #Returns a dictionary with the information of the configfile tables: Pars, Paths, Store, Update, Plot.
    #Operational function.
    #Arguments:
    - RutesList= list, result of reading the configfile with al.get_rutesList.
    - key= string, keyword that opens the table rows to read. It can be: -s, -t.
    - keyTable= string, name of the table to parse. It can be: Pars, Paths, Store, Update, Plot.
    - PlotType= string, type of plot to look up. Default= None.
    '''
List = []
for i in RutesList:
if i.startswith('|'+key) or i.startswith('| '+key):
List.append(i)
if len(List)>0:
if keyTable == 'Pars':
return get_modelPars(List)
if keyTable == 'Paths':
return get_modelPaths(List)
if keyTable == 'Store':
return get_modelStore(List)
if keyTable == 'Update':
return get_modelStoreLastUpdate(List)
if keyTable == 'Plot':
return get_modelPlot(List, PlotType=PlotType)
return List
else:
return 'Aviso: no se encuentran lineas con el key de inicio especificado.'
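# Hedged sketch (not from the original module) of the pipe-delimited table rows
# get_ConfigLines expects: the key (e.g. '-c') opens the row, the second cell is a
# name and the remaining cells are numeric parameters. Key, name and numbers are invented.
def _demo_get_configlines():
    rows = ['|-c| 011 | 1.0| 0.9| 1.1| 1.0| 0.8| 1.0| 1.0| 1.0| 1.0| 1.0|\n']
    return get_ConfigLines(rows, '-c', keyTable='Pars')  # -> {'011': [1.0, 0.9, ...]}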
#-----------------------------------
#-----------------------------------
#Funciones generacion de radar
#-----------------------------------
#-----------------------------------
def file_format(start,end):
'''
Returns the file format customized for siata for elements containing
starting and ending point
Parameters
----------
start : initial date
end : final date
Returns
----------
file format with datetimes like %Y%m%d%H%M
Example
----------
'''
start,end = pd.to_datetime(start),pd.to_datetime(end)
format = '%Y%m%d%H%M'
return '%s-%s'%(start.strftime(format),end.strftime(format))
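# Illustrative check (not from the original module): file_format simply stamps
# both endpoints with %Y%m%d%H%M.
def _demo_file_format():
    return file_format('2020-01-01 00:00', '2020-01-01 06:00')  # '202001010000-202001010600'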
def hdr_to_series(path):
'''
Reads hdr rain files and converts it into pandas Series
Parameters
----------
path : path to .hdr file
Returns
----------
pandas time Series with mean radar rain
'''
s = pd.read_csv(path,skiprows=5,usecols=[2,3]).set_index(' Fecha ')[' Lluvia']
s.index = pd.to_datetime(list(map(lambda x:x.strip()[:10]+' '+x.strip()[11:],s.index)))
return s
def hdr_to_df(path):
'''
Reads hdr rain files and converts it into pandas DataFrame
Parameters
----------
path : path to .hdr file
Returns
----------
pandas DataFrame with mean radar rain
'''
if path.endswith('.hdr') != True:
path = path+'.hdr'
df = pd.read_csv(path,skiprows=5).set_index(' Fecha ')
df.index = pd.to_datetime(list(map(lambda x:x.strip()[:10]+' '+x.strip()[11:],df.index)))
df = df.drop('IDfecha',axis=1)
df.columns = ['record','mean_rain']
return df
def bin_to_df(path,ncells,start=None,end=None,**kwargs):
'''
Reads rain fields (.bin) and converts it into pandas DataFrame
Parameters
----------
path : path to .hdr and .bin file
start : initial date
end : final date
Returns
----------
pandas DataFrame with mean radar rain
Note
----------
path without extension, ejm folder_path/file not folder_path/file.bin,
if start and end is None, the program process all the data
'''
    start,end = pd.to_datetime(start),pd.to_datetime(end)
    # read the companion .hdr to know which .bin records to load (this step was
    # missing in the snippet; hdr_to_df is defined above)
    df = hdr_to_df(path)
    if not pd.isnull(start) and not pd.isnull(end):
        df = df.loc[start:end]
    records = df['record'].values
rain_field = []
for count,record in enumerate(records):
if record != 1:
rain_field.append(wmf.models.read_int_basin('%s.bin'%path,record,ncells)[0]/1000.0)
count = count+1
# format = (count*100.0/len(records),count,len(records))
else:
rain_field.append(np.zeros(ncells))
return pd.DataFrame(np.matrix(rain_field),index=df.index)
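# Hedged usage sketch (not from the original module): the .bin/.hdr pair written by
# get_radar_rain with save_bin=True can be read back into a time x cells DataFrame.
# The path and cell count below are placeholders.
# df_rain_fields = bin_to_df('ruta_rain/radar_rain', ncells=cu.ncells,
#                            start='2020-01-01 00:00', end='2020-01-01 06:00')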
def get_radar_rain(start,end,Dt,cuenca,codigos,rutaNC,accum=False,path_tif=None,all_radextent=False,
mask=None,meanrain_ALL=True,path_masks_csv=None,complete_naninaccum=False,save_bin=False,
save_class = False,path_res=None,umbral=0.005,
verbose=True, zero_fill = None):
start,end = pd.to_datetime(start),pd.to_datetime(end)
#hora UTC
startUTC,endUTC = start + pd.Timedelta('5 hours'), end + pd.Timedelta('5 hours')
fechaI,fechaF,hora_1,hora_2 = startUTC.strftime('%Y-%m-%d'), endUTC.strftime('%Y-%m-%d'),startUTC.strftime('%H:%M'),endUTC.strftime('%H:%M')
    #Get the dates day by day in order to list the files available for each day
datesDias = pd.date_range(fechaI, fechaF,freq='D')
a = pd.Series(np.zeros(len(datesDias)),index=datesDias)
a = a.resample('A').sum()
Anos = [i.strftime('%Y') for i in a.index.to_pydatetime()]
datesDias = [d.strftime('%Y%m%d') for d in datesDias.to_pydatetime()]
    #list the existing .nc files for that day: paths and the dates taken from the file names
ListDatesinNC = []
ListRutas = []
for d in datesDias:
try:
L = glob.glob(rutaNC + d + '*.nc')
ListRutas.extend(L)
ListDatesinNC.extend([i.split('/')[-1].split('_')[0] for i in L])
except:
print ('Sin archivos para la fecha %s'%d)
    # Sort the list of paths and the list of dates each path corresponds to.
ListRutas.sort()
    ListDatesinNC.sort()#these dates are used to assign the scans to each timestep.
    #index with the specific dates of the existing radar .nc files
datesinNC = [dt.datetime.strptime(d,'%Y%m%d%H%M') for d in ListDatesinNC]
datesinNC = pd.to_datetime(datesinNC)
    #Build the index with the desired resolution over which existing radar data will be searched,
textdt = '%d' % Dt
#Agrega hora a la fecha inicial
if hora_1 != None:
inicio = fechaI+' '+hora_1
else:
inicio = fechaI
#agrega hora a la fecha final
if hora_2 != None:
final = fechaF+' '+hora_2
else:
final = fechaF
datesDt = pd.date_range(inicio,final,freq = textdt+'s')
    #Get the scan positions for each date according to dt; if there is no scan in a time step,
    #the immediately preceding scan is accumulated instead.
    #Builds a list with the scan positions for each timestep and appends them to PosDates.
    #Since the limit for filling gaps with the previous scan is 10 min, gaps are only filled when dt=300s
    #autocomplete limit: 10 min, i.e. only one scan is repeated.
PosDates = []
pos1 = []
pos_completed = []
lim_completed = 3 #ultimos 3 barridos - 15min
for ind,d1,d2 in zip(np.arange(datesDt[:-1].size),datesDt[:-1],datesDt[1:]):
pos2 = np.where((datesinNC<d2) & (datesinNC>=d1))[0].tolist()
        # if there are no scans at the starting dt it is filled with zero - empty list
        #and it is not within the first 3 steps: 15min.
        # if it can be completed
        # and if in the last lim_completed steps there were no more than lim_completed-1 positions with pos_completed=2; lim_completed-1 so it only runs up to lim_completed.
        #this way it can only be completed with pos_completed=2 once.
if len(pos2) == 0 and ind not in np.arange(lim_completed) and complete_naninaccum == True and Dt == 300. and np.where(np.array(pos_completed[ind-lim_completed:])==2)[0].size <= lim_completed-1 : #+1 porque coge los ultimos n-1 posiciones.
pos2 = pos1
pos_completed.append(2)
elif len(pos2) == 0:
pos2=[]
pos_completed.append(0)
else:
pos_completed.append(1)
        #if filling is enabled and there are scans in this dt, keep these positions in case the next step's dt needs to be completed
if complete_naninaccum == True and len(pos2) != 0 and Dt == 300. and np.where(np.array(pos_completed[ind-lim_completed:])==2)[0].size <= lim_completed-1 :
pos1 = pos2
else:
pos1 = []
PosDates.append(pos2)
    # if set, dates and PosDates entries with zero scans are appended at the end.
if zero_fill is not None:
#se redefinen datesDt luego que los PosDates fueron asignados
final = (pd.to_datetime(final) + pd.Timedelta('%ss'%Dt*zero_fill)).strftime('%Y-%m-%d %H:%M')
datesDt = pd.date_range(inicio,final,freq = textdt+'s')
# se agrega a PosDates pasos del futuro con barridos en cero, y se cambia end.
end = end + pd.Timedelta('%ss'%Dt*zero_fill) #pasos de tiempo:steps, independiente del Dt
for steps in np.arange(zero_fill): PosDates.append([])
# paso a hora local
datesDt = datesDt - dt.timedelta(hours=5)
datesDt = datesDt.to_pydatetime()
#Index de salida en hora local
rng= pd.date_range(start.strftime('%Y-%m-%d %H:%M'),end.strftime('%Y-%m-%d %H:%M'), freq= textdt+'s')
df = pd.DataFrame(index = rng,columns=codigos)
#mascara con shp a parte de wmf
if mask is not None:
#se abre un barrido para sacar la mascara
g = netCDF4.Dataset(ListRutas[PosDates[0][0]])
        # only the grid shape of 'field' is needed here; 'pos' is not defined yet at this
        # point, so the scaling assumes a single scan per interval
        field = g.variables['Rain'][:].T/((3600/Dt)*1000.0)#g['Rain'][:]#
RadProp = [g.ncols, g.nrows, g.xll, g.yll, g.dx, g.dx]
g.close()
longs=np.array([RadProp[2]+0.5*RadProp[4]+i*RadProp[4] for i in range(RadProp[0])])
lats=np.array([RadProp[3]+0.5*RadProp[5]+i*RadProp[5] for i in range(RadProp[1])])
x,y = np.meshgrid(longs,lats)
#mask as a shp
if type(mask) == str:
#boundaries
shp = gpd.read_file(mask)
poly = shp.geometry.unary_union
shp_mask = np.zeros([len(lats),len(longs)])
for i in range(len(lats)):
for j in range(len(longs)):
if (poly.contains(Point(longs[j],lats[i])))==True:
shp_mask[i,j] = 1# Rain_mask es la mascara
l = x[shp_mask==1].min()
r = x[shp_mask==1].max()
d = y[shp_mask==1].min()
a = y[shp_mask==1].max()
#mask as a list with coordinates whithin the radar extent
elif type(mask) == list:
l = mask[0] ; r = mask[1] ; d = mask[2] ; a = mask[3]
x,y = x.T,y.T #aun tengo dudas con el recorte, si en nc queda en la misma pos que los lats,longs.
#boundaries position
x_wh,y_wh = np.where((x>l)&(x<r)&(y>d)&(y<a))
#se redefine sfield con size que corresponde
field = field[np.unique(x_wh)[0]:np.unique(x_wh)[-1],np.unique(y_wh)[0]:np.unique(y_wh)[-1]]
if save_bin and len(codigos)==1 and path_res is not None:
#open nc file
f = netCDF4.Dataset(path_res,'w', format='NETCDF4') #'w' stands for write
tempgrp = f.createGroup('rad_data') # as folder for saving files
lon = longs[np.unique(x_wh)[0]:np.unique(x_wh)[-1]]
lat = lats[np.unique(y_wh)[0]:np.unique(y_wh)[-1]]
#set name and leght of dimensions
tempgrp.createDimension('lon', len(lon))
tempgrp.createDimension('lat', len(lat))
tempgrp.createDimension('time', None)
#building variables
longitude = tempgrp.createVariable('longitude', 'f4', 'lon')
latitude = tempgrp.createVariable('latitude', 'f4', 'lat')
rain = tempgrp.createVariable('rain', 'f4', (('time', 'lat', 'lon')))
time = tempgrp.createVariable('time', 'i4', 'time')
#adding globalattributes
f.description = "Radar rainfall dataset containing one group"
f.history = "Created " + dt.datetime.now().strftime("%d/%m/%y")
#Add local attributes to variable instances
longitude.units = 'degrees east - wgs4'
latitude.units = 'degrees north - wgs4'
time.units = 'minutes since 2020-01-01 00:00'
rain.units = 'mm/h'
#passing data into variables
# use proper indexing when passing values into the variables - just like you would a numpy array.
longitude[:] = lon #The "[:]" at the end of the variable instance is necessary
latitude[:] = lat
else:
# acumular dentro de la cuenca.
cu = wmf.SimuBasin(rute= cuenca)
if save_class:
cuConv = wmf.SimuBasin(rute= cuenca)
cuStra = wmf.SimuBasin(rute= cuenca)
#accumulated in basin
if accum:
if mask is not None:
rvec_accum = np.zeros(field.shape)
dfaccum = pd.DataFrame(index = rng) #este producto no da con mask.
else:
rvec_accum = np.zeros(cu.ncells)
# rvec = np.zeros(cu.ncells)
dfaccum = pd.DataFrame(np.zeros((cu.ncells,rng.size)).T,index = rng)
else:
pass
#all extent
if all_radextent:
radmatrix = np.zeros((1728, 1728))
    #ITERATE OVER THE SCANS IN THE PERIOD AND DERIVE THE PRODUCTS
# print ListRutas
for ind,dates,pos in zip(np.arange(len(datesDt[1:])),datesDt[1:],PosDates):
#escoge como definir el size de rvec
if mask is not None:
rvec = np.zeros(shape = field.shape)
else:
rvec = np.zeros(cu.ncells)
if save_class:
rConv = np.zeros(cu.ncells, dtype = int)
rStra = np.zeros(cu.ncells, dtype = int)
try:
#se lee y agrega lluvia de los nc en el intervalo.
for c,p in enumerate(pos):
#lista archivo leido
if verbose:
print (ListRutas[p])
#Lee la imagen de radar para esa fecha
g = netCDF4.Dataset(ListRutas[p])
rainfield = g.variables['Rain'][:].T/(((len(pos)*3600)/Dt)*1000.0)
RadProp = [g.ncols, g.nrows, g.xll, g.yll, g.dx, g.dx]
#if all extent
if all_radextent:
radmatrix += rainfield
#if mask
if mask is not None and type(mask) == str:
rvec += (rainfield*shp_mask)[np.unique(x_wh)[0]:np.unique(x_wh)[-1],np.unique(y_wh)[0]:np.unique(y_wh)[-1]]
elif mask is not None and type(mask) == list:
rvec += rainfield[np.unique(x_wh)[0]:np.unique(x_wh)[-1],np.unique(y_wh)[0]:np.unique(y_wh)[-1]]
# on WMF.
else:
#Agrega la lluvia en el intervalo
rvec += cu.Transform_Map2Basin(rainfield,RadProp)
if save_class:
ConvStra = cu.Transform_Map2Basin(g.variables['Conv_Strat'][:].T, RadProp)
# 1-stra, 2-conv
rConv = np.copy(ConvStra)
rConv[rConv == 1] = 0; rConv[rConv == 2] = 1
rStra = np.copy(ConvStra)
rStra[rStra == 2] = 0
rvec[(rConv == 0) & (rStra == 0)] = 0
                        rConv[rvec == 0] = 0
                        rStra[rvec == 0] = 0
#Cierra el netCDF
g.close()
#muletilla
path = 'bla'
except:
print ('error - no field found ')
path = ''
if accum:
if mask is not None:
rvec += np.zeros(shape = field.shape)
rvec = np.zeros(shape = field.shape)
else:
rvec_accum += np.zeros(cu.ncells)
rvec = np.zeros(cu.ncells)
else:
if mask is not None:
rvec = np.zeros(shape = field.shape)
else:
rvec = np.zeros(cu.ncells)
if save_class:
rConv = np.zeros(cu.ncells)
rStra = np.zeros(cu.ncells)
if all_radextent:
radmatrix += np.zeros((1728, 1728))
#acumula dentro del for que recorre las fechas
if accum:
rvec_accum += rvec
if mask is None: #esto para mask no sirve
dfaccum.loc[dates.strftime('%Y-%m-%d %H:%M:%S')]= rvec
else:
pass
# si se quiere sacar promedios de lluvia de radar en varias cuencas definidas en 'codigos'
#subbasins defined for WMF
if meanrain_ALL and mask is None:
mean = []
df_posmasks = pd.read_csv(path_masks_csv,index_col=0)
for codigo in codigos:
if path == '': # si no hay nc en ese paso de tiempo.
mean.append(np.nan)
else:
mean.append(np.sum(rvec*df_posmasks['%s'%codigo])/float(df_posmasks['%s'%codigo][df_posmasks['%s'%codigo]==1].size))
# se actualiza la media de todas las mascaras en el df.
df.loc[dates.strftime('%Y-%m-%d %H:%M:%S')]=mean
else:
pass
#guarda binario y df, si guardar binaria paso a paso no me interesa rvecaccum
if mask is None and save_bin == True and len(codigos)==1 and path_res is not None:
mean = []
#guarda en binario
dentro = cu.rain_radar2basin_from_array(vec = rvec,
ruta_out = path_res,
fecha = dates,
dt = Dt,
umbral = umbral)
#si guarda nc de ese timestep guarda clasificados
if dentro == 0:
hagalo = True
else:
hagalo = False
#mira si guarda o no los clasificados
if save_class:
#Escribe el binario convectivo
aa = cuConv.rain_radar2basin_from_array(vec = rConv,
ruta_out = path_res+'_conv',
fecha = dates,
dt = Dt,
doit = hagalo)
#Escribe el binario estratiforme
aa = cuStra.rain_radar2basin_from_array(vec = rStra,
ruta_out = path_res+'_stra',
fecha = dates,
dt = Dt,
doit = hagalo)
#guarda en df meanrainfall.
try:
mean.append(rvec.mean())
except:
mean.append(np.nan)
df.loc[dates.strftime('%Y-%m-%d %H:%M:%S')]=mean
elif mask is None and save_bin == True and len(codigos)==1 and path_res is None: #si es una cuenca pero no se quiere guardar binarios.
mean = []
#guarda en df meanrainfall.
try:
mean.append(rvec.mean())
except:
mean.append(np.nan)
df.loc[dates.strftime('%Y-%m-%d %H:%M:%S')]=mean
#guardar .nc con info de recorte de radar: mask.
if mask is not None and save_bin and len(codigos)==1 and path_res is not None:
mean = []
#https://pyhogs.github.io/intro_netcdf4.html
rain[ind,:,:] = rvec.T
time[ind] = int((dates - pd.to_datetime('2010-01-01 00:00')).total_seconds()/60) #min desde 2010
if ind == np.arange(len(datesDt[1:]))[-1]:
f.close()
print ('.nc saved')
#guarda en df meanrainfall.
if path == '': # si no hay nc en ese paso de tiempo.
mean.append(np.nan)
else:
mean.append(np.sum(rvec)/float(shp_mask[shp_mask==1].size))
#save
df.loc[dates.strftime('%Y-%m-%d %H:%M:%S')]=mean
if mask is None and save_bin == True and len(codigos)==1 and path_res is not None:
#Cierrra el binario y escribe encabezado
cu.rain_radar2basin_from_array(status = 'close',ruta_out = path_res)
print ('.bin & .hdr saved')
if save_class:
cuConv.rain_radar2basin_from_array(status = 'close',ruta_out = path_res+'_conv')
cuStra.rain_radar2basin_from_array(status = 'close',ruta_out = path_res+'_stra')
print ('.bin & .hdr escenarios saved')
else:
print ('.bin & .hdr NOT saved')
pass
#elige los retornos.
if accum == True and path_tif is not None:
cu.Transform_Basin2Map(rvec_accum,path_tif)
return df,rvec_accum,dfaccum
elif accum == True and mask is not None:
return df,rvec_accum
elif accum == True and mask is None:
return df,rvec_accum,dfaccum
elif all_radextent:
return df,radmatrix
else:
return df
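# Hedged usage sketch (not from the original module) of get_radar_rain for a single
# basin, writing the model rain binary; every path and code below is a placeholder.
# df_meanrain = get_radar_rain('2020-01-01 00:00', '2020-01-01 06:00', 300,
#                              'ruta_nc/cuenca.nc', [260], 'ruta_radar/',
#                              meanrain_ALL=False, save_bin=True,
#                              path_res='ruta_rain/radar_rain', verbose=False)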
def get_radar_rain_OP(start,end,Dt,cuenca,codigos,rutaNC,accum=False,path_tif=None,all_radextent=False,
meanrain_ALL=True,complete_naninaccum=False, evs_hist=False,save_bin=False,save_class = False,
path_res=None,umbral=0.005,include_escenarios = None,
verbose=True):
'''
    Read .nc files from rutaNC:101Radar_Class within the assigned period and frequency.
    For now it only works with one scan per timestep, operational at 5 min.
0. It divides by 1000.0 and converts from mm/5min to mm/h.
1. Get mean radar rainfall in basins assigned in 'codigos' for finding masks, if the mask exist.
2. Write binary files if is setted.
- Cannot do both 1 and 2.
- To saving binary files (2) set: meanrain_ALL=False, save_bin=True, path_res= path where to write results,
len('codigos')=1, nc_path aims to the one with dxp and simubasin props setted.
Parameters
----------
start: string, date&time format %Y-%m%-d %H:%M, local time.
end: string, date&time format %Y-%m%-d %H:%M, local time.
Dt: float, timedelta in seconds. For this function it should be lower than 3600s (1h).
cuenca: string, simubasin .nc path with dxp and format from WMF. It should be 260 path if whole catchment analysis is needed, or any other .nc path for saving the binary file.
codigos: list, with codes of stage stations. Needed for finding the mask associated to a basin.
rutaNC: string, path with .nc files from radar meteorology group. Default in amazonas: 101Radar_Class
Optional Parameters
----------
accum: boolean, default False. True for getting the accumulated matrix between start and end.
Change returns: df,rvec (accumulated)
    path_tif: string, path of tif to write the accumulated basin map. Default None.
all_radextent:boolean, default False. True for getting the accumulated matrix between start and end in the
whole radar extent. Change returns: df,radmatrix.
    meanrain_ALL: boolean, default True. True for getting the mean radar rainfall within several basins whose masks are defined in 'codigos'.
save_bin: boolean, default False. True for saving .bin and .hdr files with rainfall and if len('codigos')=1.
save_class: boolean,default False. True for saving .bin and .hdr for convective and stratiform classification. Applies if len('codigos')=1 and save_bin = True.
path_res: string with path where to write results if save_bin=True, default None.
umbral: float. Minimum umbral for writing rainfall, default = 0.005.
Returns
----------
- df whith meanrainfall of assiged codes in 'codigos'.
- df,rvec if accum = True.
- df,radmatrix if all_radextent = True.
- save .bin and .hdr if save_bin = True, len('codigos')=1 and path_res=path.
'''
#### FECHAS Y ASIGNACIONES DE NC####
start,end = pd.to_datetime(start),pd.to_datetime(end)
#hora UTC
    startUTC,endUTC = start + pd.Timedelta('5 hours'), end + pd.Timedelta('5 hours')
"""
use cross validation to plot mean ROC curve, show std
ref:
https://scikit-learn.org/stable/auto_examples/model_selection/plot_roc_crossval.html#sphx-glr-auto-examples-model-selection-plot-roc-crossval-py
Note that you have to tune the parameters yourself
"""
from scipy import interp
import argparse
import glob
import warnings
import numpy as np
import scipy
import pandas as pd
import matplotlib
matplotlib.use('agg')
import matplotlib.pyplot as plt
import matplotlib.colors as clr
from matplotlib import cm
from matplotlib.colors import ListedColormap, LinearSegmentedColormap
import seaborn as sns
from copy import deepcopy as dp
# import xgboost as xgb
# from xgboost import XGBClassifier
# import umap
# from mlxtend.classifier import StackingCVClassifier
# from mlxtend.feature_selection import ColumnSelector
from sklearn import model_selection, linear_model
from sklearn.base import TransformerMixin
from sklearn.datasets import load_iris, make_regression, make_moons, make_circles, make_classification
from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis
from sklearn.ensemble import (RandomForestClassifier, RandomForestRegressor, AdaBoostClassifier,
                              GradientBoostingClassifier, GradientBoostingRegressor)
from sklearn.gaussian_process import GaussianProcessClassifier
from sklearn.gaussian_process.kernels import RBF
from sklearn.kernel_ridge import KernelRidge
from sklearn.linear_model import (LogisticRegression, RidgeClassifier, SGDClassifier, LinearRegression,
                                  Ridge, Lasso, Lars, BayesianRidge)
from sklearn.metrics import roc_curve, roc_auc_score, average_precision_score, precision_recall_curve, mean_absolute_error
from sklearn.metrics.scorer import make_scorer
from sklearn.model_selection import (KFold, StratifiedKFold, cross_val_score, GridSearchCV,
                                     RandomizedSearchCV, LeaveOneOut, train_test_split)
from sklearn.naive_bayes import GaussianNB
from sklearn.neighbors import KNeighborsClassifier, RadiusNeighborsClassifier, KNeighborsRegressor
from sklearn.neural_network import MLPClassifier
from sklearn.pipeline import Pipeline, FeatureUnion, make_pipeline
from sklearn.preprocessing import StandardScaler, PolynomialFeatures, MinMaxScaler
from sklearn.svm import SVC, SVR, LinearSVC
from sklearn.tree import DecisionTreeClassifier
from sklearn.exceptions import ConvergenceWarning
warnings.filterwarnings('ignore')
warnings.simplefilter(action='ignore', category=FutureWarning)
warnings.simplefilter(action='ignore', category=ConvergenceWarning)
def sklearn_RF(par=False):
est = RandomForestClassifier(n_estimators=1000,random_state=0,warm_start=False,n_jobs=-1,class_weight={1:4,0:1})
if par:
est = RandomForestClassifier(**par)
myDict = {}
return est, myDict
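# Illustrative smoke test (not from the original script) of sklearn_RF on
# synthetic data; the feature matrix is generated, not real.
def _demo_sklearn_RF():
    X_demo, y_demo = make_classification(n_samples=60, n_features=5, random_state=0)
    est, grid = sklearn_RF()
    est.fit(X_demo, y_demo)
    return est.predict_proba(X_demo)[:, 1], grid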
def plot_top_features(reg,X,y,output):
    current_feature_df = pd.DataFrame()
    current_feature_df['features'] = X.columns.tolist()
    reg.fit(X,y)
    # Tree ensembles expose feature_importances_; linear models expose coef_ (possibly 2-D)
    try:
        current_feature_df['score'] = list(reg.feature_importances_)
    except AttributeError:
        try:
            current_feature_df['score'] = list(reg.coef_)
        except ValueError:
            current_feature_df['score'] = list(reg.coef_[0])
    current_feature_df = current_feature_df.sort_values('score',ascending=False)
    plt.figure(figsize=(len(current_feature_df['features'])*2,8))
    sns.barplot(x=current_feature_df['features'],y=current_feature_df['score'])
    plt.xticks(rotation=90)
    plt.xlabel("")
    plt.ylabel("Feature importance")
    plt.savefig("%s_feature_importance.pdf"%(output), bbox_inches='tight')
def simple_CV_evaluation(model,params,X,y):
outer = StratifiedKFold(n_splits=3,shuffle=False)
my_pred=[]
my_true=[]
best_features = X.columns.tolist()
auPRC_list = []
auROC_list = []
for train_index, test_index in outer.split(X,y):
X_train, X_test = X.iloc[train_index], X.iloc[test_index]
y_train, y_test = y.iloc[train_index], y.iloc[test_index]
# print (list(set(X_train.index.tolist()).intersection(X_test.index.tolist())))
current_model = dp(model)
current_model.fit(X_train[best_features].values,y_train)
pred_y = current_model.predict_proba(X_test[best_features].values)
pred_y = [x[1] for x in pred_y]
y_test = y_test.tolist()
auROC = roc_auc_score(y_test,pred_y)
auPRC = average_precision_score(y_test,pred_y)
my_pred += pred_y
my_true += y_test
print ("auPRC: %s. auROC: %s"%(auPRC,auROC))
auPRC_list.append(auPRC)
auROC_list.append(auROC)
df = pd.DataFrame()
df['true']=my_true
df['pred']=my_pred
return df,auROC_list,auPRC_list
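# --- Illustrative usage sketch (added for clarity; not part of the original script) ---
# Shows how sklearn_RF and simple_CV_evaluation are meant to fit together on a
# synthetic dataset. All names below (X_demo, y_demo, _example_cv_run) are made up
# for the example.
def _example_cv_run():
    X_demo, y_demo = make_classification(n_samples=300, n_features=8, random_state=0)
    X_demo = pd.DataFrame(X_demo, columns=["f%d" % i for i in range(8)])
    y_demo = pd.Series(y_demo)
    est, grid = sklearn_RF()
    pred_df, auroc_list, auprc_list = simple_CV_evaluation(est, grid, X_demo, y_demo)
    print("mean auROC: %.3f, mean auPRC: %.3f" % (np.mean(auroc_list), np.mean(auprc_list)))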
def plot_auROC_multi(df,color_dict):
sns.set_style("white")
plt.figure()
for s,d in df.groupby('label'):
plot_df = pd.DataFrame()
x_predict,y_predict,_ = roc_curve(d['true'],d['pred'])
auc = roc_auc_score(d['true'],d['pred'])
print (auc)
plot_df['x'] = x_predict
plot_df['y'] = y_predict
sns.lineplot(data=plot_df,x="x",y="y",ci=0,label="%s AUC:%.2f"%(s,auc),color=color_dict[s])
plt.plot([0, 1], [0, 1], 'k--')
plt.xlim(0,1)
plt.ylim(0,1)
plt.xlabel('False positive rate')
plt.ylabel('True positive rate')
plt.title('ROC curve')
plt.legend(loc='best',title="")
# plt.savefig("auROC.png")
plt.savefig("auROC.pdf", bbox_inches='tight')
plt.close()
def plot_auPRC_multi(df,color_dict):
sns.set_style("white")
plt.figure()
for s,d in df.groupby('label'):
plot_df = pd.DataFrame()
y_predict,x_predict,_ = precision_recall_curve(d['true'],d['pred'])
auc = average_precision_score(d['true'],d['pred'])
print (auc)
plot_df['x'] = x_predict
plot_df['y'] = y_predict
sns.lineplot(data=plot_df,x="x",y="y",ci=0,label="%s AUC:%.2f"%(s,auc),color=color_dict[s])
# plt.plot([0, 1], [0, 1], 'k--')
plt.xlim(0,1)
plt.ylim(0,1)
plt.xlabel('Recall')
plt.ylabel('Precision')
plt.title('Precision-Recall curve')
plt.legend(loc='best',title="")
# plt.savefig("auPRC.png")
plt.savefig("auPRC.pdf", bbox_inches='tight')
plt.close()
def define_high_low(x,mu,sigma):
    t = 1
    low = mu-t*sigma
    high = mu+t*sigma
    # print (low,high)
    if low <= x <= high:
        return 0
    if x > high:
        return 1
    return -1
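# Quick sanity check of the thresholding above (values are arbitrary examples):
#   define_high_low(0.5, 0.0, 1.0)  -> 0   (within one sigma of mu)
#   define_high_low(1.5, 0.0, 1.0)  -> 1   (above mu + sigma)
#   define_high_low(-1.5, 0.0, 1.0) -> -1  (below mu - sigma)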
def boxplot_paired_t_test(a,b,color_dict,ylabel,output):
sns.set_style("whitegrid")
df = pd.DataFrame()
df['All_variants'] = a
df['GWAS_only'] = b
myMin = df.min().min()
myMax = df.max().max()
plot_df = pd.melt(df)
plt.figure()
ax=sns.boxplot(x="variable",y='value',data=plot_df,palette =color_dict,linewidth=3)
for patch in ax.artists:
r, g, bb, _ = patch.get_facecolor()
patch.set_facecolor((r, g, bb, .3))
sns.swarmplot(x="variable", y="value", data=plot_df,palette =color_dict)
unit=0.01
y=myMax+unit*1
h=unit*2
plt.plot([0, 0, 1, 1], [y, y+h, y+h, y], lw=1.5, c="black")
plt.text(0.5, y+h+unit, "Paired T-test: %.2E" % scipy.stats.ttest_rel(a,b).pvalue, ha='center', va='bottom', color="black")
plt.ylim(myMin-unit*5,myMax+0.1)
plt.ylabel(ylabel)
plt.savefig("%s.pdf"%(output), bbox_inches='tight')
def main():
color_dict = {}
color_dict['All_variants']="#fa6525"
color_dict['GWAS_only']="#00b8a5"
df = | pd.read_csv("snp_data.tsv",sep="\t") | pandas.read_csv |
# IMPORT data processing and data visualisation libraries
import pandas as pd
import seaborn as sns
import numpy as np
import matplotlib.pyplot as plt
import json
from operator import itemgetter
global s,brnch,year
global brnch2,year2
global gpmembers
global gpname
gpmembers=[]
gpname=[]
# Create the main function to draw bar graph
def mainfunc(x,y,plot_type):
# Load the database as a dictionary from database1.txt file (created database)
with open('dataBase1.txt', 'r') as f:
s = f.read()
database = json.loads(s)
rollno = []
rollno.append(x[0].upper())
rollno2 = []
if(len(y)==1):
rollno2.append(y[0].upper())
branch={ # branches as specified in the dataextractor file
'1':"CIVIL",
'2':"ELECTRICAL",
'3':"MECHANICAL",
'4':"ECE",
'5':"CSE",
'6':"ARCHI",
'7':"Chemical",
'8':"MATERIAL",
"MI4":"ECE DUAl",
"MI5":"CSE DUAL"
}
try:
try: # Selecting year and branch on basis of rollno.
year = 8-int(rollno[0][1])
az = rollno[0][2:-2]
for i in branch.keys():
if az == i:
brnch = branch[i]
except:
print("Please enter a valid rollno.")
if(len(rollno2)==1):
try:
year2 = 8-int(rollno2[0][1])
az2 = rollno2[0][2:-2]
for i in branch.keys():
if az2 == i:
brnch2 = branch[i]
except:
print("Please enter a valid rollno.")
# For SGPI data representation
if("SGPI" in plot_type):
semester = ['1st', '2nd', '3rd', '4th','5th', '6th', '7th', '8th'] # semesters list
x = database[str(year)][brnch][rollno[0]]["sgpi"]
y=[]
for i in x:
m = ((i.split("=")[1]))
y.append(float(m))
#Creating dataframe of student having sgpi
db = pd.DataFrame(y,columns=['SGPI'])
db['Semester']=semester[:year*2]
db["Name"] = list((database[str(year)][brnch][rollno[0]]["name"]) for i in range(year*2))
if(len(rollno2)==1):
q = database[str(year2)][brnch2][rollno2[0]]["sgpi"]
y2=[]
for i in q:
m2 = ((i.split("=")[1]))
y2.append(float(m2))
db2 = pd.DataFrame(y2,columns=['SGPI'])
db2['Semester']=semester[:year2*2]
db2["Name"] = list((database[str(year2)][brnch2][rollno2[0]]["name"]) for i in range(year*2))
db = pd.concat([db,db2])
# Plotting the bar graph
sns.set(style="whitegrid")
pl = sns.barplot(x="Semester", y="SGPI", data=db, hue='Name')#plotting barplot & setting parameters
plt.ylim(1,12.5)
plt.title("SGPI VS Semester")
ax = plt.gca()
totals=[]
for i in ax.patches:
totals.append(i.get_height())
# set individual bar lables using above list
total = sum(totals)
# Setting the place of bar labels
for i in ax.patches:
# get_x pulls left or right; get_height pushes up or down
z1=i.get_height()
z1 = "%0.2f" % float(z1)
ax.text(i.get_x()+(i.get_width()/2), i.get_height()/2, \
z1, fontsize=(20-(year*2)),
color='black', ha= 'center')
plt.show() #Shows the plot
# For CGPI data representation
if("CGPI" in plot_type):
semester = ['1st', '2nd', '3rd', '4th','5th', '6th', '7th', '8th']
x = database[str(year)][brnch][rollno[0]]["cgpi"] #getting cgpi data of student
y=[]
for i in x:
m = ((i.split("=")[1]))
y.append(float(m))
#Creating the dataframe of the student having CGPI
db = pd.DataFrame(y,columns=['CGPI'])
db['Semester']=semester[:year*2]
db["Name"] = list((database[str(year)][brnch][rollno[0]]["name"]) for i in range(year*2))
if(len(rollno2)==1):
q = database[str(year2)][brnch2][rollno2[0]]["cgpi"]
y2=[]
for i in q:
m2 = ((i.split("=")[1]))
y2.append(float(m2))
db2 = | pd.DataFrame(y2,columns=['CGPI']) | pandas.DataFrame |
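# --- Editor's note: hedged sketch, not part of the original script ---
# The grade-plotting code above assumes dataBase1.txt holds JSON shaped roughly like
# the structure below, with "sgpi"/"cgpi" entries stored as "key=value" strings that
# the code splits on "=". All keys and values here are hypothetical.
#
#   { "3": { "CSE": { "<rollno>": { "name": "Some Student",
#                                   "sgpi": ["sem1=8.5", "sem2=8.9"],
#                                   "cgpi": ["sem1=8.5", "sem2=8.7"] } } } }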
"""
Library Features:
Name: lib_data_io_shapefile
Author(s): <NAME> (<EMAIL>)
Date: '20190109'
Version: '1.0.0'
"""
#######################################################################################
# Library
import geopandas as gpd
import pandas as pd
import logging
from src.common.default.lib_default_args import sLoggerName
from src.common.driver.configuration.drv_configuration_debug import Exc
# Logging
oLogStream = logging.getLogger(sLoggerName)
#######################################################################################
# -------------------------------------------------------------------------------------
# Method to open shapefile with geopandas (read mode)
def openFile(sFileName):
try:
oFileData = gpd.read_file(sFileName)
return oFileData
except IOError as oError:
        Exc.getExc(' =====> ERROR: in open file (lib_data_io_shapefile)' + ' [' + str(oError) + ']', 1, 1)
# -------------------------------------------------------------------------------------
# -------------------------------------------------------------------------------------
# Method to get selected or all data from shapefile handle
def getData(oFileData, oFileFields_SEL=None):
oFileGeoms = ((feature['geometry'], 1) for feature in oFileData.iterfeatures())
oFileFields_ALL = oFileData.columns.values.tolist()
if oFileFields_SEL is None:
oFileFields_SEL = oFileFields_ALL
oPoints = | pd.DataFrame() | pandas.DataFrame |
# -*- coding: utf-8 -*-
#
# Copyright 2017-2020 Data61, CSIRO
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import itertools
from collections import defaultdict
import numpy as np
import pandas as pd
import scipy.sparse as sps
from ..globalvar import SOURCE, TARGET, WEIGHT
from .validation import require_dataframe_has_columns, comma_sep
class ExternalIdIndex:
"""
An ExternalIdIndex maps between "external IDs" and "integer locations" or "internal locations"
(ilocs).
It is designed to allow handling only efficient integers internally, but easily convert between
them and the user-facing IDs.
"""
def __init__(self, ids):
self._index = | pd.Index(ids) | pandas.Index |
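# Illustrative aside (not part of the original module): the pandas machinery an
# external-ID index builds on. ExternalIdIndex's own methods are not shown in this
# excerpt, so only the underlying pd.Index behaviour is demonstrated here.
#
#   idx = pd.Index(["n1", "n2", "n3"])
#   idx.get_indexer(["n3", "n1"])   # array([2, 0])        external IDs -> ilocs
#   idx[[2, 0]]                     # Index(['n3', 'n1'])  ilocs -> external IDs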
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Dec 9 12:46:52 2020
Updated on June 3, 2021
@author: jacob
"""
import pandas as pd
import matplotlib.pyplot as plt
import scipy.optimize as optim
import numpy as np
from scipy import stats
#from heatmap import heatmap_gr, heatmap_ymax
#from graph_repl import graph_repls
# How many time points are graphed
XSCALE = 97
def graph_avg(df_dict, con_data, exp_data, con_name, exp_name, data_path, plate_list, hm_flag=False, log_flag=False):
""" Plot Formatting """
# You typically want your plot to be ~1.33x wider than tall.
# Common sizes: (10, 7.5) and (12, 9)
plt.figure(figsize=(10, 7.5))
con_color = "#0466c8"
exp_color = "#d62828"
# Remove the plot frame lines. They are unnecessary
ax = plt.subplot(111)
ax.spines["top"].set_visible(False)
ax.spines["bottom"].set_visible(False)
ax.spines["right"].set_visible(False)
ax.spines["left"].set_visible(False)
# Set background to white
ax.set_facecolor('white')
# Ensure that the axis ticks only show up on the bottom and left of the plot.
ax.get_xaxis().tick_bottom()
ax.get_yaxis().tick_left()
# Limit the range of the plot to only where the data is.
plt.ylim(0, 2)
plt.xlim(0, XSCALE)
# Make sure your axis ticks are large enough to be easily read.
plt.yticks(np.arange(0, 1.7, 0.2), [str(round(x, 1)) for x in np.arange(0, 1.7, 0.2)], fontsize=14)
plt.xticks(np.arange(0, XSCALE, 24), [str(round(x,1)) for x in np.arange(0, XSCALE, 24)], fontsize=14)
# Provide tick lines across the plot to help your viewers trace along the axis ticks.
for y in np.arange(0, 1.7, 0.2):
plt.plot(range(0, XSCALE), [y] * len(range(0, XSCALE)), "--", lw=0.5, color="black", alpha=0.3)
""" Calculations """
# Parameter fits for individual control wells
control_grs = []
control_ymaxs = []
# Storing wells to compute average of replicate
control_wells = []
con_avg = con_name + "_avg"
# Lists of parameter values for the experimental replicate
exp_grs = []
exp_ymaxs = []
# Storing wells to compute average of replicate
exp_wells = []
exp_avg = exp_name + "_avg"
avg_df = pd.DataFrame()
# Calculate parameter values for individual wells
for plate_name in con_data.keys():
# Plate
df = df_dict[plate_name]
plat = plate_list[plate_name]
# Wells in that specific plate that belong to given control replicate
wells = con_data[plate_name]
for well in wells:
if well == "":
break
control_wells.append(df[well])
gr, ymax, line = fit_model(df, well)
plat.add_params(gr, ymax, well)
if gr < 2:
control_grs.append(gr)
if ymax < 2:
control_ymaxs.append(ymax)
for plate_name in exp_data.keys():
# Plate
df = df_dict[plate_name]
plat = plate_list[plate_name]
# Wells in that specific plate that belong to given control replicate
wells = exp_data[plate_name]
for well in wells:
if well == "":
break
exp_wells.append(df[well])
gr, ymax, line = fit_model(df, well)
plat.add_params(gr, ymax, well)
if gr < 2:
exp_grs.append(gr)
if ymax < 2:
exp_ymaxs.append(ymax)
avg_df["Time"] = df["Time"]
# Calculate averages for replicates
con_mean, con_std, con_ci = avg_well(control_wells)
avg_df[con_avg] = con_mean
avg_df[con_name + "_std"] = con_std
avg_df[con_name + "_ci"] = con_ci
exp_mean, exp_std, exp_ci = avg_well(exp_wells)
avg_df[exp_avg] = exp_mean
avg_df[exp_name + "_std"] = exp_std
avg_df[exp_name + "_ci"] = exp_ci
# Parameter fits for average control model
con_gr, con_ymax, con_line = fit_model(avg_df, con_avg)
# Parameter fits for average exp model
exp_gr, exp_ymax, exp_line = fit_model(avg_df, exp_avg)
# T-test for growth rate and ymax parameter values
gr_stats = t_test(exp_grs, control_grs)
ymax_stats = t_test(exp_ymaxs, control_ymaxs)
# P-values
gr_pval = gr_stats[1]
ymax_pval = ymax_stats[1]
if con_ymax > 0.01 and exp_ymax > 0.01:
# Normalize experimental parameters with control parameters
gr_ratio = (exp_gr / con_gr)
ymax_ratio = (exp_ymax / con_ymax)
else:
gr_ratio = 0
ymax_ratio = 0
# Symbols on graph to indicate better growth by experimental strain
better_gr = ""
if gr_ratio > 1:
better_gr += "^ "
better_ymax = ""
if ymax_ratio > 1:
better_ymax += "^ "
""" Graphing """
# Graph average experimental line
plt.plot(avg_df["Time"], avg_df[exp_avg], color=exp_color, label=(exp_name), linewidth=3.0)
# plt.plot(*exp_line, 'r', linestyle = "--", color=exp_color, linewidth=1)
# Confidence intervals
exp_ci_hi = avg_df[exp_avg] + avg_df[exp_name + "_ci"]
exp_ci_low = avg_df[exp_avg] - avg_df[exp_name + "_ci"]
plt.plot(avg_df["Time"], exp_ci_hi, color=exp_color, linestyle=":", linewidth=1.5)
plt.plot(avg_df["Time"], exp_ci_low, color=exp_color, linestyle=":", linewidth=1.5)
# Graph average control line
plt.plot(avg_df["Time"], avg_df[con_avg], color=con_color, label=(con_name), linewidth=3.0)
# plt.plot(*con_line, 'r', linestyle = "--", color=con_color, linewidth=1)
# Confidence intervals
con_ci_hi = avg_df[con_avg] + avg_df[con_name + "_ci"]
con_ci_low = avg_df[con_avg] - avg_df[con_name + "_ci"]
plt.plot(avg_df["Time"], con_ci_hi, color=con_color, linestyle=":", linewidth=1.5)
plt.plot(avg_df["Time"], con_ci_low, color=con_color, linestyle=":", linewidth=1.5)
# Plot histograms
# graph_repls(con_grs, con_ymaxs, exp_grs, exp_ymax,con_name, exp_name, data_path)
# Place a legend to the right
lgd = ax.legend(
loc = 'upper right',
borderaxespad = 0.,
facecolor = 'white',
fontsize = 16)
# Format P-values
if gr_pval < 0.001:
gr_pval = "<0.001"
else:
gr_pval = round(gr_pval, 3)
if ymax_pval < 0.001:
ymax_pval = "<0.001"
else:
ymax_pval = round(ymax_pval, 3)
plt.title(f"{exp_name} vs. {con_name}- GR ratio:{round(gr_ratio, 3)} ({gr_pval}) Ymax ratio: {round(ymax_ratio, 3)} ({ymax_pval})", fontsize=20)
path = data_path + "Graphs/Averages/" + exp_name + "_vs_" + con_name
plt.savefig(path, bbox_extra_artists=(lgd,), bbox_inches='tight')
plt.close()
# Graph each well of a replicate
def graph_indiv(df_dict, repl_data, repl_name, data_path, plate_list, log_flag=False):
""" Graph Formatting """
# You typically want your plot to be ~1.33x wider than tall.
# Common sizes: (10, 7.5) and (12, 9)
plt.figure(figsize=(10, 7.5))
# Remove the plot frame lines. They are unnecessary
ax = plt.subplot(111)
ax.spines["top"].set_visible(False)
ax.spines["bottom"].set_visible(False)
ax.spines["right"].set_visible(False)
ax.spines["left"].set_visible(False)
# Set background to white
ax.set_facecolor('white')
# Ensure that the axis ticks only show up on the bottom and left of the plot.
ax.get_xaxis().tick_bottom()
ax.get_yaxis().tick_left()
# Limit the range of the plot to only where the data is.
plt.ylim(0, 2)
plt.xlim(0, XSCALE)
# Make sure your axis ticks are large enough to be easily read.
plt.yticks(np.arange(0, 1.7, 0.2), [str(round(x, 1)) for x in np.arange(0, 1.7, 0.2)], fontsize=14)
plt.xticks(np.arange(0, XSCALE, 24), [str(round(x,1)) for x in np.arange(0, XSCALE, 24)], fontsize=14)
# Provide tick lines across the plot to help your viewers trace along the axis ticks.
for y in np.arange(0, 1.7, 0.2):
plt.plot(range(0, XSCALE), [y] * len(range(0, XSCALE)), "--", lw=0.5, color="black", alpha=0.3)
# Graph each replicate well
n = 0
for plate_name in repl_data.keys():
# Plate
df = df_dict[plate_name]
plat = plate_list[plate_name]
# Wells in that specific plate that belong to given control replicate
wells = repl_data[plate_name]
# Counter for number of viable wells
n = 0
for well in wells:
if well == "":
break
try:
gr, ymax = plat.get_params(well)
except KeyError:
gr, ymax, line = fit_model(df, well)
plat.add_params(gr, ymax, well)
if ymax > 0.2:
n += 1
plt.plot(df["Time"], df[well], label=well, linewidth=2.5)
# Place a legend to the right
lgd = ax.legend(
loc = 'upper right',
borderaxespad = 0.,
facecolor = 'white',
ncol = 2,
fontsize = 16)
plt.title(f"{repl_name} Isolates: ({n} isolates with growth)", fontsize=24)
path = data_path + "Graphs/" + repl_name
plt.savefig(path, bbox_extra_artists=(lgd,), bbox_inches='tight')
plt.close()
""" Auxillary Functions """
# P-test on individual wells
def t_test(data1, data2) -> int:
ind_ttest = stats.ttest_ind(data1, data2)
return ind_ttest
def fit_model(df, well):
# Calculate exponential portion of data for line fit
exp = exponential_section(df, well)
# Fitting lines to exponential portions to graph
slope = 0
if not exp.empty:
slope, line = fit_line(exp, well)
else:
line = [(0,0), (0,0)]
# Fit a logistical model to calculate growth rate
p0 = np.random.exponential(size=3) # Initialize random values
bounds = (0, [10000000., 100., 10000000.]) # Set bounds
# Prepare model
xt = np.array(df["Time"])
yt = np.array(df[well])
    # If no logistic curve can be fit, default to less sophisticated method of fitting a line to the exponential section of the graph
try:
# Fit model 1
(a, gr, ymax), cov = optim.curve_fit(logistic, xt, yt, bounds=bounds, p0=p0)
except (RuntimeError, ValueError):
gr = slope
ymax = max(df[well])
return gr, ymax, line
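# The curve_fit call above references a `logistic` helper that is not shown in this
# excerpt. A minimal three-parameter sketch consistent with the (a, gr, ymax) unpacking
# and the non-negative bounds would look like the following (an assumption, not the
# original implementation):
def logistic_sketch(t, a, gr, ymax):
    # Standard logistic growth curve: ymax / (1 + a * exp(-gr * t))
    return ymax / (1.0 + a * np.exp(-gr * t))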
# Estimates exponential growth section of growth curve to compute growth rate
def exponential_section(df, well):
ymax = max(df[well])
ymin = min(df[well])
ymid = (ymax + ymin) / 2.0
span = ymax - ymin
low = ymid - (span * 0.40)
high = ymid + (span * 0.40)
exp = df.loc[(df[well] >= low) & (df[well] <= high)]
return exp[["Time", well]]
# Fits a line to a given section of a graph. Returns the slope and endpoints of the line
def fit_line(exp, well):
exp["Time"] = pd.to_numeric(exp["Time"])
exp[well] = | pd.to_numeric(exp[well]) | pandas.to_numeric |
from typing import List, Optional, Dict
from matplotlib.axes import Axes
from numpy import sum
from pandas import Series, DataFrame, isnull
from survey.mixins.data_mixins import ObjectDataMixin
from survey.mixins.data_types.categorical_mixin import CategoricalMixin
from survey.mixins.named import NamedMixin
from survey.questions import Question
class CategoricalPercentageQuestion(
NamedMixin,
ObjectDataMixin,
CategoricalMixin,
Question
):
"""
Class to represent a Survey Question where the Respondent is asked to
assign a percentage to a number of Categorical items, totalling a maximum
of 100%.
"""
def __init__(self, name: str, text: str, categories: List[str],
data: Optional[Series] = None):
"""
Create a new categorical percentage question.
:param name: A pythonic name for the question.
:param text: The text asked in the question.
:param categories: The list of possible choices.
:param data: Optional pandas Series of responses. Each non-null response
should be a Dict[str, float] mapping categories to
percentages.
"""
self._set_name_and_text(name, text)
self._set_categories(categories)
self.data = data
def _validate_data(self, data: Series):
data = data.dropna()
values = Series([
value for answer in data.values
for value in answer.values()
])
if (values < 0).sum() != 0:
raise ValueError(
'Data for CategoricalPercentageQuestion must be non-negative.'
)
sums = data.map(lambda d: sum(list(d.values())))
if (sums > 100).sum() != 0:
raise ValueError(
'Answers to CategoricalPercentageQuestion cannot sum to more '
'than 100%'
)
def make_features(self, answers: Series = None, drop_na: bool = True,
naming: str = '{{name}}: {{choice}}') -> DataFrame:
"""
Create DataFrame of features for use in ML.
:param answers: Answers to the Question from a Survey. If left as None
then use the Question's attached data.
:param drop_na: Whether to drop null rows (rows where respondent was not
asked a question).
:param naming: Pattern to use to name the columns.
'{{name}}' will be substituted with the name of the
question.
'{{choice}}' will be substituted with the name of the
choice.
"""
if answers is None:
answers = self._data
if drop_na:
# drop respondents that weren't asked the question
answers = answers.dropna()
feature_list = []
categories = self.category_names
if len(answers) > 0:
# create features
for _, answer in answers.iteritems():
if isnull(answer):
feature_list.append({})
else:
feature_list.append(answer)
features = DataFrame(feature_list)
for category in categories:
if category not in features.columns:
features[category] = 0
some_values = isnull(features).sum(axis=1) < len(categories)
features.loc[some_values] = features.loc[some_values].fillna(0)
features = features[categories]
else:
# create empty dataframe with the right columns
features = | DataFrame(columns=categories, index=answers.index) | pandas.DataFrame |
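# Sketch of the answer format the class above validates (hypothetical respondent data,
# not from the original module): each non-null answer maps category -> percentage, with
# non-negative values summing to at most 100.
#
#   answers = Series([
#       {"rent": 40.0, "food": 35.0, "other": 25.0},
#       {"rent": 60.0, "food": 20.0},   # may sum to less than 100
#       None,                           # respondent not asked
#   ])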
import pandas as pd
import numpy as np
ht_fail = pd.read_csv('/content/sample_data/heart failur classification dataset.csv')
ht_fail.head(5)
ht_fail.shape
ht_fail.isnull()
ht_fail.isnull().sum()
#Imputing missing values
from sklearn.impute import SimpleImputer
impute = SimpleImputer(missing_values=np.nan, strategy='mean')
impute.fit(ht_fail[['time']])
ht_fail['time'] = impute.transform(ht_fail[['time']])
ht_fail[['time']]
#Imputing missing values
from sklearn.impute import SimpleImputer
impute = SimpleImputer(missing_values=np.nan, strategy='mean')
impute.fit(ht_fail[['serum_sodium']])
ht_fail['serum_sodium'] = impute.transform(ht_fail[['serum_sodium']])
ht_fail[['serum_sodium']]
ht_fail.isnull().sum()
#Handling categorical features
#ht_fail.info
ht_fail
ht_fail['smoking'].unique()
ht_fail['smoking'] = ht_fail['smoking'].map({'No':0,'Yes':1})
ht_fail
ht_fail['sex'].unique()
ht_fail['sex'] = ht_fail['sex'].map({'Male':0,'Female':1})
ht_fail
#Train_Test Split
from sklearn.model_selection import train_test_split
x_train, x_test, y_train, y_test = train_test_split(ht_fail.iloc[:, :-1], ht_fail.iloc[:,-1],random_state=1)
#SVM
from sklearn.svm import SVC
svc = SVC(kernel="linear")
svc.fit(x_train, y_train)
pre_score_svm = svc.score(x_test, y_test)
print("Training accuracy of the model is {:.2f}".format(svc.score(x_train, y_train)))
print("Testing accuracy of the model is {:.2f}".format(svc.score(x_test, y_test)))
predictions = svc.predict(x_test)
print(predictions)
#MLP
from sklearn.neural_network import MLPClassifier
nnc=MLPClassifier(hidden_layer_sizes=(7), activation="relu", max_iter=1000000)
nnc.fit(x_train, y_train)
pre_score_mlp = nnc.score(x_test, y_test)
print("The Training accuracy of the model is {:.2f}".format(nnc.score(x_train, y_train)))
print("The Testing accuracy of the model is {:.2f}".format(nnc.score(x_test, y_test)))
predictions = nnc.predict(x_test)
print(predictions)
#Random Forest
from sklearn.ensemble import RandomForestClassifier
rfc = RandomForestClassifier(n_estimators=50)
rfc.fit(x_train, y_train)
pre_score_rndmForest = rfc.score(x_test, y_test)
print("The Training accuracy of the model is {:.2f}".format(rfc.score(x_train, y_train)))
print("The Testing accuracy of the model is {:.2f}".format(rfc.score(x_test, y_test)))
predictions = rfc.predict(x_test)
print(predictions)
#performance without dimensionality reduction
from sklearn.neighbors import KNeighborsClassifier
knn=KNeighborsClassifier(n_neighbors=4)
knn.fit(x_train, y_train)
print("Training accuracy is {:.2f}".format(knn.score(x_train, y_train)) )
print("Testing accuracy is {:.2f} ".format(knn.score(x_test, y_test)) )
htfail_origin = np.array(ht_fail.iloc[:, :-1])
htfail_origin_target = np.array(ht_fail.iloc[:,-1])
#dimensionality reduction
from sklearn.preprocessing import StandardScaler
scaler= StandardScaler()
htfail_df= pd.DataFrame(scaler.fit_transform(htfail_origin.data))
htfail_df=htfail_df.assign(target=htfail_origin_target)
#PCA
from sklearn.decomposition import PCA
pca = PCA(n_components=7)
principal_components= pca.fit_transform(htfail_origin.data)
print(principal_components)
pca.explained_variance_ratio_
sum(pca.explained_variance_ratio_)
principal_df = | pd.DataFrame(data=principal_components) | pandas.DataFrame |
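# Aside on the PCA step above (illustrative, not part of the original notebook): the
# explained-variance ratios can be accumulated to pick the smallest number of
# components that reaches a target, e.g. 95% of the total variance.
#
#   cumvar = np.cumsum(pca.explained_variance_ratio_)
#   n_keep = int(np.argmax(cumvar >= 0.95)) + 1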
# -*- coding: utf-8 -*-
"""
This is a module for bifurcation analysis.
"""
__author__ = '<NAME>'
__status__ = 'Editing'
__version__ = '1.1.0'
__date__ = '26 August 2020'
import os
import sys
sys.path.append('../')
sys.path.append('../anmodel')
"""
LIMIT THE NUMBER OF THREADS!
change local env variables BEFORE importing numpy
"""
os.environ["OMP_NUM_THREADS"] = "1" # 2nd likely
os.environ["NUMEXPR_NUM_THREADS"] = "1"
os.environ["MKL_NUM_THREADS"] = "1" # most likely
from copy import copy
from datetime import datetime
import matplotlib.patches as mpatches
import matplotlib.pyplot as plt
from multiprocessing import Pool
import numpy as np
import pandas as pd
from pathlib import Path
import pickle
from scipy.integrate import odeint
import scipy.optimize
from scipy.stats import gaussian_kde
import sympy as sym
from tqdm import tqdm
from typing import Dict, List, Tuple, Iterator, Optional
import anmodel
import analysistools
class Analysis:
def __init__(self, param: pd.Series,
model: str='AN', wavepattern: str='SWS',
channel_bool: Optional[Dict]=None,
model_name: Optional[str]=None,
ion: bool=False, concentration: Dict=None) -> None:
self.param = param
self.model = model
self.wavepattern = wavepattern
if self.model == 'SAN':
self.model_name = 'SAN'
self.model = anmodel.models.SANmodel(ion, concentration)
if self.model == "X":
if channel_bool is None:
raise TypeError('Designate channel in argument of X model.')
self.model_name = model_name
self.model = anmodel.models.Xmodel(channel_bool, ion, concentration)
self.model.set_params(self.param)
self.samp_freq = 100000
self.l_pad = 0.1
self.v_pad = 10
def change_params(self, channel:str, magnif: float) -> None:
if channel != 'g_nal' and channel != 'g_kl':
p = copy(self.param)
p[channel] = p[channel] * magnif
self.model.set_params(p)
else:
self.model.set_params(self.param)
self.model.leak.set_div()
gnal = self.model.leak.gnal
gkl = self.model.leak.gkl
if channel == 'g_nal':
gxl = copy(gnal)
gxl = gxl * magnif
self.model.leak.set_gna(gxl)
elif channel == 'g_kl':
gxl = copy(gkl)
gxl = gxl * magnif
self.model.leak.set_gk(gxl)
def ode_bifur(self, channel: Optional[str]=None,
magnif: Optional[float]=None) -> np.ndarray:
if channel is not None:
self.change_params(channel, magnif)
s, _ = self.model.run_odeint(samp_freq=self.samp_freq)
return s
def set_s(self, st: int, en: int,
channel: Optional[str]=None,
magnif: Optional[float]=None) -> None:
self.s = self.ode_bifur(channel, magnif)
self.st = st
self.en = en
def nullcline(self, t: int, ax: plt.axes,
mode='t', flow=False, density=1,
channel: Optional[str]=None,
magnif: Optional[float]=None) -> plt.axes:
lmin = self.s[self.st:self.en, 1].min() - self.l_pad
lmax = self.s[self.st:self.en, 1].max() + self.l_pad
vmin = self.s[self.st:self.en, 0].min() - self.v_pad
vmax = self.s[self.st:self.en, 0].max() + self.v_pad
l_grid, v_grid = np.meshgrid(np.arange(lmin, lmax, 0.001),
np.arange(vmin, vmax, 0.1))
if mode == 't':
ca = self.s[t, 2]
elif mode == 'ca':
ca = t
if self.model_name == 'SAN':
dldt = np.array([self.model.kvhh.dndt(v, n) for (v, n) in zip(v_grid.ravel(), l_grid.ravel())]).reshape(l_grid.shape)
dvdt = self.model.dvdt([v_grid, l_grid, ca])
elif self.model_name == 'RAN':
dldt = self.model.kvsi.dmdt(v_grid, l_grid)
dvdt = self.model.dvdt({
'v': v_grid,
'm_kvsi': l_grid,
'ca': ca,
})
ct1 = ax.contour(v_grid, l_grid, dldt,
levels=[0], colors='steelblue', # 4682b4
linestyles='-', linewidths=3)
ct2 = ax.contour(v_grid, l_grid, dvdt,
levels=[0], colors='forestgreen', # 228b22
linestyles='-', linewidths=3)
# ct1.collections[0].set_label('$dm/dt=0$')
# ct2.collections[0].set_label('$dv/dt=0$')
if flow:
ax.streamplot(np.arange(vmin, vmax, 0.1),
np.arange(lmin, lmax, 0.001),
dvdt.T, dldt.T, color='gray', density=density)
return ax
def diagram(self, ca_range: List, start_points: List[float],
ax: plt.axes, plot: bool=True, stability: bool=True,
legend: bool=False, color=True) -> plt.axes :
eq_color = {
'Stable node' : 'C0',
'Unstable node' : 'limegreen', # ff8c00
'Saddle' : 'darkorange', # 556b2f
'Stable focus' : 'turquoise', # 4169e1
'Unstable focus' : 'slateblue', # c71585
'Center (Hopf)' : 'C5',
'Transcritical (Saddle-Node)' : 'C6'
}
eq_linestyle = {
'Stable node' : 'solid',
'Unstable node' : 'solid',
'Saddle' : 'solid',
'Stable focus' : 'solid',
'Unstable focus' : 'solid',
'Center (Hopf)' : 'solid',
'Transcritical (Saddle-Node)' : 'solid'
}
ca_space = np.linspace(ca_range[0], ca_range[1], 1000)[::-1]
def _findroot(func, init):
sol, _, convergence, _ = scipy.optimize.fsolve(func, init, full_output=1)
if convergence == 1:
return sol
return np.array([np.nan]*1)
def _numerical_continuation(func, v_ini: float, ca_space: np.ndarray):
eq = []
for ca in ca_space:
eq.append(_findroot(lambda x: func(x, ca),
eq[-1] if eq else v_ini))
return eq
def _func(v: float, ca: float) -> float:
if self.model_name == 'SAN':
l_inf = self.model.kvhh.n_inf(v=v)
dvdt = self.model.dvdt([v, l_inf, ca])
elif self.model_name == 'RAN':
l_inf = self.model.kvsi.m_inf(v=v)
dvdt = self.model.dvdt({
'v': v,
'm_kvsi': l_inf,
'ca': ca
})
return dvdt
def _jacobian(v, ca):
x, y = sym.symbols('x, y')
if self.model_name == 'SAN':
l = self.model.kvhh.n_inf(v)
dfdx = sym.diff(self.model.dvdt([x, y, ca]), x)
dfdy = sym.diff(self.model.dvdt([x, y, ca]), y)
dgdx = sym.diff(self.model.kvhh.dndt(v=x, n=y), x)
dgdy = sym.diff(self.model.kvhh.dndt(v=x, n=y), y)
elif self.model_name == 'RAN':
l = self.model.kvsi.m_inf(v=v)
dfdx = sym.diff(self.model.dvdt({'v': x, 'm_kvsi': y, 'ca': ca}), x)
dfdy = sym.diff(self.model.dvdt({'v': x, 'm_kvsi': y, 'ca': ca}), y)
dgdx = sym.diff(self.model.kvsi.dmdt(v=x, m=y), x)
dgdy = sym.diff(self.model.kvsi.dmdt(v=x, m=y), y)
j = np.array([[np.float(dfdx.subs([(x, v), (y, l)])),
np.float(dfdy.subs([(x, v), (y, l)]))],
[np.float(dgdx.subs([(x, v), (y, l)])),
np.float(dgdy.subs([(x, v), (y, l)]))]])
return j
def _stability(j) -> str:
det = np.linalg.det(j)
trace = np.matrix.trace(j)
if np.isclose(trace, 0) and np.isclose(det, 0):
nat = 'Center (Hopf)'
elif np.isclose(det, 0):
nat = 'Transcritical (Saddle-Node)'
elif det < 0:
nat = 'Saddle'
else:
nat = 'Stable' if trace < 0 else 'Unstable'
nat += ' focus' if (trace**2 - 4 * det) < 0 else ' node'
return nat
def _get_branches(start_points):
branches = []
for init in start_points:
eq = _numerical_continuation(_func, init, ca_space)
nat = [_stability(_jacobian(v, ca))
for (v, ca) in zip(eq, ca_space)]
branches.append((np.array([x for x in eq]), nat))
return branches
def _get_segments(nats: List['str']) -> Dict:
st = 0
seg = {}
for i, val in enumerate(nats[1:], 1):
if val != nats[st] or i == len(nats)-1:
seg[(st, i)] = nats[st]
st = i
return seg
if not plot:
eq_lst = []
for init in start_points:
eq = _numerical_continuation(_func, init, ca_space)
eq_lst.append(eq)
return eq_lst
if not stability:
for init in start_points:
eq = _numerical_continuation(_func, init, ca_space)
ax.plot(ca_space, [x for x in eq], color='k')
return ax
branches = _get_branches(start_points)
labels = frozenset()
for eq, nat in branches:
labels = labels.union(frozenset(nat))
seg = _get_segments(nat)
for idx, n in seg.items():
# ax.plot(ca_space[idx[0]:idx[1]], eq[idx[0]:idx[1]],
# color=eq_color[n] if n in eq_color else 'k',
# linestyle=eq_linestyle[n] if n in eq_linestyle else '-')
if color:
ax.plot(eq[idx[0]:idx[1]], ca_space[idx[0]:idx[1]],
color=eq_color[n] if n in eq_color else 'k',
linestyle=eq_linestyle[n] if n in eq_linestyle else '-',
linewidth=4)
else:
ax.plot(eq[idx[0]:idx[1]], ca_space[idx[0]:idx[1]],
color='gray',
linestyle=eq_linestyle[n] if n in eq_linestyle else '-',
linewidth=4)
if legend:
ax.legend([mpatches.Patch(color=eq_color[n]) for n in labels],
labels,
bbox_to_anchor=(1.05, 1), loc='upper left',
borderaxespad=0, fontsize=16)
return ax
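# Worked example of the determinant/trace classification used in _stability above
# (illustrative only; the Jacobian values are made up):
#
#   J = np.array([[-0.5, 1.0],
#                 [-1.0, -0.5]])
#   det, tr = np.linalg.det(J), np.trace(J)   # det = 1.25 > 0, tr = -1.0 < 0
#   tr**2 - 4*det                             # 1.0 - 5.0 = -4.0 < 0  -> 'Stable focus'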
class AttractorAnalysis:
def __init__(self, model: str='AN', wavepattern: str='SWS',
channel_bool: Optional[Dict]=None,
model_name: Optional[str]=None,
ion: bool=False, concentration: Dict=None) -> None:
self.model = model
self.wavepattern = wavepattern
if self.model == 'SAN':
self.model_name = 'SAN'
self.model = anmodel.models.SANmodel(ion, concentration)
if self.model == 'X':
if channel_bool is None:
raise TypeError('Designate channel in argument of X model.')
self.model_name = model_name
self.model = anmodel.models.Xmodel(channel_bool, ion, concentration)
self.samp_freq = 10000
def dvdt(self, args: List) -> float:
if self.model_name == 'SAN':
v, n = args
return ((-10.0*self.model.params.area
* (self.model.kvhh.i(v, n=n)
+ self.model.cav.i(v)
+ self.model.kca.i(v, ca=self.ca)
+ self.model.nap.i(v)
+ self.model.leak.i(v)))
/ (10.0*self.model.params.cm*self.model.params.area))
elif self.model_name == 'RAN':
v, m = args
return ((-10.0*self.model.params.area
* (self.model.kvsi.i(v, m=m)
+ self.model.cav.i(v)
+ self.model.kca.i(v, ca=self.ca)
+ self.model.nap.i(v)
+ self.model.leak.i(v)))
/ (10.0*self.model.params.cm*self.model.params.area))
def diff_op(self, args: List, time: np.ndarray) -> List:
if self.model_name == 'SAN':
v, n = args
dvdt = self.dvdt(args)
dndt = self.model.kvhh.dndt(v=v, n=n)
return [dvdt, dndt]
elif self.model_name == 'RAN':
v, m = args
dvdt = self.dvdt(args)
dmdt = self.model.kvsi.dmdt(v=v, m=m)
return [dvdt, dmdt]
def run_odeint(self, ini: List, samp_len: int=10):
solvetime = np.linspace(1, 1000*samp_len, self.samp_freq*samp_len)
s, _ = odeint(self.diff_op, ini, solvetime,
atol=1.0e-5, rtol=1.0e-5, full_output=True)
return s
def change_param(self, channel: str, magnif: float) -> None:
if channel != 'g_nal' and channel != 'g_kl':
p = copy(self.param)
p[channel] = p[channel] * magnif
self.model.set_params(p)
else:
self.model.set_params(self.param)
self.model.leak.set_div()
gnal = self.model.leak.gnal
gkl = self.model.leak.gkl
if channel == 'g_nal':
gxl = copy(gnal)
gxl = gxl * magnif
self.model.leak.set_gna(gxl)
elif channel == 'g_kl':
gxl = copy(gkl)
gxl = gxl * magnif
self.model.leak.set_gk(gxl)
def find_attractor(self, start_points: Tuple) -> float:
def _findroot(func, init: float) -> np.ndarray:
sol, _, convergence, _ = scipy.optimize.fsolve(func, init, full_output=1)
if convergence == 1:
return sol
return np.array([np.nan]*1)
def _func(v: float, ca: float) -> float:
if self.model_name == 'SAN':
l_inf = self.model.kvhh.n_inf(v=v)
dvdt = self.model.dvdt([v, l_inf, ca])
elif self.model_name == 'RAN':
l_inf = self.model.kvsi.m_inf(v=v)
dvdt = self.model.dvdt({
'v': v,
'm_kvsi': l_inf,
'ca': ca
})
return dvdt
def _jacobian(v: float, ca: float) -> np.ndarray:
x, y = sym.symbols('x, y')
if self.model_name == 'SAN':
l = self.model.kvhh.n_inf(v)
dfdx = sym.diff(self.model.dvdt([x, y, ca]), x)
dfdy = sym.diff(self.model.dvdt([x, y, ca]), y)
dgdx = sym.diff(self.model.kvhh.dndt(v=x, n=y), x)
dgdy = sym.diff(self.model.kvhh.dndt(v=x, n=y), y)
elif self.model_name == 'RAN':
l = self.model.kvsi.m_inf(v=v)
dfdx = sym.diff(self.model.dvdt({'v': x, 'm_kvsi': y, 'ca': ca}), x)
dfdy = sym.diff(self.model.dvdt({'v': x, 'm_kvsi': y, 'ca': ca}), y)
dgdx = sym.diff(self.model.kvsi.dmdt(v=x, m=y), x)
dgdy = sym.diff(self.model.kvsi.dmdt(v=x, m=y), y)
j = np.array([[np.float(dfdx.subs([(x, v), (y, l)])),
np.float(dfdy.subs([(x, v), (y, l)]))],
[np.float(dgdx.subs([(x, v), (y, l)])),
np.float(dgdy.subs([(x, v), (y, l)]))]])
return j
def _stability(j: np.ndarray) -> str:
det = np.linalg.det(j)
trace = np.matrix.trace(j)
if np.isclose(trace, 0) and np.isclose(det, 0):
nat = 'Center (Hopf)'
elif np.isclose(det, 0):
nat = 'Transcritical (Saddle-Node)'
elif det < 0:
nat = 'Saddle'
else:
nat = 'Stable' if trace < 0 else 'Unstable'
nat += ' focus' if (trace**2 - 4 * det) < 0 else ' node'
return nat
for init in start_points:
eq = _findroot(lambda x: _func(x, self.ca), init)[0]
nat = _stability(_jacobian(eq, self.ca))
if nat == 'Stable focus':
return eq # v value at stable focus attractor
        raise Exception('Stable focus attractor was not found.')
def singleprocess(self, args: Tuple) -> None:
core, res_df, self.ca, v_start, res_p, resname, channel, magnif = args
samp_len = 10
vini_lst = res_df.columns
lini_lst = res_df.index
if channel is not None:
self.change_param(channel, magnif)
vatt = self.find_attractor(v_start)
if self.model_name == 'SAN':
latt = self.model.kvhh.n_inf(v=vatt)
elif self.model_name == 'RAN':
latt = self.model.kvsi.m_inf(v=vatt)
        def _recur(ini: List, samp_len: int) -> float:
s = self.run_odeint(ini, samp_len)
try:
t_v = np.where(np.abs(s[:, 0]-vatt) < 1.0e-3)[0]
t_l = np.where(np.abs(s[:, 1]-latt) < 1.0e-5)[0]
t = np.intersect1d(t_v, t_l)[0]
return t
except IndexError:
if samp_len > 100:
raise Exception
samp_len = samp_len * 2
                return _recur(ini, samp_len)
for lini in lini_lst:
for vini in tqdm(vini_lst):
ini = [vini, lini]
t = _recur(ini, samp_len)
# t = np.max([t_v, t_l])
res_df.loc[lini, vini] = t
with open(res_p/resname, 'wb') as f:
pickle.dump(res_df, f)
def multi_singleprocess(self, ncore: int, filename: str,
vargs: List, largs: List ,
ca: float, v_start: Tuple,
channel: Optional[str]=None,
magnif: Optional[float]=None) -> None:
"""
Parameters
----------
vargs : Tuple
vmin, vmax, vint
largs : Tuple
l: n_kvhh in SAN model and m_kvsi in RAN model. lmin, lmax, lint
"""
now = datetime.now()
date: str = f'{now.year}_{now.month}_{now.day}'
p: Path = Path.cwd().parents[0]
data_p: Path = p / 'results' / f'{self.wavepattern}_params' / self.model_name
if channel is None:
dicname = f'{ca}_{vargs[0]}_{vargs[1]}_{largs[0]}_{largs[1]}'
else:
dicname = f'{ca}_{vargs[0]}_{vargs[1]}_{largs[0]}_{largs[1]}_{channel}_{magnif}'
res_p: Path = p / 'results' / 'bifurcation' / 'attractor_time' / f'{self.model_name}' / dicname
res_p.mkdir(parents=True, exist_ok=True)
with open(data_p/filename, 'rb') as f:
self.param = pickle.load(f)
self.model.set_params(self.param)
v_lst = np.linspace(vargs[0], vargs[1], vargs[2])
l_lst = np.linspace(largs[0], largs[1], vargs[2])
args: List = []
for core, ll_lst in enumerate(np.array_split(l_lst, ncore)):
res_df = pd.DataFrame(index=ll_lst, columns=v_lst)
resname = f'{date}_{filename}_{core}.pickle'
args.append((core, res_df, ca, v_start, res_p, resname, channel, magnif))
with Pool(processes=ncore) as pool:
pool.map(self.singleprocess, args)
def load_data(self, date: str, filename: str,
ncore: int, ca: float,
vrange: List, lrange: List,
channel: Optional[str]=None,
magnif: Optional[float]=None) -> None:
p: Path = Path.cwd().parents[0]
if channel is None:
dicname = f'{ca}_{vrange[0]}_{vrange[1]}_{lrange[0]}_{lrange[1]}'
else:
dicname = f'{ca}_{vrange[0]}_{vrange[1]}_{lrange[0]}_{lrange[1]}_{channel}_{magnif}'
res_p: Path = p / 'results' / 'bifurcation' / 'attractor_time' / f'{self.model_name}' / dicname
with open(res_p/f'{date}_{filename}.pickle_0.pickle', 'rb') as f:
self.res_df = pickle.load(f)
for core in range(1, ncore):
resname = f'{date}_{filename}.pickle_{core}.pickle'
with open(res_p/resname, 'rb') as f:
r_df = pickle.load(f)
self.res_df = pd.concat([self.res_df, r_df]).sort_index(ascending=False)
class WavePattern:
def __init__(self, model: str='AN', wavepattern: str='SWS',
channel_bool: Optional[Dict]=None,
model_name: Optional[str]=None,
ion: bool=False, concentration: Dict=None) -> None:
self.model = model
self.wavepattern = wavepattern
if self.model == 'AN':
self.model_name = 'AN'
self.model = anmodel.models.ANmodel(ion, concentration)
if self.model == 'SAN':
self.model_name = 'SAN'
self.model = anmodel.models.SANmodel(ion, concentration)
if self.model == "X":
if channel_bool is None:
raise TypeError('Designate channel in argument of X model.')
self.model_name = model_name
self.model = anmodel.models.Xmodel(channel_bool, ion, concentration)
self.samp_freq=1000
self.wc = anmodel.analysis.WaveCheck()
def singleprocess(self, args: List) -> None:
now, param, channel = args
date: str = f'{now.year}_{now.month}_{now.day}'
p: Path = Path.cwd().parents[0]
res_p: Path = p / 'results' / 'bifurcation' / 'wavepattern' / f'{self.model_name}_{self.wavepattern}'
res_p.mkdir(parents=True, exist_ok=True)
save_p: Path = res_p / f'{date}_{channel}.pickle'
magnif_arr = np.arange(0, 2.001, 0.001)
df = pd.DataFrame(index=magnif_arr, columns=['WavePattern'])
for magnif in tqdm(magnif_arr):
if channel != 'g_kleak' and channel != 'g_naleak':
p = copy(param)
p[channel] = p[channel] * magnif
self.model.set_params(p)
elif channel == 'g_kleak':
self.model.leak.set_div()
self.model.set_params(param)
g_kl = self.model.leak.gkl
g = copy(g_kl)
g = g * magnif
self.model.leak.set_gk(g)
elif channel == 'g_naleak':
self.model.leak.set_div()
self.model.set_params(param)
g_nal = self.model.leak.gnal
g = copy(g_nal)
g = g * magnif
self.model.leak.set_gna(g)
s, _ = self.model.run_odeint(samp_freq=self.samp_freq)
# if you want to detect the SWS firing pattern in the method that
# Tatsuki et al. or Yoshida et al. applied, you should use the code below.
# if self.wavepattern == 'SWS':
# wp: anmodel.analysis.WavePattern = self.wc.pattern(s[5000:, 0])
# elif self.wavepattern == 'SPN':
# wp: anmodel.analysis.WavePattern = self.wc.pattern_spn(s[5000:, 0])
if self.wavepattern == 'SWS':
spike = 'peak'
elif self.wavepattern == 'SPN':
spike = 'bottom'
wp: anmodel.analysis.WavePattern = self.wc.pattern_spn(s[5000:, 0], spike)
df.loc[magnif] = wp
with open(save_p, 'wb') as f:
pickle.dump(df, f)
def multi_singleprocess(self, filename) -> None:
args = []
now = datetime.now()
p: Path = Path.cwd().parents[0]
data_p: Path = p / 'results' / f'{self.wavepattern}_params' / self.model_name
with open(data_p/filename, 'rb') as f:
param = pickle.load(f)
ch_lst = list(param.index)
if 'g_leak' in ch_lst:
ch_lst.remove('g_leak')
ch_lst.extend(['g_kleak', 'g_naleak'])
for channel in ch_lst:
args.append((now, param, channel))
with Pool(processes=len(ch_lst)) as pool:
pool.map(self.singleprocess, args)
class Simple(WavePattern):
def __init__(self, model: str='AN', wavepattern: str='SWS',
channel_bool: Optional[Dict]=None,
model_name: Optional[str]=None,
ion: bool=False, concentration: Dict=None) -> None:
super().__init__(model, wavepattern, channel_bool, model_name,
ion, concentration)
def singleprocess(self, args: List) -> None:
now, df, channel = args
date: str = f'{now.year}_{now.month}_{now.day}'
p: Path = Path.cwd().parents[0]
res_p: Path = p / 'results' / 'bifurcation' / 'simple' / f'{self.model_name}_{self.wavepattern}'
res_p.mkdir(parents=True, exist_ok=True)
save_p: Path = res_p / f'{date}_{channel}.pickle'
res_df = pd.DataFrame(index=range(len(df)), columns=[channel])
def _judge() -> anmodel.analysis.WavePattern:
s, _ = self.model.run_odeint(samp_freq=self.samp_freq)
# if you want to detect the SWS firing pattern in the method that
# Tatsuki et al. or Yoshida et al. applied, you should use the code below.
# if self.wavepattern == 'SWS':
# wp: anmodel.analysis.WavePattern = self.wc.pattern(s[5000:, 0])
# elif self.wavepattern == 'SPN':
# wp: anmodel.analysis.WavePattern = self.wc.pattern_spn(s[5000:, 0])
if self.wavepattern == 'SWS':
spike = 'peak'
elif self.wavepattern == 'SPN':
spike = 'bottom'
wp: anmodel.analysis.WavePattern = self.wc.pattern_spn(s[5000:, 0], spike)
return wp
if channel != 'g_kleak' and channel != 'g_naleak':
for i in tqdm(range(len(df))):
param = df.iloc[i, :]
if channel != 't_ca':
param[channel] = param[channel] / 1000
else:
param[channel] = param[channel] * 1000
self.model.set_params(param)
wp = _judge()
res_df.iloc[i, 0] = wp
elif channel == 'g_kleak':
self.model.leak.set_div()
for i in tqdm(range(len(df))):
param = df.iloc[i, :]
self.model.set_params(param)
g_kl = self.model.leak.gkl
self.model.leak.set_gk(g_kl/1000)
wp = _judge()
res_df.iloc[i, 0] = wp
elif channel == 'g_naleak':
self.model.leak.set_div()
for i in tqdm(range(len(df))):
param = df.iloc[i, :]
self.model.set_params(param)
g_nal = self.model.leak.gnal
self.model.leak.set_gna(g_nal/1000)
wp = _judge()
res_df.iloc[i, 0] = wp
with open(save_p, 'wb') as f:
pickle.dump(res_df, f)
def multi_singleprocess(self, filename, divleak=False) -> None:
args = []
now = datetime.now()
p: Path = Path.cwd().parents[0]
data_p: Path = p / 'results' / f'{self.wavepattern}_params' / self.model_name
with open(data_p/filename, 'rb') as f:
df = pickle.load(f)
ch_lst = list(df.columns)
if 'g_leak' in ch_lst:
if divleak:
ch_lst.remove('g_leak')
ch_lst.extend(['g_kleak', 'g_naleak'])
else:
pass
for channel in ch_lst:
args.append((now, df, channel))
with Pool(processes=len(ch_lst)) as pool:
pool.map(self.singleprocess, args)
class Property:
def __init__(self, channel: str, magnif: float,
model: str='AN', wavepattern: str='SWS',
channel_bool: Optional[Dict]=None,
model_name: Optional[str]=None,
ion: bool=False, concentration: Dict=None) -> None:
self.model = model
self.wavepattern = wavepattern
if self.model == 'AN':
self.model_name = 'AN'
self.model = anmodel.models.ANmodel(ion, concentration)
if self.model == 'SAN':
self.model_name = 'SAN'
self.model = anmodel.models.SANmodel(ion, concentration)
if self.model == "X":
if channel_bool is None:
raise TypeError('Designate channel in argument of X model.')
self.model_name = model_name
self.model = anmodel.models.Xmodel(channel_bool, ion, concentration)
self.samp_freq = 1000
self.channel = channel
self.magnif = magnif
self.wc = anmodel.analysis.WaveCheck()
self.fs = anmodel.analysis.FreqSpike(samp_freq=self.samp_freq)
def getinfo(self, v: np.ndarray) -> List[float]:
if self.wavepattern == 'SWS':
nspike, _ = self.fs.get_spikeinfo(v)
sq: np.ndarray = self.fs.square_wave(v, spike='peak')
elif self.wavepattern == 'SPN':
nspike, _ = self.fs.get_ahpinfo(v)
sq: np.ndarray = self.fs.square_wave(v, spike='bottom')
lenburst: int = len(np.where(sq==1)[0])
lensilent: int = len(np.where(sq==0)[0])
reslst: List[float] = [nspike,
nspike / 6,
lenburst,
lenburst / 6,
lensilent,
lensilent / 6
]
return reslst
def main(self, filename: str, t_filename='normal'):
now: datetime = datetime.now()
date: str = f'{now.year}_{now.month}_{now.day}'
p: Path = Path.cwd().parents[0]
res_p: Path = p / 'results' / 'bifurcation' / 'property' / f'{self.model_name}_{self.wavepattern}'
res_p.mkdir(parents=True, exist_ok=True)
save_p: Path = res_p /f'{filename}'
save_p.mkdir(parents=True, exist_ok=True)
data_p: Path = p / 'results' / f'{self.wavepattern}_params' / self.model_name
time_p: Path = p / 'results' / 'normalization_mp_ca'
if t_filename == 'normal':
t_file = time_p / f'{self.wavepattern}_{self.model_name}_time.pickle'
elif t_filename == 'bifur':
t_file = time_p /'bifurcation_all'/f'{self.model_name}' / f'{filename}_{self.channel}_{self.magnif}_time.pickle'
else:
t_file = time_p / f'{t_filename}'
with open(data_p/filename, 'rb') as f:
param_df = pickle.load(f)
param_df.index = range(len(param_df))
with open(t_file, 'rb') as f:
time_df = pickle.load(f).dropna(how='all')
time_df.index = range(len(time_df))
print(len(param_df))
print(len(time_df))
if len(param_df) != len(time_df):
raise IndexError('Parameter dataframe and time dataframe do not match!!')
data: List = ['nspike',
'average_spike_per_burst',
'lenburst',
'average_length_of_burst',
'lensilent',
'average_length_of_silent'
]
df: pd.DataFrame = | pd.DataFrame(columns=data) | pandas.DataFrame |
from collections import OrderedDict
import datetime
from datetime import timedelta
from io import StringIO
import json
import os
import numpy as np
import pytest
from pandas.compat import is_platform_32bit, is_platform_windows
import pandas.util._test_decorators as td
import pandas as pd
from pandas import DataFrame, DatetimeIndex, Series, Timestamp, read_json
import pandas._testing as tm
_seriesd = tm.getSeriesData()
_tsd = tm.getTimeSeriesData()
_frame = DataFrame(_seriesd)
_intframe = DataFrame({k: v.astype(np.int64) for k, v in _seriesd.items()})
_tsframe = DataFrame(_tsd)
_cat_frame = _frame.copy()
cat = ["bah"] * 5 + ["bar"] * 5 + ["baz"] * 5 + ["foo"] * (len(_cat_frame) - 15)
_cat_frame.index = pd.CategoricalIndex(cat, name="E")
_cat_frame["E"] = list(reversed(cat))
_cat_frame["sort"] = np.arange(len(_cat_frame), dtype="int64")
_mixed_frame = _frame.copy()
def assert_json_roundtrip_equal(result, expected, orient):
if orient == "records" or orient == "values":
expected = expected.reset_index(drop=True)
if orient == "values":
expected.columns = range(len(expected.columns))
tm.assert_frame_equal(result, expected)
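# Illustrative aside (not part of the original test module): why the helper above
# resets the index for "records"/"values" -- those orients do not serialize the index,
# and "values" drops the column labels as well, so a round-trip comes back with defaults.
#
#   df = pd.DataFrame({"x": [1, 2]}, index=["a", "b"])
#   df.to_json(orient="records")   # '[{"x":1},{"x":2}]'  index "a"/"b" is gone
#   df.to_json(orient="values")    # '[[1],[2]]'          column name "x" is gone too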
@pytest.mark.filterwarnings("ignore:the 'numpy' keyword is deprecated:FutureWarning")
class TestPandasContainer:
@pytest.fixture(autouse=True)
def setup(self):
self.intframe = _intframe.copy()
self.tsframe = _tsframe.copy()
self.mixed_frame = _mixed_frame.copy()
self.categorical = _cat_frame.copy()
yield
del self.intframe
del self.tsframe
del self.mixed_frame
def test_frame_double_encoded_labels(self, orient):
df = DataFrame(
[["a", "b"], ["c", "d"]],
index=['index " 1', "index / 2"],
columns=["a \\ b", "y / z"],
)
result = read_json(df.to_json(orient=orient), orient=orient)
expected = df.copy()
assert_json_roundtrip_equal(result, expected, orient)
@pytest.mark.parametrize("orient", ["split", "records", "values"])
def test_frame_non_unique_index(self, orient):
df = DataFrame([["a", "b"], ["c", "d"]], index=[1, 1], columns=["x", "y"])
result = read_json(df.to_json(orient=orient), orient=orient)
expected = df.copy()
assert_json_roundtrip_equal(result, expected, orient)
@pytest.mark.parametrize("orient", ["index", "columns"])
def test_frame_non_unique_index_raises(self, orient):
df = DataFrame([["a", "b"], ["c", "d"]], index=[1, 1], columns=["x", "y"])
msg = f"DataFrame index must be unique for orient='{orient}'"
with pytest.raises(ValueError, match=msg):
df.to_json(orient=orient)
@pytest.mark.parametrize("orient", ["split", "values"])
@pytest.mark.parametrize(
"data",
[
[["a", "b"], ["c", "d"]],
[[1.5, 2.5], [3.5, 4.5]],
[[1, 2.5], [3, 4.5]],
[[Timestamp("20130101"), 3.5], [Timestamp("20130102"), 4.5]],
],
)
def test_frame_non_unique_columns(self, orient, data):
df = DataFrame(data, index=[1, 2], columns=["x", "x"])
result = read_json(
df.to_json(orient=orient), orient=orient, convert_dates=["x"]
)
if orient == "values":
expected = pd.DataFrame(data)
if expected.iloc[:, 0].dtype == "datetime64[ns]":
# orient == "values" by default will write Timestamp objects out
# in milliseconds; these are internally stored in nanosecond,
# so divide to get where we need
# TODO: a to_epoch method would also solve; see GH 14772
expected.iloc[:, 0] = expected.iloc[:, 0].astype(np.int64) // 1000000
elif orient == "split":
expected = df
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("orient", ["index", "columns", "records"])
def test_frame_non_unique_columns_raises(self, orient):
df = DataFrame([["a", "b"], ["c", "d"]], index=[1, 2], columns=["x", "x"])
msg = f"DataFrame columns must be unique for orient='{orient}'"
with pytest.raises(ValueError, match=msg):
df.to_json(orient=orient)
def test_frame_default_orient(self, float_frame):
assert float_frame.to_json() == float_frame.to_json(orient="columns")
@pytest.mark.parametrize("dtype", [False, float])
@pytest.mark.parametrize("convert_axes", [True, False])
@pytest.mark.parametrize("numpy", [True, False])
def test_roundtrip_simple(self, orient, convert_axes, numpy, dtype, float_frame):
data = float_frame.to_json(orient=orient)
result = pd.read_json(
data, orient=orient, convert_axes=convert_axes, numpy=numpy, dtype=dtype
)
expected = float_frame
assert_json_roundtrip_equal(result, expected, orient)
@pytest.mark.parametrize("dtype", [False, np.int64])
@pytest.mark.parametrize("convert_axes", [True, False])
@pytest.mark.parametrize("numpy", [True, False])
def test_roundtrip_intframe(self, orient, convert_axes, numpy, dtype):
data = self.intframe.to_json(orient=orient)
result = pd.read_json(
data, orient=orient, convert_axes=convert_axes, numpy=numpy, dtype=dtype
)
expected = self.intframe.copy()
if (
numpy
and (is_platform_32bit() or is_platform_windows())
and not dtype
and orient != "split"
):
# TODO: see what is causing roundtrip dtype loss
expected = expected.astype(np.int32)
assert_json_roundtrip_equal(result, expected, orient)
@pytest.mark.parametrize("dtype", [None, np.float64, np.int, "U3"])
@pytest.mark.parametrize("convert_axes", [True, False])
@pytest.mark.parametrize("numpy", [True, False])
def test_roundtrip_str_axes(self, orient, convert_axes, numpy, dtype):
df = DataFrame(
np.zeros((200, 4)),
columns=[str(i) for i in range(4)],
index=[str(i) for i in range(200)],
dtype=dtype,
)
# TODO: do we even need to support U3 dtypes?
if numpy and dtype == "U3" and orient != "split":
pytest.xfail("Can't decode directly to array")
data = df.to_json(orient=orient)
result = pd.read_json(
data, orient=orient, convert_axes=convert_axes, numpy=numpy, dtype=dtype
)
expected = df.copy()
if not dtype:
expected = expected.astype(np.int64)
# index columns, and records orients cannot fully preserve the string
# dtype for axes as the index and column labels are used as keys in
# JSON objects. JSON keys are by definition strings, so there's no way
# to disambiguate whether those keys actually were strings or numeric
# beforehand and numeric wins out.
# TODO: Split should be able to support this
if convert_axes and (orient in ("split", "index", "columns")):
expected.columns = expected.columns.astype(np.int64)
expected.index = expected.index.astype(np.int64)
elif orient == "records" and convert_axes:
expected.columns = expected.columns.astype(np.int64)
assert_json_roundtrip_equal(result, expected, orient)
@pytest.mark.parametrize("convert_axes", [True, False])
@pytest.mark.parametrize("numpy", [True, False])
def test_roundtrip_categorical(self, orient, convert_axes, numpy):
# TODO: create a better frame to test with and improve coverage
if orient in ("index", "columns"):
pytest.xfail(f"Can't have duplicate index values for orient '{orient}')")
data = self.categorical.to_json(orient=orient)
if numpy and orient in ("records", "values"):
pytest.xfail(f"Orient {orient} is broken with numpy=True")
result = pd.read_json(
data, orient=orient, convert_axes=convert_axes, numpy=numpy
)
expected = self.categorical.copy()
expected.index = expected.index.astype(str) # Categorical not preserved
expected.index.name = None # index names aren't preserved in JSON
if not numpy and orient == "index":
expected = expected.sort_index()
assert_json_roundtrip_equal(result, expected, orient)
@pytest.mark.parametrize("convert_axes", [True, False])
@pytest.mark.parametrize("numpy", [True, False])
def test_roundtrip_empty(self, orient, convert_axes, numpy, empty_frame):
data = empty_frame.to_json(orient=orient)
result = pd.read_json(
data, orient=orient, convert_axes=convert_axes, numpy=numpy
)
expected = empty_frame.copy()
# TODO: both conditions below are probably bugs
if convert_axes:
expected.index = expected.index.astype(float)
expected.columns = expected.columns.astype(float)
if numpy and orient == "values":
expected = expected.reindex([0], axis=1).reset_index(drop=True)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("convert_axes", [True, False])
@pytest.mark.parametrize("numpy", [True, False])
def test_roundtrip_timestamp(self, orient, convert_axes, numpy):
# TODO: improve coverage with date_format parameter
data = self.tsframe.to_json(orient=orient)
result = pd.read_json(
data, orient=orient, convert_axes=convert_axes, numpy=numpy
)
expected = self.tsframe.copy()
if not convert_axes: # one off for ts handling
# DTI gets converted to epoch values
idx = expected.index.astype(np.int64) // 1000000
if orient != "split": # TODO: handle consistently across orients
idx = idx.astype(str)
expected.index = idx
assert_json_roundtrip_equal(result, expected, orient)
@pytest.mark.parametrize("convert_axes", [True, False])
@pytest.mark.parametrize("numpy", [True, False])
def test_roundtrip_mixed(self, orient, convert_axes, numpy):
if numpy and orient != "split":
pytest.xfail("Can't decode directly to array")
index = pd.Index(["a", "b", "c", "d", "e"])
values = {
"A": [0.0, 1.0, 2.0, 3.0, 4.0],
"B": [0.0, 1.0, 0.0, 1.0, 0.0],
"C": ["foo1", "foo2", "foo3", "foo4", "foo5"],
"D": [True, False, True, False, True],
}
df = DataFrame(data=values, index=index)
data = df.to_json(orient=orient)
result = pd.read_json(
data, orient=orient, convert_axes=convert_axes, numpy=numpy
)
expected = df.copy()
expected = expected.assign(**expected.select_dtypes("number").astype(np.int64))
if not numpy and orient == "index":
expected = expected.sort_index()
assert_json_roundtrip_equal(result, expected, orient)
@pytest.mark.parametrize(
"data,msg,orient",
[
('{"key":b:a:d}', "Expected object or value", "columns"),
# too few indices
(
'{"columns":["A","B"],'
'"index":["2","3"],'
'"data":[[1.0,"1"],[2.0,"2"],[null,"3"]]}',
r"Shape of passed values is \(3, 2\), indices imply \(2, 2\)",
"split",
),
# too many columns
(
'{"columns":["A","B","C"],'
'"index":["1","2","3"],'
'"data":[[1.0,"1"],[2.0,"2"],[null,"3"]]}',
"3 columns passed, passed data had 2 columns",
"split",
),
# bad key
(
'{"badkey":["A","B"],'
'"index":["2","3"],'
'"data":[[1.0,"1"],[2.0,"2"],[null,"3"]]}',
r"unexpected key\(s\): badkey",
"split",
),
],
)
def test_frame_from_json_bad_data_raises(self, data, msg, orient):
with pytest.raises(ValueError, match=msg):
read_json(StringIO(data), orient=orient)
@pytest.mark.parametrize("dtype", [True, False])
@pytest.mark.parametrize("convert_axes", [True, False])
@pytest.mark.parametrize("numpy", [True, False])
def test_frame_from_json_missing_data(self, orient, convert_axes, numpy, dtype):
num_df = DataFrame([[1, 2], [4, 5, 6]])
result = read_json(
num_df.to_json(orient=orient),
orient=orient,
convert_axes=convert_axes,
dtype=dtype,
)
assert np.isnan(result.iloc[0, 2])
obj_df = DataFrame([["1", "2"], ["4", "5", "6"]])
result = read_json(
obj_df.to_json(orient=orient),
orient=orient,
convert_axes=convert_axes,
dtype=dtype,
)
if not dtype: # TODO: Special case for object data; maybe a bug?
assert result.iloc[0, 2] is None
else:
assert np.isnan(result.iloc[0, 2])
@pytest.mark.parametrize("inf", [np.inf, np.NINF])
@pytest.mark.parametrize("dtype", [True, False])
def test_frame_infinity(self, orient, inf, dtype):
# infinities get mapped to nulls which get mapped to NaNs during
# deserialisation
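        # (illustration: DataFrame([np.inf]).to_json() writes that cell as null)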
df = DataFrame([[1, 2], [4, 5, 6]])
df.loc[0, 2] = inf
result = read_json(df.to_json(), dtype=dtype)
assert np.isnan(result.iloc[0, 2])
@pytest.mark.skipif(
is_platform_32bit(), reason="not compliant on 32-bit, xref #15865"
)
@pytest.mark.parametrize(
"value,precision,expected_val",
[
(0.95, 1, 1.0),
(1.95, 1, 2.0),
(-1.95, 1, -2.0),
(0.995, 2, 1.0),
(0.9995, 3, 1.0),
(0.99999999999999944, 15, 1.0),
],
)
def test_frame_to_json_float_precision(self, value, precision, expected_val):
df = pd.DataFrame([dict(a_float=value)])
encoded = df.to_json(double_precision=precision)
assert encoded == f'{{"a_float":{{"0":{expected_val}}}}}'
def test_frame_to_json_except(self):
df = DataFrame([1, 2, 3])
msg = "Invalid value 'garbage' for option 'orient'"
with pytest.raises(ValueError, match=msg):
df.to_json(orient="garbage")
def test_frame_empty(self):
df = DataFrame(columns=["jim", "joe"])
assert not df._is_mixed_type
tm.assert_frame_equal(
read_json(df.to_json(), dtype=dict(df.dtypes)), df, check_index_type=False
)
# GH 7445
result = pd.DataFrame({"test": []}, index=[]).to_json(orient="columns")
expected = '{"test":{}}'
assert result == expected
def test_frame_empty_mixedtype(self):
# mixed type
df = DataFrame(columns=["jim", "joe"])
df["joe"] = df["joe"].astype("i8")
assert df._is_mixed_type
tm.assert_frame_equal(
read_json(df.to_json(), dtype=dict(df.dtypes)), df, check_index_type=False
)
def test_frame_mixedtype_orient(self): # GH10289
vals = [
[10, 1, "foo", 0.1, 0.01],
[20, 2, "bar", 0.2, 0.02],
[30, 3, "baz", 0.3, 0.03],
[40, 4, "qux", 0.4, 0.04],
]
df = DataFrame(
vals, index=list("abcd"), columns=["1st", "2nd", "3rd", "4th", "5th"]
)
assert df._is_mixed_type
right = df.copy()
for orient in ["split", "index", "columns"]:
inp = df.to_json(orient=orient)
left = read_json(inp, orient=orient, convert_axes=False)
tm.assert_frame_equal(left, right)
right.index = np.arange(len(df))
inp = df.to_json(orient="records")
left = read_json(inp, orient="records", convert_axes=False)
tm.assert_frame_equal(left, right)
right.columns = np.arange(df.shape[1])
inp = df.to_json(orient="values")
left = read_json(inp, orient="values", convert_axes=False)
tm.assert_frame_equal(left, right)
def test_v12_compat(self, datapath):
df = DataFrame(
[
[1.56808523, 0.65727391, 1.81021139, -0.17251653],
[-0.2550111, -0.08072427, -0.03202878, -0.17581665],
[1.51493992, 0.11805825, 1.629455, -1.31506612],
[-0.02765498, 0.44679743, 0.33192641, -0.27885413],
[0.05951614, -2.69652057, 1.28163262, 0.34703478],
],
columns=["A", "B", "C", "D"],
index=pd.date_range("2000-01-03", "2000-01-07"),
)
df["date"] = pd.Timestamp("19920106 18:21:32.12")
df.iloc[3, df.columns.get_loc("date")] = pd.Timestamp("20130101")
df["modified"] = df["date"]
df.iloc[1, df.columns.get_loc("modified")] = pd.NaT
dirpath = datapath("io", "json", "data")
v12_json = os.path.join(dirpath, "tsframe_v012.json")
df_unser = pd.read_json(v12_json)
tm.assert_frame_equal(df, df_unser)
df_iso = df.drop(["modified"], axis=1)
v12_iso_json = os.path.join(dirpath, "tsframe_iso_v012.json")
        df_unser_iso = pd.read_json(v12_iso_json)
import dash
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Output, Input
import plotly.express as px
import plotly.graph_objects as go
import dash_bootstrap_components as dbc
import pandas as pd
import numpy as np
import datetime as dt
from datetime import datetime
#----------------------- Sales Data Preparation -----------------------------#
global df
df = pd.read_csv("Financial_Sample.csv")
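#----------------------- Illustrative Sketch (not original) -----------------#
# A minimal app object and placeholder layout, sketched here as a hedged
# example; the Bootstrap theme and component ids are assumptions, and no
# columns of Financial_Sample.csv are referenced.
app = dash.Dash(__name__, external_stylesheets=[dbc.themes.BOOTSTRAP])
app.layout = html.Div([
    html.H1("Financial Sales Dashboard"),
    dcc.Graph(id="sales-graph"),
])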
import numpy as np
import pandas as pd
from woodwork.logical_types import (
URL,
Age,
AgeNullable,
Boolean,
BooleanNullable,
Categorical,
CountryCode,
Datetime,
Double,
EmailAddress,
Filepath,
Integer,
IntegerNullable,
IPAddress,
LatLong,
NaturalLanguage,
Ordinal,
PersonFullName,
PhoneNumber,
PostalCode,
SubRegionCode,
Timedelta
)
from woodwork.statistics_utils import (
_get_describe_dict,
_get_mode,
_make_categorical_for_mutual_info,
_replace_nans_for_mutual_info
)
from woodwork.tests.testing_utils import mi_between_cols, to_pandas
from woodwork.utils import import_or_none
dd = import_or_none('dask.dataframe')
ks = import_or_none('databricks.koalas')
def test_get_mode():
series_list = [
pd.Series([1, 2, 3, 4, 2, 2, 3]),
pd.Series(['a', 'b', 'b', 'c', 'b']),
pd.Series([3, 2, 3, 2]),
        pd.Series([np.nan, np.nan, np.nan])
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Jun 23 21:52:43 2021
@author: q
Goal : Create a Perceptron algorithm
"""
# =============================================================================
# imports
# =============================================================================
# dataset generator
from sklearn.datasets import make_blobs
# train test split
from sklearn.model_selection import train_test_split
# data handling
import pandas as pd
import numpy as np
# data visualization
import matplotlib.pyplot as plt
import seaborn as sns
# =============================================================================
# program test
# =============================================================================
if __name__ == '__main__':
X, y = make_blobs(n_samples = 500,
centers = 2,
random_state = 0,
cluster_std = 0.8)
    y = pd.Series(y)
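    # =========================================================================
    # Illustrative sketch (not part of the original script): one plausible
    # perceptron fit/predict loop for the blobs above. X_train, weights, bias,
    # lr and epochs are names introduced here as assumptions.
    # =========================================================================
    X_train, X_test, y_train, y_test = train_test_split(
        X, y, test_size = 0.2, random_state = 0)

    weights = np.zeros(X_train.shape[1])
    bias = 0.0
    lr, epochs = 0.1, 20

    for _ in range(epochs):
        for xi, target in zip(X_train, y_train):
            # step activation; update weights only on a misclassification
            prediction = 1 if np.dot(xi, weights) + bias > 0 else 0
            update = lr * (target - prediction)
            weights += update * xi
            bias += update

    preds = np.array([1 if np.dot(xi, weights) + bias > 0 else 0
                      for xi in X_test])
    print('test accuracy :', (preds == np.asarray(y_test)).mean())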
# -*- coding: utf-8 -*-
from __future__ import print_function
from datetime import datetime, timedelta
import functools
import itertools
import numpy as np
import numpy.ma as ma
import numpy.ma.mrecords as mrecords
from numpy.random import randn
import pytest
from pandas.compat import (
PY3, PY36, OrderedDict, is_platform_little_endian, lmap, long, lrange,
lzip, range, zip)
from pandas.core.dtypes.cast import construct_1d_object_array_from_listlike
from pandas.core.dtypes.common import is_integer_dtype
import pandas as pd
from pandas import (
Categorical, DataFrame, Index, MultiIndex, Series, Timedelta, Timestamp,
compat, date_range, isna)
from pandas.tests.frame.common import TestData
import pandas.util.testing as tm
MIXED_FLOAT_DTYPES = ['float16', 'float32', 'float64']
MIXED_INT_DTYPES = ['uint8', 'uint16', 'uint32', 'uint64', 'int8', 'int16',
'int32', 'int64']
class TestDataFrameConstructors(TestData):
def test_constructor(self):
df = DataFrame()
assert len(df.index) == 0
df = DataFrame(data={})
assert len(df.index) == 0
def test_constructor_mixed(self):
index, data = tm.getMixedTypeDict()
# TODO(wesm), incomplete test?
indexed_frame = DataFrame(data, index=index) # noqa
unindexed_frame = DataFrame(data) # noqa
assert self.mixed_frame['foo'].dtype == np.object_
def test_constructor_cast_failure(self):
foo = DataFrame({'a': ['a', 'b', 'c']}, dtype=np.float64)
assert foo['a'].dtype == object
# GH 3010, constructing with odd arrays
df = DataFrame(np.ones((4, 2)))
# this is ok
df['foo'] = np.ones((4, 2)).tolist()
# this is not ok
pytest.raises(ValueError, df.__setitem__, tuple(['test']),
np.ones((4, 2)))
# this is ok
df['foo2'] = np.ones((4, 2)).tolist()
def test_constructor_dtype_copy(self):
orig_df = DataFrame({
'col1': [1.],
'col2': [2.],
'col3': [3.]})
new_df = pd.DataFrame(orig_df, dtype=float, copy=True)
new_df['col1'] = 200.
assert orig_df['col1'][0] == 1.
def test_constructor_dtype_nocast_view(self):
df = DataFrame([[1, 2]])
should_be_view = DataFrame(df, dtype=df[0].dtype)
should_be_view[0][0] = 99
assert df.values[0, 0] == 99
should_be_view = DataFrame(df.values, dtype=df[0].dtype)
should_be_view[0][0] = 97
assert df.values[0, 0] == 97
def test_constructor_dtype_list_data(self):
df = DataFrame([[1, '2'],
[None, 'a']], dtype=object)
assert df.loc[1, 0] is None
assert df.loc[0, 1] == '2'
def test_constructor_list_frames(self):
# see gh-3243
result = DataFrame([DataFrame([])])
assert result.shape == (1, 0)
result = DataFrame([DataFrame(dict(A=lrange(5)))])
assert isinstance(result.iloc[0, 0], DataFrame)
def test_constructor_mixed_dtypes(self):
def _make_mixed_dtypes_df(typ, ad=None):
if typ == 'int':
dtypes = MIXED_INT_DTYPES
arrays = [np.array(np.random.rand(10), dtype=d)
for d in dtypes]
elif typ == 'float':
dtypes = MIXED_FLOAT_DTYPES
arrays = [np.array(np.random.randint(
10, size=10), dtype=d) for d in dtypes]
zipper = lzip(dtypes, arrays)
for d, a in zipper:
assert(a.dtype == d)
if ad is None:
ad = dict()
ad.update({d: a for d, a in zipper})
return DataFrame(ad)
def _check_mixed_dtypes(df, dtypes=None):
if dtypes is None:
dtypes = MIXED_FLOAT_DTYPES + MIXED_INT_DTYPES
for d in dtypes:
if d in df:
assert(df.dtypes[d] == d)
        # mixed floating and integer dtypes coexist in the same frame
df = _make_mixed_dtypes_df('float')
_check_mixed_dtypes(df)
# add lots of types
df = _make_mixed_dtypes_df('float', dict(A=1, B='foo', C='bar'))
_check_mixed_dtypes(df)
# GH 622
df = _make_mixed_dtypes_df('int')
_check_mixed_dtypes(df)
def test_constructor_complex_dtypes(self):
# GH10952
a = np.random.rand(10).astype(np.complex64)
b = np.random.rand(10).astype(np.complex128)
df = DataFrame({'a': a, 'b': b})
assert a.dtype == df.a.dtype
assert b.dtype == df.b.dtype
def test_constructor_dtype_str_na_values(self, string_dtype):
# https://github.com/pandas-dev/pandas/issues/21083
df = DataFrame({'A': ['x', None]}, dtype=string_dtype)
result = df.isna()
expected = DataFrame({"A": [False, True]})
tm.assert_frame_equal(result, expected)
assert df.iloc[1, 0] is None
df = DataFrame({'A': ['x', np.nan]}, dtype=string_dtype)
assert np.isnan(df.iloc[1, 0])
def test_constructor_rec(self):
rec = self.frame.to_records(index=False)
if PY3:
# unicode error under PY2
rec.dtype.names = list(rec.dtype.names)[::-1]
index = self.frame.index
df = DataFrame(rec)
tm.assert_index_equal(df.columns, pd.Index(rec.dtype.names))
df2 = DataFrame(rec, index=index)
tm.assert_index_equal(df2.columns, pd.Index(rec.dtype.names))
tm.assert_index_equal(df2.index, index)
rng = np.arange(len(rec))[::-1]
df3 = DataFrame(rec, index=rng, columns=['C', 'B'])
expected = DataFrame(rec, index=rng).reindex(columns=['C', 'B'])
tm.assert_frame_equal(df3, expected)
def test_constructor_bool(self):
df = DataFrame({0: np.ones(10, dtype=bool),
1: np.zeros(10, dtype=bool)})
assert df.values.dtype == np.bool_
def test_constructor_overflow_int64(self):
# see gh-14881
values = np.array([2 ** 64 - i for i in range(1, 10)],
dtype=np.uint64)
result = DataFrame({'a': values})
assert result['a'].dtype == np.uint64
# see gh-2355
data_scores = [(6311132704823138710, 273), (2685045978526272070, 23),
(8921811264899370420, 45),
(long(17019687244989530680), 270),
(long(9930107427299601010), 273)]
dtype = [('uid', 'u8'), ('score', 'u8')]
data = np.zeros((len(data_scores),), dtype=dtype)
data[:] = data_scores
df_crawls = DataFrame(data)
assert df_crawls['uid'].dtype == np.uint64
@pytest.mark.parametrize("values", [np.array([2**64], dtype=object),
np.array([2**65]), [2**64 + 1],
np.array([-2**63 - 4], dtype=object),
np.array([-2**64 - 1]), [-2**65 - 2]])
def test_constructor_int_overflow(self, values):
# see gh-18584
value = values[0]
result = DataFrame(values)
assert result[0].dtype == object
assert result[0][0] == value
def test_constructor_ordereddict(self):
import random
nitems = 100
nums = lrange(nitems)
random.shuffle(nums)
expected = ['A%d' % i for i in nums]
df = DataFrame(OrderedDict(zip(expected, [[0]] * nitems)))
assert expected == list(df.columns)
def test_constructor_dict(self):
frame = DataFrame({'col1': self.ts1,
'col2': self.ts2})
# col2 is padded with NaN
assert len(self.ts1) == 30
assert len(self.ts2) == 25
tm.assert_series_equal(self.ts1, frame['col1'], check_names=False)
exp = pd.Series(np.concatenate([[np.nan] * 5, self.ts2.values]),
index=self.ts1.index, name='col2')
tm.assert_series_equal(exp, frame['col2'])
frame = DataFrame({'col1': self.ts1,
'col2': self.ts2},
columns=['col2', 'col3', 'col4'])
assert len(frame) == len(self.ts2)
assert 'col1' not in frame
assert isna(frame['col3']).all()
# Corner cases
assert len(DataFrame({})) == 0
# mix dict and array, wrong size - no spec for which error should raise
# first
with pytest.raises(ValueError):
DataFrame({'A': {'a': 'a', 'b': 'b'}, 'B': ['a', 'b', 'c']})
# Length-one dict micro-optimization
frame = DataFrame({'A': {'1': 1, '2': 2}})
tm.assert_index_equal(frame.index, pd.Index(['1', '2']))
# empty dict plus index
idx = Index([0, 1, 2])
frame = DataFrame({}, index=idx)
assert frame.index is idx
# empty with index and columns
idx = Index([0, 1, 2])
frame = DataFrame({}, index=idx, columns=idx)
assert frame.index is idx
assert frame.columns is idx
assert len(frame._series) == 3
# with dict of empty list and Series
frame = DataFrame({'A': [], 'B': []}, columns=['A', 'B'])
tm.assert_index_equal(frame.index, Index([], dtype=np.int64))
# GH 14381
# Dict with None value
frame_none = DataFrame(dict(a=None), index=[0])
frame_none_list = DataFrame(dict(a=[None]), index=[0])
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
assert frame_none.get_value(0, 'a') is None
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
assert frame_none_list.get_value(0, 'a') is None
tm.assert_frame_equal(frame_none, frame_none_list)
# GH10856
# dict with scalar values should raise error, even if columns passed
msg = 'If using all scalar values, you must pass an index'
with pytest.raises(ValueError, match=msg):
DataFrame({'a': 0.7})
with pytest.raises(ValueError, match=msg):
DataFrame({'a': 0.7}, columns=['a'])
@pytest.mark.parametrize("scalar", [2, np.nan, None, 'D'])
def test_constructor_invalid_items_unused(self, scalar):
# No error if invalid (scalar) value is in fact not used:
result = DataFrame({'a': scalar}, columns=['b'])
expected = DataFrame(columns=['b'])
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("value", [2, np.nan, None, float('nan')])
def test_constructor_dict_nan_key(self, value):
# GH 18455
cols = [1, value, 3]
idx = ['a', value]
values = [[0, 3], [1, 4], [2, 5]]
data = {cols[c]: Series(values[c], index=idx) for c in range(3)}
result = DataFrame(data).sort_values(1).sort_values('a', axis=1)
expected = DataFrame(np.arange(6, dtype='int64').reshape(2, 3),
index=idx, columns=cols)
tm.assert_frame_equal(result, expected)
result = DataFrame(data, index=idx).sort_values('a', axis=1)
tm.assert_frame_equal(result, expected)
result = DataFrame(data, index=idx, columns=cols)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("value", [np.nan, None, float('nan')])
def test_constructor_dict_nan_tuple_key(self, value):
# GH 18455
cols = Index([(11, 21), (value, 22), (13, value)])
idx = Index([('a', value), (value, 2)])
values = [[0, 3], [1, 4], [2, 5]]
data = {cols[c]: Series(values[c], index=idx) for c in range(3)}
result = (DataFrame(data)
.sort_values((11, 21))
.sort_values(('a', value), axis=1))
expected = DataFrame(np.arange(6, dtype='int64').reshape(2, 3),
index=idx, columns=cols)
tm.assert_frame_equal(result, expected)
result = DataFrame(data, index=idx).sort_values(('a', value), axis=1)
tm.assert_frame_equal(result, expected)
result = DataFrame(data, index=idx, columns=cols)
tm.assert_frame_equal(result, expected)
@pytest.mark.skipif(not PY36, reason='Insertion order for Python>=3.6')
def test_constructor_dict_order_insertion(self):
# GH19018
# initialization ordering: by insertion order if python>= 3.6
d = {'b': self.ts2, 'a': self.ts1}
frame = DataFrame(data=d)
expected = DataFrame(data=d, columns=list('ba'))
tm.assert_frame_equal(frame, expected)
@pytest.mark.skipif(PY36, reason='order by value for Python<3.6')
def test_constructor_dict_order_by_values(self):
# GH19018
# initialization ordering: by value if python<3.6
d = {'b': self.ts2, 'a': self.ts1}
frame = DataFrame(data=d)
expected = DataFrame(data=d, columns=list('ab'))
tm.assert_frame_equal(frame, expected)
def test_constructor_multi_index(self):
# GH 4078
# construction error with mi and all-nan frame
tuples = [(2, 3), (3, 3), (3, 3)]
mi = MultiIndex.from_tuples(tuples)
df = DataFrame(index=mi, columns=mi)
assert pd.isna(df).values.ravel().all()
tuples = [(3, 3), (2, 3), (3, 3)]
mi = MultiIndex.from_tuples(tuples)
df = DataFrame(index=mi, columns=mi)
assert pd.isna(df).values.ravel().all()
def test_constructor_error_msgs(self):
msg = "Empty data passed with indices specified."
# passing an empty array with columns specified.
with pytest.raises(ValueError, match=msg):
DataFrame(np.empty(0), columns=list('abc'))
msg = "Mixing dicts with non-Series may lead to ambiguous ordering."
# mix dict and array, wrong size
with pytest.raises(ValueError, match=msg):
DataFrame({'A': {'a': 'a', 'b': 'b'},
'B': ['a', 'b', 'c']})
# wrong size ndarray, GH 3105
msg = r"Shape of passed values is \(3, 4\), indices imply \(3, 3\)"
with pytest.raises(ValueError, match=msg):
DataFrame(np.arange(12).reshape((4, 3)),
columns=['foo', 'bar', 'baz'],
index=pd.date_range('2000-01-01', periods=3))
# higher dim raise exception
with pytest.raises(ValueError, match='Must pass 2-d input'):
DataFrame(np.zeros((3, 3, 3)), columns=['A', 'B', 'C'], index=[1])
# wrong size axis labels
msg = ("Shape of passed values "
r"is \(3, 2\), indices "
r"imply \(3, 1\)")
with pytest.raises(ValueError, match=msg):
DataFrame(np.random.rand(2, 3), columns=['A', 'B', 'C'], index=[1])
msg = ("Shape of passed values "
r"is \(3, 2\), indices "
r"imply \(2, 2\)")
with pytest.raises(ValueError, match=msg):
DataFrame(np.random.rand(2, 3), columns=['A', 'B'], index=[1, 2])
msg = ("If using all scalar "
"values, you must pass "
"an index")
with pytest.raises(ValueError, match=msg):
DataFrame({'a': False, 'b': True})
def test_constructor_with_embedded_frames(self):
# embedded data frames
df1 = DataFrame({'a': [1, 2, 3], 'b': [3, 4, 5]})
df2 = DataFrame([df1, df1 + 10])
df2.dtypes
str(df2)
result = df2.loc[0, 0]
tm.assert_frame_equal(result, df1)
result = df2.loc[1, 0]
tm.assert_frame_equal(result, df1 + 10)
def test_constructor_subclass_dict(self):
# Test for passing dict subclass to constructor
data = {'col1': tm.TestSubDict((x, 10.0 * x) for x in range(10)),
'col2': tm.TestSubDict((x, 20.0 * x) for x in range(10))}
df = DataFrame(data)
refdf = DataFrame({col: dict(compat.iteritems(val))
for col, val in compat.iteritems(data)})
tm.assert_frame_equal(refdf, df)
data = tm.TestSubDict(compat.iteritems(data))
df = DataFrame(data)
tm.assert_frame_equal(refdf, df)
# try with defaultdict
from collections import defaultdict
data = {}
self.frame['B'][:10] = np.nan
for k, v in compat.iteritems(self.frame):
dct = defaultdict(dict)
dct.update(v.to_dict())
data[k] = dct
frame = DataFrame(data)
tm.assert_frame_equal(self.frame.sort_index(), frame)
def test_constructor_dict_block(self):
expected = np.array([[4., 3., 2., 1.]])
df = DataFrame({'d': [4.], 'c': [3.], 'b': [2.], 'a': [1.]},
columns=['d', 'c', 'b', 'a'])
tm.assert_numpy_array_equal(df.values, expected)
def test_constructor_dict_cast(self):
# cast float tests
test_data = {
'A': {'1': 1, '2': 2},
'B': {'1': '1', '2': '2', '3': '3'},
}
frame = DataFrame(test_data, dtype=float)
assert len(frame) == 3
assert frame['B'].dtype == np.float64
assert frame['A'].dtype == np.float64
frame = DataFrame(test_data)
assert len(frame) == 3
assert frame['B'].dtype == np.object_
assert frame['A'].dtype == np.float64
# can't cast to float
test_data = {
'A': dict(zip(range(20), tm.makeStringIndex(20))),
'B': dict(zip(range(15), randn(15)))
}
frame = DataFrame(test_data, dtype=float)
assert len(frame) == 20
assert frame['A'].dtype == np.object_
assert frame['B'].dtype == np.float64
def test_constructor_dict_dont_upcast(self):
d = {'Col1': {'Row1': 'A String', 'Row2': np.nan}}
df = DataFrame(d)
assert isinstance(df['Col1']['Row2'], float)
dm = DataFrame([[1, 2], ['a', 'b']], index=[1, 2], columns=[1, 2])
assert isinstance(dm[1][1], int)
def test_constructor_dict_of_tuples(self):
# GH #1491
data = {'a': (1, 2, 3), 'b': (4, 5, 6)}
result = DataFrame(data)
expected = DataFrame({k: list(v) for k, v in compat.iteritems(data)})
tm.assert_frame_equal(result, expected, check_dtype=False)
def test_constructor_dict_multiindex(self):
def check(result, expected):
return tm.assert_frame_equal(result, expected, check_dtype=True,
check_index_type=True,
check_column_type=True,
check_names=True)
d = {('a', 'a'): {('i', 'i'): 0, ('i', 'j'): 1, ('j', 'i'): 2},
('b', 'a'): {('i', 'i'): 6, ('i', 'j'): 5, ('j', 'i'): 4},
('b', 'c'): {('i', 'i'): 7, ('i', 'j'): 8, ('j', 'i'): 9}}
_d = sorted(d.items())
df = DataFrame(d)
expected = DataFrame(
[x[1] for x in _d],
index=MultiIndex.from_tuples([x[0] for x in _d])).T
expected.index = MultiIndex.from_tuples(expected.index)
check(df, expected)
d['z'] = {'y': 123., ('i', 'i'): 111, ('i', 'j'): 111, ('j', 'i'): 111}
_d.insert(0, ('z', d['z']))
expected = DataFrame(
[x[1] for x in _d],
index=Index([x[0] for x in _d], tupleize_cols=False)).T
expected.index = Index(expected.index, tupleize_cols=False)
df = DataFrame(d)
df = df.reindex(columns=expected.columns, index=expected.index)
check(df, expected)
def test_constructor_dict_datetime64_index(self):
# GH 10160
dates_as_str = ['1984-02-19', '1988-11-06', '1989-12-03', '1990-03-15']
def create_data(constructor):
return {i: {constructor(s): 2 * i}
for i, s in enumerate(dates_as_str)}
data_datetime64 = create_data(np.datetime64)
data_datetime = create_data(lambda x: datetime.strptime(x, '%Y-%m-%d'))
data_Timestamp = create_data(Timestamp)
expected = DataFrame([{0: 0, 1: None, 2: None, 3: None},
{0: None, 1: 2, 2: None, 3: None},
{0: None, 1: None, 2: 4, 3: None},
{0: None, 1: None, 2: None, 3: 6}],
index=[Timestamp(dt) for dt in dates_as_str])
result_datetime64 = DataFrame(data_datetime64)
result_datetime = DataFrame(data_datetime)
result_Timestamp = DataFrame(data_Timestamp)
tm.assert_frame_equal(result_datetime64, expected)
tm.assert_frame_equal(result_datetime, expected)
tm.assert_frame_equal(result_Timestamp, expected)
def test_constructor_dict_timedelta64_index(self):
# GH 10160
td_as_int = [1, 2, 3, 4]
def create_data(constructor):
return {i: {constructor(s): 2 * i}
for i, s in enumerate(td_as_int)}
data_timedelta64 = create_data(lambda x: np.timedelta64(x, 'D'))
data_timedelta = create_data(lambda x: timedelta(days=x))
data_Timedelta = create_data(lambda x: Timedelta(x, 'D'))
expected = DataFrame([{0: 0, 1: None, 2: None, 3: None},
{0: None, 1: 2, 2: None, 3: None},
{0: None, 1: None, 2: 4, 3: None},
{0: None, 1: None, 2: None, 3: 6}],
index=[Timedelta(td, 'D') for td in td_as_int])
result_timedelta64 = DataFrame(data_timedelta64)
result_timedelta = DataFrame(data_timedelta)
result_Timedelta = DataFrame(data_Timedelta)
tm.assert_frame_equal(result_timedelta64, expected)
tm.assert_frame_equal(result_timedelta, expected)
tm.assert_frame_equal(result_Timedelta, expected)
def test_constructor_period(self):
# PeriodIndex
a = pd.PeriodIndex(['2012-01', 'NaT', '2012-04'], freq='M')
b = pd.PeriodIndex(['2012-02-01', '2012-03-01', 'NaT'], freq='D')
df = pd.DataFrame({'a': a, 'b': b})
assert df['a'].dtype == a.dtype
assert df['b'].dtype == b.dtype
# list of periods
df = pd.DataFrame({'a': a.astype(object).tolist(),
'b': b.astype(object).tolist()})
assert df['a'].dtype == a.dtype
assert df['b'].dtype == b.dtype
def test_nested_dict_frame_constructor(self):
rng = pd.period_range('1/1/2000', periods=5)
df = DataFrame(randn(10, 5), columns=rng)
data = {}
for col in df.columns:
for row in df.index:
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
data.setdefault(col, {})[row] = df.get_value(row, col)
result = DataFrame(data, columns=rng)
tm.assert_frame_equal(result, df)
data = {}
for col in df.columns:
for row in df.index:
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
data.setdefault(row, {})[col] = df.get_value(row, col)
result = DataFrame(data, index=rng).T
tm.assert_frame_equal(result, df)
def _check_basic_constructor(self, empty):
        # mat: 2-D matrix with shape (2, 3) used as input; `empty` is a factory
        # that creates appropriately sized objects
mat = empty((2, 3), dtype=float)
# 2-D input
frame = DataFrame(mat, columns=['A', 'B', 'C'], index=[1, 2])
assert len(frame.index) == 2
assert len(frame.columns) == 3
# 1-D input
frame = DataFrame(empty((3,)), columns=['A'], index=[1, 2, 3])
assert len(frame.index) == 3
assert len(frame.columns) == 1
# cast type
frame = DataFrame(mat, columns=['A', 'B', 'C'],
index=[1, 2], dtype=np.int64)
assert frame.values.dtype == np.int64
# wrong size axis labels
msg = r'Shape of passed values is \(3, 2\), indices imply \(3, 1\)'
with pytest.raises(ValueError, match=msg):
DataFrame(mat, columns=['A', 'B', 'C'], index=[1])
msg = r'Shape of passed values is \(3, 2\), indices imply \(2, 2\)'
with pytest.raises(ValueError, match=msg):
DataFrame(mat, columns=['A', 'B'], index=[1, 2])
# higher dim raise exception
with pytest.raises(ValueError, match='Must pass 2-d input'):
DataFrame(empty((3, 3, 3)), columns=['A', 'B', 'C'],
index=[1])
# automatic labeling
frame = DataFrame(mat)
tm.assert_index_equal(frame.index, pd.Index(lrange(2)))
tm.assert_index_equal(frame.columns, pd.Index(lrange(3)))
frame = DataFrame(mat, index=[1, 2])
tm.assert_index_equal(frame.columns, pd.Index(lrange(3)))
frame = DataFrame(mat, columns=['A', 'B', 'C'])
tm.assert_index_equal(frame.index, pd.Index(lrange(2)))
# 0-length axis
frame = DataFrame(empty((0, 3)))
assert len(frame.index) == 0
frame = DataFrame(empty((3, 0)))
assert len(frame.columns) == 0
def test_constructor_ndarray(self):
self._check_basic_constructor(np.ones)
frame = DataFrame(['foo', 'bar'], index=[0, 1], columns=['A'])
assert len(frame) == 2
def test_constructor_maskedarray(self):
self._check_basic_constructor(ma.masked_all)
# Check non-masked values
mat = ma.masked_all((2, 3), dtype=float)
mat[0, 0] = 1.0
mat[1, 2] = 2.0
frame = DataFrame(mat, columns=['A', 'B', 'C'], index=[1, 2])
assert 1.0 == frame['A'][1]
assert 2.0 == frame['C'][2]
        # all-masked input: every cell is NaN, so frame == frame is False everywhere
mat = ma.masked_all((2, 3), dtype=float)
frame = DataFrame(mat, columns=['A', 'B', 'C'], index=[1, 2])
assert np.all(~np.asarray(frame == frame))
def test_constructor_maskedarray_nonfloat(self):
# masked int promoted to float
mat = ma.masked_all((2, 3), dtype=int)
# 2-D input
frame = DataFrame(mat, columns=['A', 'B', 'C'], index=[1, 2])
assert len(frame.index) == 2
assert len(frame.columns) == 3
assert np.all(~np.asarray(frame == frame))
# cast type
frame = DataFrame(mat, columns=['A', 'B', 'C'],
index=[1, 2], dtype=np.float64)
assert frame.values.dtype == np.float64
# Check non-masked values
mat2 = ma.copy(mat)
mat2[0, 0] = 1
mat2[1, 2] = 2
frame = DataFrame(mat2, columns=['A', 'B', 'C'], index=[1, 2])
assert 1 == frame['A'][1]
assert 2 == frame['C'][2]
# masked np.datetime64 stays (use NaT as null)
mat = ma.masked_all((2, 3), dtype='M8[ns]')
# 2-D input
frame = DataFrame(mat, columns=['A', 'B', 'C'], index=[1, 2])
assert len(frame.index) == 2
assert len(frame.columns) == 3
assert isna(frame).values.all()
# cast type
frame = DataFrame(mat, columns=['A', 'B', 'C'],
index=[1, 2], dtype=np.int64)
assert frame.values.dtype == np.int64
# Check non-masked values
mat2 = ma.copy(mat)
mat2[0, 0] = 1
mat2[1, 2] = 2
frame = DataFrame(mat2, columns=['A', 'B', 'C'], index=[1, 2])
assert 1 == frame['A'].view('i8')[1]
assert 2 == frame['C'].view('i8')[2]
# masked bool promoted to object
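        # (masked entries surface as missing values, so frame == frame is False below)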
mat = ma.masked_all((2, 3), dtype=bool)
# 2-D input
frame = DataFrame(mat, columns=['A', 'B', 'C'], index=[1, 2])
assert len(frame.index) == 2
assert len(frame.columns) == 3
assert np.all(~np.asarray(frame == frame))
# cast type
frame = DataFrame(mat, columns=['A', 'B', 'C'],
index=[1, 2], dtype=object)
assert frame.values.dtype == object
# Check non-masked values
mat2 = ma.copy(mat)
mat2[0, 0] = True
mat2[1, 2] = False
frame = DataFrame(mat2, columns=['A', 'B', 'C'], index=[1, 2])
assert frame['A'][1] is True
assert frame['C'][2] is False
def test_constructor_mrecarray(self):
# Ensure mrecarray produces frame identical to dict of masked arrays
# from GH3479
assert_fr_equal = functools.partial(tm.assert_frame_equal,
check_index_type=True,
check_column_type=True,
check_frame_type=True)
arrays = [
('float', np.array([1.5, 2.0])),
('int', np.array([1, 2])),
('str', np.array(['abc', 'def'])),
]
for name, arr in arrays[:]:
arrays.append(('masked1_' + name,
np.ma.masked_array(arr, mask=[False, True])))
arrays.append(('masked_all', np.ma.masked_all((2,))))
arrays.append(('masked_none',
np.ma.masked_array([1.0, 2.5], mask=False)))
# call assert_frame_equal for all selections of 3 arrays
for comb in itertools.combinations(arrays, 3):
names, data = zip(*comb)
mrecs = mrecords.fromarrays(data, names=names)
# fill the comb
comb = {k: (v.filled() if hasattr(v, 'filled') else v)
for k, v in comb}
expected = DataFrame(comb, columns=names)
result = DataFrame(mrecs)
assert_fr_equal(result, expected)
# specify columns
expected = DataFrame(comb, columns=names[::-1])
result = DataFrame(mrecs, columns=names[::-1])
assert_fr_equal(result, expected)
# specify index
expected = DataFrame(comb, columns=names, index=[1, 2])
result = DataFrame(mrecs, index=[1, 2])
assert_fr_equal(result, expected)
def test_constructor_corner_shape(self):
df = DataFrame(index=[])
assert df.values.shape == (0, 0)
@pytest.mark.parametrize("data, index, columns, dtype, expected", [
(None, lrange(10), ['a', 'b'], object, np.object_),
(None, None, ['a', 'b'], 'int64', np.dtype('int64')),
(None, lrange(10), ['a', 'b'], int, np.dtype('float64')),
({}, None, ['foo', 'bar'], None, np.object_),
({'b': 1}, lrange(10), list('abc'), int, np.dtype('float64'))
])
def test_constructor_dtype(self, data, index, columns, dtype, expected):
df = DataFrame(data, index, columns, dtype)
assert df.values.dtype == expected
def test_constructor_scalar_inference(self):
data = {'int': 1, 'bool': True,
'float': 3., 'complex': 4j, 'object': 'foo'}
df = DataFrame(data, index=np.arange(10))
assert df['int'].dtype == np.int64
assert df['bool'].dtype == np.bool_
assert df['float'].dtype == np.float64
assert df['complex'].dtype == np.complex128
assert df['object'].dtype == np.object_
def test_constructor_arrays_and_scalars(self):
df = DataFrame({'a': randn(10), 'b': True})
exp = DataFrame({'a': df['a'].values, 'b': [True] * 10})
tm.assert_frame_equal(df, exp)
with pytest.raises(ValueError, match='must pass an index'):
DataFrame({'a': False, 'b': True})
def test_constructor_DataFrame(self):
df = DataFrame(self.frame)
tm.assert_frame_equal(df, self.frame)
df_casted = DataFrame(self.frame, dtype=np.int64)
assert df_casted.values.dtype == np.int64
def test_constructor_more(self):
# used to be in test_matrix.py
arr = randn(10)
dm = DataFrame(arr, columns=['A'], index=np.arange(10))
assert dm.values.ndim == 2
arr = randn(0)
dm = DataFrame(arr)
assert dm.values.ndim == 2
assert dm.values.ndim == 2
# no data specified
dm = DataFrame(columns=['A', 'B'], index=np.arange(10))
assert dm.values.shape == (10, 2)
dm = DataFrame(columns=['A', 'B'])
assert dm.values.shape == (0, 2)
dm = DataFrame(index=np.arange(10))
assert dm.values.shape == (10, 0)
# can't cast
mat = np.array(['foo', 'bar'], dtype=object).reshape(2, 1)
with pytest.raises(ValueError, match='cast'):
DataFrame(mat, index=[0, 1], columns=[0], dtype=float)
dm = DataFrame(DataFrame(self.frame._series))
tm.assert_frame_equal(dm, self.frame)
# int cast
dm = DataFrame({'A': np.ones(10, dtype=int),
'B': np.ones(10, dtype=np.float64)},
index=np.arange(10))
assert len(dm.columns) == 2
assert dm.values.dtype == np.float64
def test_constructor_empty_list(self):
df = DataFrame([], index=[])
expected = DataFrame(index=[])
tm.assert_frame_equal(df, expected)
# GH 9939
df = DataFrame([], columns=['A', 'B'])
expected = DataFrame({}, columns=['A', 'B'])
tm.assert_frame_equal(df, expected)
# Empty generator: list(empty_gen()) == []
def empty_gen():
return
yield
df = DataFrame(empty_gen(), columns=['A', 'B'])
tm.assert_frame_equal(df, expected)
def test_constructor_list_of_lists(self):
# GH #484
df = DataFrame(data=[[1, 'a'], [2, 'b']], columns=["num", "str"])
assert is_integer_dtype(df['num'])
assert df['str'].dtype == np.object_
# GH 4851
# list of 0-dim ndarrays
expected = DataFrame({0: np.arange(10)})
data = [np.array(x) for x in range(10)]
result = DataFrame(data)
tm.assert_frame_equal(result, expected)
def test_constructor_sequence_like(self):
# GH 3783
        # collections.Sequence like
class DummyContainer(compat.Sequence):
def __init__(self, lst):
self._lst = lst
def __getitem__(self, n):
return self._lst.__getitem__(n)
def __len__(self, n):
return self._lst.__len__()
lst_containers = [DummyContainer([1, 'a']), DummyContainer([2, 'b'])]
columns = ["num", "str"]
result = DataFrame(lst_containers, columns=columns)
expected = DataFrame([[1, 'a'], [2, 'b']], columns=columns)
tm.assert_frame_equal(result, expected, check_dtype=False)
# GH 4297
# support Array
import array
result = DataFrame({'A': array.array('i', range(10))})
expected = DataFrame({'A': list(range(10))})
tm.assert_frame_equal(result, expected, check_dtype=False)
expected = DataFrame([list(range(10)), list(range(10))])
result = DataFrame([array.array('i', range(10)),
array.array('i', range(10))])
tm.assert_frame_equal(result, expected, check_dtype=False)
def test_constructor_iterable(self):
# GH 21987
class Iter():
def __iter__(self):
for i in range(10):
yield [1, 2, 3]
expected = DataFrame([[1, 2, 3]] * 10)
result = DataFrame(Iter())
tm.assert_frame_equal(result, expected)
def test_constructor_iterator(self):
expected = DataFrame([list(range(10)), list(range(10))])
result = DataFrame([range(10), range(10)])
tm.assert_frame_equal(result, expected)
def test_constructor_generator(self):
# related #2305
gen1 = (i for i in range(10))
gen2 = (i for i in range(10))
expected = DataFrame([list(range(10)), list(range(10))])
result = DataFrame([gen1, gen2])
tm.assert_frame_equal(result, expected)
gen = ([i, 'a'] for i in range(10))
result = DataFrame(gen)
expected = DataFrame({0: range(10), 1: 'a'})
tm.assert_frame_equal(result, expected, check_dtype=False)
def test_constructor_list_of_dicts(self):
data = [OrderedDict([['a', 1.5], ['b', 3], ['c', 4], ['d', 6]]),
OrderedDict([['a', 1.5], ['b', 3], ['d', 6]]),
OrderedDict([['a', 1.5], ['d', 6]]),
OrderedDict(),
OrderedDict([['a', 1.5], ['b', 3], ['c', 4]]),
OrderedDict([['b', 3], ['c', 4], ['d', 6]])]
result = DataFrame(data)
expected = DataFrame.from_dict(dict(zip(range(len(data)), data)),
orient='index')
tm.assert_frame_equal(result, expected.reindex(result.index))
result = DataFrame([{}])
expected = DataFrame(index=[0])
tm.assert_frame_equal(result, expected)
def test_constructor_ordered_dict_preserve_order(self):
# see gh-13304
expected = DataFrame([[2, 1]], columns=['b', 'a'])
data = OrderedDict()
data['b'] = [2]
data['a'] = [1]
result = DataFrame(data)
tm.assert_frame_equal(result, expected)
data = OrderedDict()
data['b'] = 2
data['a'] = 1
result = DataFrame([data])
tm.assert_frame_equal(result, expected)
def test_constructor_ordered_dict_conflicting_orders(self):
# the first dict element sets the ordering for the DataFrame,
# even if there are conflicting orders from subsequent ones
row_one = OrderedDict()
row_one['b'] = 2
row_one['a'] = 1
row_two = OrderedDict()
row_two['a'] = 1
row_two['b'] = 2
row_three = {'b': 2, 'a': 1}
expected = DataFrame([[2, 1], [2, 1]], columns=['b', 'a'])
result = DataFrame([row_one, row_two])
tm.assert_frame_equal(result, expected)
expected = DataFrame([[2, 1], [2, 1], [2, 1]], columns=['b', 'a'])
result = DataFrame([row_one, row_two, row_three])
tm.assert_frame_equal(result, expected)
def test_constructor_list_of_series(self):
data = [OrderedDict([['a', 1.5], ['b', 3.0], ['c', 4.0]]),
OrderedDict([['a', 1.5], ['b', 3.0], ['c', 6.0]])]
sdict = OrderedDict(zip(['x', 'y'], data))
idx = Index(['a', 'b', 'c'])
# all named
data2 = [Series([1.5, 3, 4], idx, dtype='O', name='x'),
Series([1.5, 3, 6], idx, name='y')]
result = DataFrame(data2)
expected = DataFrame.from_dict(sdict, orient='index')
tm.assert_frame_equal(result, expected)
# some unnamed
data2 = [Series([1.5, 3, 4], idx, dtype='O', name='x'),
Series([1.5, 3, 6], idx)]
result = DataFrame(data2)
sdict = OrderedDict(zip(['x', 'Unnamed 0'], data))
expected = DataFrame.from_dict(sdict, orient='index')
tm.assert_frame_equal(result.sort_index(), expected)
# none named
data = [OrderedDict([['a', 1.5], ['b', 3], ['c', 4], ['d', 6]]),
OrderedDict([['a', 1.5], ['b', 3], ['d', 6]]),
OrderedDict([['a', 1.5], ['d', 6]]),
OrderedDict(),
OrderedDict([['a', 1.5], ['b', 3], ['c', 4]]),
OrderedDict([['b', 3], ['c', 4], ['d', 6]])]
data = [Series(d) for d in data]
result = DataFrame(data)
sdict = OrderedDict(zip(range(len(data)), data))
expected = DataFrame.from_dict(sdict, orient='index')
tm.assert_frame_equal(result, expected.reindex(result.index))
result2 = DataFrame(data, index=np.arange(6))
tm.assert_frame_equal(result, result2)
result = DataFrame([Series({})])
expected = DataFrame(index=[0])
tm.assert_frame_equal(result, expected)
data = [OrderedDict([['a', 1.5], ['b', 3.0], ['c', 4.0]]),
OrderedDict([['a', 1.5], ['b', 3.0], ['c', 6.0]])]
sdict = OrderedDict(zip(range(len(data)), data))
idx = Index(['a', 'b', 'c'])
data2 = [Series([1.5, 3, 4], idx, dtype='O'),
Series([1.5, 3, 6], idx)]
result = DataFrame(data2)
expected = DataFrame.from_dict(sdict, orient='index')
tm.assert_frame_equal(result, expected)
def test_constructor_list_of_series_aligned_index(self):
series = [pd.Series(i, index=['b', 'a', 'c'], name=str(i))
for i in range(3)]
result = pd.DataFrame(series)
expected = pd.DataFrame({'b': [0, 1, 2],
'a': [0, 1, 2],
'c': [0, 1, 2]},
columns=['b', 'a', 'c'],
index=['0', '1', '2'])
tm.assert_frame_equal(result, expected)
def test_constructor_list_of_derived_dicts(self):
class CustomDict(dict):
pass
d = {'a': 1.5, 'b': 3}
data_custom = [CustomDict(d)]
data = [d]
result_custom = DataFrame(data_custom)
result = DataFrame(data)
tm.assert_frame_equal(result, result_custom)
def test_constructor_ragged(self):
data = {'A': randn(10),
'B': randn(8)}
with pytest.raises(ValueError, match='arrays must all be same length'):
DataFrame(data)
def test_constructor_scalar(self):
idx = Index(lrange(3))
df = DataFrame({"a": 0}, index=idx)
expected = DataFrame({"a": [0, 0, 0]}, index=idx)
tm.assert_frame_equal(df, expected, check_dtype=False)
def test_constructor_Series_copy_bug(self):
df = DataFrame(self.frame['A'], index=self.frame.index, columns=['A'])
df.copy()
def test_constructor_mixed_dict_and_Series(self):
data = {}
data['A'] = {'foo': 1, 'bar': 2, 'baz': 3}
data['B'] = Series([4, 3, 2, 1], index=['bar', 'qux', 'baz', 'foo'])
result = DataFrame(data)
assert result.index.is_monotonic
# ordering ambiguous, raise exception
with pytest.raises(ValueError, match='ambiguous ordering'):
DataFrame({'A': ['a', 'b'], 'B': {'a': 'a', 'b': 'b'}})
# this is OK though
result = DataFrame({'A': ['a', 'b'],
'B': Series(['a', 'b'], index=['a', 'b'])})
expected = DataFrame({'A': ['a', 'b'], 'B': ['a', 'b']},
index=['a', 'b'])
tm.assert_frame_equal(result, expected)
def test_constructor_tuples(self):
result = DataFrame({'A': [(1, 2), (3, 4)]})
expected = DataFrame({'A': Series([(1, 2), (3, 4)])})
tm.assert_frame_equal(result, expected)
def test_constructor_namedtuples(self):
# GH11181
from collections import namedtuple
named_tuple = namedtuple("Pandas", list('ab'))
tuples = [named_tuple(1, 3), named_tuple(2, 4)]
expected = DataFrame({'a': [1, 2], 'b': [3, 4]})
result = DataFrame(tuples)
tm.assert_frame_equal(result, expected)
# with columns
expected = DataFrame({'y': [1, 2], 'z': [3, 4]})
result = DataFrame(tuples, columns=['y', 'z'])
tm.assert_frame_equal(result, expected)
def test_constructor_orient(self):
data_dict = self.mixed_frame.T._series
recons = DataFrame.from_dict(data_dict, orient='index')
expected = self.mixed_frame.sort_index()
tm.assert_frame_equal(recons, expected)
# dict of sequence
a = {'hi': [32, 3, 3],
'there': [3, 5, 3]}
rs = DataFrame.from_dict(a, orient='index')
xp = DataFrame.from_dict(a).T.reindex(list(a.keys()))
tm.assert_frame_equal(rs, xp)
def test_from_dict_columns_parameter(self):
# GH 18529
# Test new columns parameter for from_dict that was added to make
# from_items(..., orient='index', columns=[...]) easier to replicate
result = DataFrame.from_dict(OrderedDict([('A', [1, 2]),
('B', [4, 5])]),
orient='index', columns=['one', 'two'])
expected = DataFrame([[1, 2], [4, 5]], index=['A', 'B'],
columns=['one', 'two'])
tm.assert_frame_equal(result, expected)
msg = "cannot use columns parameter with orient='columns'"
with pytest.raises(ValueError, match=msg):
DataFrame.from_dict(dict([('A', [1, 2]), ('B', [4, 5])]),
orient='columns', columns=['one', 'two'])
with pytest.raises(ValueError, match=msg):
DataFrame.from_dict(dict([('A', [1, 2]), ('B', [4, 5])]),
columns=['one', 'two'])
def test_constructor_Series_named(self):
a = Series([1, 2, 3], index=['a', 'b', 'c'], name='x')
df = DataFrame(a)
assert df.columns[0] == 'x'
tm.assert_index_equal(df.index, a.index)
# ndarray like
arr = np.random.randn(10)
s = Series(arr, name='x')
df = DataFrame(s)
expected = DataFrame(dict(x=s))
tm.assert_frame_equal(df, expected)
s = Series(arr, index=range(3, 13))
df = DataFrame(s)
expected = DataFrame({0: s})
tm.assert_frame_equal(df, expected)
pytest.raises(ValueError, DataFrame, s, columns=[1, 2])
# #2234
a = Series([], name='x')
df = DataFrame(a)
assert df.columns[0] == 'x'
# series with name and w/o
s1 = Series(arr, name='x')
df = DataFrame([s1, arr]).T
expected = DataFrame({'x': s1, 'Unnamed 0': arr},
columns=['x', 'Unnamed 0'])
tm.assert_frame_equal(df, expected)
# this is a bit non-intuitive here; the series collapse down to arrays
df = DataFrame([arr, s1]).T
expected = DataFrame({1: s1, 0: arr}, columns=[0, 1])
tm.assert_frame_equal(df, expected)
def test_constructor_Series_named_and_columns(self):
# GH 9232 validation
s0 = Series(range(5), name=0)
s1 = Series(range(5), name=1)
# matching name and column gives standard frame
tm.assert_frame_equal(pd.DataFrame(s0, columns=[0]),
s0.to_frame())
tm.assert_frame_equal(pd.DataFrame(s1, columns=[1]),
s1.to_frame())
# non-matching produces empty frame
assert pd.DataFrame(s0, columns=[1]).empty
assert pd.DataFrame(s1, columns=[0]).empty
def test_constructor_Series_differently_indexed(self):
# name
s1 = Series([1, 2, 3], index=['a', 'b', 'c'], name='x')
# no name
s2 = Series([1, 2, 3], index=['a', 'b', 'c'])
other_index = Index(['a', 'b'])
df1 = DataFrame(s1, index=other_index)
exp1 = DataFrame(s1.reindex(other_index))
assert df1.columns[0] == 'x'
tm.assert_frame_equal(df1, exp1)
df2 = DataFrame(s2, index=other_index)
exp2 = DataFrame(s2.reindex(other_index))
assert df2.columns[0] == 0
tm.assert_index_equal(df2.index, other_index)
tm.assert_frame_equal(df2, exp2)
def test_constructor_manager_resize(self):
index = list(self.frame.index[:5])
columns = list(self.frame.columns[:3])
result = DataFrame(self.frame._data, index=index,
columns=columns)
tm.assert_index_equal(result.index, Index(index))
tm.assert_index_equal(result.columns, Index(columns))
def test_constructor_from_items(self):
items = [(c, self.frame[c]) for c in self.frame.columns]
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
recons = DataFrame.from_items(items)
tm.assert_frame_equal(recons, self.frame)
# pass some columns
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
recons = DataFrame.from_items(items, columns=['C', 'B', 'A'])
tm.assert_frame_equal(recons, self.frame.loc[:, ['C', 'B', 'A']])
# orient='index'
row_items = [(idx, self.mixed_frame.xs(idx))
for idx in self.mixed_frame.index]
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
recons = DataFrame.from_items(row_items,
columns=self.mixed_frame.columns,
orient='index')
tm.assert_frame_equal(recons, self.mixed_frame)
assert recons['A'].dtype == np.float64
msg = "Must pass columns with orient='index'"
with pytest.raises(TypeError, match=msg):
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
DataFrame.from_items(row_items, orient='index')
# orient='index', but thar be tuples
arr = construct_1d_object_array_from_listlike(
[('bar', 'baz')] * len(self.mixed_frame))
self.mixed_frame['foo'] = arr
row_items = [(idx, list(self.mixed_frame.xs(idx)))
for idx in self.mixed_frame.index]
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
recons = DataFrame.from_items(row_items,
columns=self.mixed_frame.columns,
orient='index')
tm.assert_frame_equal(recons, self.mixed_frame)
assert isinstance(recons['foo'][0], tuple)
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
rs = DataFrame.from_items([('A', [1, 2, 3]), ('B', [4, 5, 6])],
orient='index',
columns=['one', 'two', 'three'])
xp = DataFrame([[1, 2, 3], [4, 5, 6]], index=['A', 'B'],
columns=['one', 'two', 'three'])
tm.assert_frame_equal(rs, xp)
def test_constructor_from_items_scalars(self):
# GH 17312
msg = (r'The value in each \(key, value\) '
'pair must be an array, Series, or dict')
with pytest.raises(ValueError, match=msg):
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
DataFrame.from_items([('A', 1), ('B', 4)])
msg = (r'The value in each \(key, value\) '
'pair must be an array, Series, or dict')
with pytest.raises(ValueError, match=msg):
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
DataFrame.from_items([('A', 1), ('B', 2)], columns=['col1'],
orient='index')
def test_from_items_deprecation(self):
# GH 17320
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
DataFrame.from_items([('A', [1, 2, 3]), ('B', [4, 5, 6])])
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
DataFrame.from_items([('A', [1, 2, 3]), ('B', [4, 5, 6])],
columns=['col1', 'col2', 'col3'],
orient='index')
def test_constructor_mix_series_nonseries(self):
df = DataFrame({'A': self.frame['A'],
'B': list(self.frame['B'])}, columns=['A', 'B'])
tm.assert_frame_equal(df, self.frame.loc[:, ['A', 'B']])
msg = 'does not match index length'
with pytest.raises(ValueError, match=msg):
DataFrame({'A': self.frame['A'], 'B': list(self.frame['B'])[:-2]})
def test_constructor_miscast_na_int_dtype(self):
df = DataFrame([[np.nan, 1], [1, 0]], dtype=np.int64)
expected = DataFrame([[np.nan, 1], [1, 0]])
tm.assert_frame_equal(df, expected)
def test_constructor_column_duplicates(self):
# it works! #2079
df = DataFrame([[8, 5]], columns=['a', 'a'])
edf = DataFrame([[8, 5]])
edf.columns = ['a', 'a']
tm.assert_frame_equal(df, edf)
idf = DataFrame.from_records([(8, 5)],
columns=['a', 'a'])
tm.assert_frame_equal(idf, edf)
pytest.raises(ValueError, DataFrame.from_dict,
OrderedDict([('b', 8), ('a', 5), ('a', 6)]))
def test_constructor_empty_with_string_dtype(self):
# GH 9428
expected = DataFrame(index=[0, 1], columns=[0, 1], dtype=object)
df = DataFrame(index=[0, 1], columns=[0, 1], dtype=str)
tm.assert_frame_equal(df, expected)
df = DataFrame(index=[0, 1], columns=[0, 1], dtype=np.str_)
tm.assert_frame_equal(df, expected)
df = DataFrame(index=[0, 1], columns=[0, 1], dtype=np.unicode_)
tm.assert_frame_equal(df, expected)
df = DataFrame(index=[0, 1], columns=[0, 1], dtype='U5')
tm.assert_frame_equal(df, expected)
def test_constructor_single_value(self):
# expecting single value upcasting here
df = DataFrame(0., index=[1, 2, 3], columns=['a', 'b', 'c'])
tm.assert_frame_equal(df,
DataFrame(np.zeros(df.shape).astype('float64'),
df.index, df.columns))
df = DataFrame(0, index=[1, 2, 3], columns=['a', 'b', 'c'])
tm.assert_frame_equal(df, DataFrame(np.zeros(df.shape).astype('int64'),
df.index, df.columns))
df = DataFrame('a', index=[1, 2], columns=['a', 'c'])
tm.assert_frame_equal(df, DataFrame(np.array([['a', 'a'], ['a', 'a']],
dtype=object),
index=[1, 2], columns=['a', 'c']))
pytest.raises(ValueError, DataFrame, 'a', [1, 2])
pytest.raises(ValueError, DataFrame, 'a', columns=['a', 'c'])
msg = 'incompatible data and dtype'
with pytest.raises(TypeError, match=msg):
DataFrame('a', [1, 2], ['a', 'c'], float)
def test_constructor_with_datetimes(self):
intname = np.dtype(np.int_).name
floatname = np.dtype(np.float_).name
datetime64name = np.dtype('M8[ns]').name
objectname = np.dtype(np.object_).name
# single item
df = DataFrame({'A': 1, 'B': 'foo', 'C': 'bar',
'D': Timestamp("20010101"),
'E': datetime(2001, 1, 2, 0, 0)},
index=np.arange(10))
result = df.get_dtype_counts()
expected = Series({'int64': 1, datetime64name: 2, objectname: 2})
result.sort_index()
expected.sort_index()
tm.assert_series_equal(result, expected)
# check with ndarray construction ndim==0 (e.g. we are passing a ndim 0
# ndarray with a dtype specified)
df = DataFrame({'a': 1., 'b': 2, 'c': 'foo',
floatname: np.array(1., dtype=floatname),
intname: np.array(1, dtype=intname)},
index=np.arange(10))
result = df.get_dtype_counts()
expected = {objectname: 1}
if intname == 'int64':
expected['int64'] = 2
else:
expected['int64'] = 1
expected[intname] = 1
if floatname == 'float64':
expected['float64'] = 2
else:
expected['float64'] = 1
expected[floatname] = 1
result = result.sort_index()
expected = Series(expected).sort_index()
tm.assert_series_equal(result, expected)
# check with ndarray construction ndim>0
df = DataFrame({'a': 1., 'b': 2, 'c': 'foo',
floatname: np.array([1.] * 10, dtype=floatname),
intname: np.array([1] * 10, dtype=intname)},
index=np.arange(10))
result = df.get_dtype_counts()
result = result.sort_index()
tm.assert_series_equal(result, expected)
# GH 2809
ind = date_range(start="2000-01-01", freq="D", periods=10)
datetimes = [ts.to_pydatetime() for ts in ind]
datetime_s = Series(datetimes)
assert datetime_s.dtype == 'M8[ns]'
df = DataFrame({'datetime_s': datetime_s})
result = df.get_dtype_counts()
expected = Series({datetime64name: 1})
result = result.sort_index()
expected = expected.sort_index()
tm.assert_series_equal(result, expected)
# GH 2810
ind = date_range(start="2000-01-01", freq="D", periods=10)
datetimes = [ts.to_pydatetime() for ts in ind]
dates = [ts.date() for ts in ind]
df = DataFrame({'datetimes': datetimes, 'dates': dates})
result = df.get_dtype_counts()
expected = Series({datetime64name: 1, objectname: 1})
result = result.sort_index()
expected = expected.sort_index()
tm.assert_series_equal(result, expected)
# GH 7594
# don't coerce tz-aware
import pytz
tz = pytz.timezone('US/Eastern')
dt = tz.localize(datetime(2012, 1, 1))
df = DataFrame({'End Date': dt}, index=[0])
assert df.iat[0, 0] == dt
tm.assert_series_equal(df.dtypes, Series(
{'End Date': 'datetime64[ns, US/Eastern]'}))
df = DataFrame([{'End Date': dt}])
assert df.iat[0, 0] == dt
tm.assert_series_equal(df.dtypes, Series(
{'End Date': 'datetime64[ns, US/Eastern]'}))
# tz-aware (UTC and other tz's)
# GH 8411
dr = date_range('20130101', periods=3)
df = DataFrame({'value': dr})
assert df.iat[0, 0].tz is None
dr = date_range('20130101', periods=3, tz='UTC')
df = DataFrame({'value': dr})
assert str(df.iat[0, 0].tz) == 'UTC'
dr = date_range('20130101', periods=3, tz='US/Eastern')
df = DataFrame({'value': dr})
assert str(df.iat[0, 0].tz) == 'US/Eastern'
# GH 7822
        # preserve an index with a tz on dict construction
i = date_range('1/1/2011', periods=5, freq='10s', tz='US/Eastern')
expected = DataFrame(
{'a': i.to_series(keep_tz=True).reset_index(drop=True)})
df = DataFrame()
df['a'] = i
tm.assert_frame_equal(df, expected)
df = DataFrame({'a': i})
tm.assert_frame_equal(df, expected)
# multiples
i_no_tz = date_range('1/1/2011', periods=5, freq='10s')
df = DataFrame({'a': i, 'b': i_no_tz})
expected = DataFrame({'a': i.to_series(keep_tz=True)
.reset_index(drop=True), 'b': i_no_tz})
tm.assert_frame_equal(df, expected)
def test_constructor_datetimes_with_nulls(self):
# gh-15869
for arr in [np.array([None, None, None, None,
datetime.now(), None]),
np.array([None, None, datetime.now(), None])]:
result = DataFrame(arr).get_dtype_counts()
expected = Series({'datetime64[ns]': 1})
tm.assert_series_equal(result, expected)
def test_constructor_for_list_with_dtypes(self):
# TODO(wesm): unused
intname = np.dtype(np.int_).name # noqa
floatname = np.dtype(np.float_).name # noqa
datetime64name = np.dtype('M8[ns]').name
objectname = np.dtype(np.object_).name
# test list of lists/ndarrays
df = DataFrame([np.arange(5) for x in range(5)])
result = df.get_dtype_counts()
expected = Series({'int64': 5})
df = DataFrame([np.array(np.arange(5), dtype='int32')
for x in range(5)])
result = df.get_dtype_counts()
expected = Series({'int32': 5})
        # overflow issue? (we always expect int64 upcasting here)
df = DataFrame({'a': [2 ** 31, 2 ** 31 + 1]})
result = df.get_dtype_counts()
expected = Series({'int64': 1})
tm.assert_series_equal(result, expected)
# GH #2751 (construction with no index specified), make sure we cast to
# platform values
df = DataFrame([1, 2])
result = df.get_dtype_counts()
expected = Series({'int64': 1})
tm.assert_series_equal(result, expected)
df = DataFrame([1., 2.])
result = df.get_dtype_counts()
expected = Series({'float64': 1})
tm.assert_series_equal(result, expected)
df = DataFrame({'a': [1, 2]})
result = df.get_dtype_counts()
expected = Series({'int64': 1})
tm.assert_series_equal(result, expected)
df = DataFrame({'a': [1., 2.]})
result = df.get_dtype_counts()
expected = Series({'float64': 1})
tm.assert_series_equal(result, expected)
df = DataFrame({'a': 1}, index=lrange(3))
result = df.get_dtype_counts()
expected = Series({'int64': 1})
tm.assert_series_equal(result, expected)
df = DataFrame({'a': 1.}, index=lrange(3))
result = df.get_dtype_counts()
expected = Series({'float64': 1})
tm.assert_series_equal(result, expected)
# with object list
df = DataFrame({'a': [1, 2, 4, 7], 'b': [1.2, 2.3, 5.1, 6.3],
'c': list('abcd'),
'd': [datetime(2000, 1, 1) for i in range(4)],
'e': [1., 2, 4., 7]})
result = df.get_dtype_counts()
expected = Series(
{'int64': 1, 'float64': 2, datetime64name: 1, objectname: 1})
result = result.sort_index()
expected = expected.sort_index()
tm.assert_series_equal(result, expected)
def test_constructor_frame_copy(self):
cop = DataFrame(self.frame, copy=True)
cop['A'] = 5
assert (cop['A'] == 5).all()
assert not (self.frame['A'] == 5).all()
def test_constructor_ndarray_copy(self):
df = DataFrame(self.frame.values)
self.frame.values[5] = 5
assert (df.values[5] == 5).all()
df = DataFrame(self.frame.values, copy=True)
self.frame.values[6] = 6
assert not (df.values[6] == 6).all()
def test_constructor_series_copy(self):
series = self.frame._series
df = DataFrame({'A': series['A']})
df['A'][:] = 5
assert not (series['A'] == 5).all()
def test_constructor_with_nas(self):
# GH 5016
# na's in indices
def check(df):
for i in range(len(df.columns)):
df.iloc[:, i]
indexer = np.arange(len(df.columns))[isna(df.columns)]
# No NaN found -> error
if len(indexer) == 0:
def f():
df.loc[:, np.nan]
pytest.raises(TypeError, f)
# single nan should result in Series
elif len(indexer) == 1:
tm.assert_series_equal(df.iloc[:, indexer[0]],
df.loc[:, np.nan])
# multiple nans should result in DataFrame
else:
tm.assert_frame_equal(df.iloc[:, indexer],
df.loc[:, np.nan])
df = DataFrame([[1, 2, 3], [4, 5, 6]], index=[1, np.nan])
check(df)
df = DataFrame([[1, 2, 3], [4, 5, 6]], columns=[1.1, 2.2, np.nan])
check(df)
df = DataFrame([[0, 1, 2, 3], [4, 5, 6, 7]],
columns=[np.nan, 1.1, 2.2, np.nan])
check(df)
df = DataFrame([[0.0, 1, 2, 3.0], [4, 5, 6, 7]],
columns=[np.nan, 1.1, 2.2, np.nan])
check(df)
# GH 21428 (non-unique columns)
df = DataFrame([[0.0, 1, 2, 3.0], [4, 5, 6, 7]],
columns=[np.nan, 1, 2, 2])
check(df)
def test_constructor_lists_to_object_dtype(self):
# from #1074
d = DataFrame({'a': [np.nan, False]})
assert d['a'].dtype == np.object_
assert not d['a'][1]
def test_constructor_categorical(self):
# GH8626
# dict creation
df = DataFrame({'A': list('abc')}, dtype='category')
expected = Series(list('abc'), dtype='category', name='A')
tm.assert_series_equal(df['A'], expected)
# to_frame
s = Series(list('abc'), dtype='category')
result = s.to_frame()
expected = Series(list('abc'), dtype='category', name=0)
tm.assert_series_equal(result[0], expected)
result = s.to_frame(name='foo')
expected = Series(list('abc'), dtype='category', name='foo')
tm.assert_series_equal(result['foo'], expected)
# list-like creation
df = DataFrame(list('abc'), dtype='category')
expected = Series(list('abc'), dtype='category', name=0)
tm.assert_series_equal(df[0], expected)
# ndim != 1
df = DataFrame([Categorical(list('abc'))])
expected = DataFrame({0: Series(list('abc'), dtype='category')})
tm.assert_frame_equal(df, expected)
df = DataFrame([Categorical(list('abc')), Categorical(list('abd'))])
expected = DataFrame({0: Series(list('abc'), dtype='category'),
1: Series(list('abd'), dtype='category')},
columns=[0, 1])
tm.assert_frame_equal(df, expected)
# mixed
df = DataFrame([Categorical(list('abc')), list('def')])
expected = DataFrame({0: Series(list('abc'), dtype='category'),
1: list('def')}, columns=[0, 1])
tm.assert_frame_equal(df, expected)
# invalid (shape)
pytest.raises(ValueError,
lambda: DataFrame([Categorical(list('abc')),
Categorical(list('abdefg'))]))
# ndim > 1
pytest.raises(NotImplementedError,
lambda: Categorical(np.array([list('abcd')])))
def test_constructor_categorical_series(self):
items = [1, 2, 3, 1]
exp = Series(items).astype('category')
res = Series(items, dtype='category')
tm.assert_series_equal(res, exp)
items = ["a", "b", "c", "a"]
exp = Series(items).astype('category')
res = Series(items, dtype='category')
tm.assert_series_equal(res, exp)
# insert into frame with different index
# GH 8076
index = date_range('20000101', periods=3)
expected = Series(Categorical(values=[np.nan, np.nan, np.nan],
categories=['a', 'b', 'c']))
expected.index = index
expected = DataFrame({'x': expected})
df = DataFrame(
{'x': Series(['a', 'b', 'c'], dtype='category')}, index=index)
tm.assert_frame_equal(df, expected)
def test_from_records_to_records(self):
# from numpy documentation
arr = np.zeros((2,), dtype=('i4,f4,a10'))
arr[:] = [(1, 2., 'Hello'), (2, 3., "World")]
# TODO(wesm): unused
frame = DataFrame.from_records(arr) # noqa
index = pd.Index(np.arange(len(arr))[::-1])
indexed_frame = DataFrame.from_records(arr, index=index)
tm.assert_index_equal(indexed_frame.index, index)
# without names, it should go to last ditch
arr2 = np.zeros((2, 3))
tm.assert_frame_equal(DataFrame.from_records(arr2), DataFrame(arr2))
# wrong length
msg = r'Shape of passed values is \(3, 2\), indices imply \(3, 1\)'
with pytest.raises(ValueError, match=msg):
DataFrame.from_records(arr, index=index[:-1])
indexed_frame = DataFrame.from_records(arr, index='f1')
# what to do?
records = indexed_frame.to_records()
assert len(records.dtype.names) == 3
records = indexed_frame.to_records(index=False)
assert len(records.dtype.names) == 2
assert 'index' not in records.dtype.names
def test_from_records_nones(self):
tuples = [(1, 2, None, 3),
(1, 2, None, 3),
(None, 2, 5, 3)]
df = DataFrame.from_records(tuples, columns=['a', 'b', 'c', 'd'])
assert np.isnan(df['c'][0])
def test_from_records_iterator(self):
arr = np.array([(1.0, 1.0, 2, 2), (3.0, 3.0, 4, 4), (5., 5., 6, 6),
(7., 7., 8, 8)],
dtype=[('x', np.float64), ('u', np.float32),
('y', np.int64), ('z', np.int32)])
df = DataFrame.from_records(iter(arr), nrows=2)
xp = DataFrame({'x': np.array([1.0, 3.0], dtype=np.float64),
'u': np.array([1.0, 3.0], dtype=np.float32),
'y': np.array([2, 4], dtype=np.int64),
'z': np.array([2, 4], dtype=np.int32)})
tm.assert_frame_equal(df.reindex_like(xp), xp)
# no dtypes specified here, so just compare with the default
arr = [(1.0, 2), (3.0, 4), (5., 6), (7., 8)]
df = DataFrame.from_records(iter(arr), columns=['x', 'y'],
nrows=2)
tm.assert_frame_equal(df, xp.reindex(columns=['x', 'y']),
check_dtype=False)
def test_from_records_tuples_generator(self):
def tuple_generator(length):
for i in range(length):
letters = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
yield (i, letters[i % len(letters)], i / length)
columns_names = ['Integer', 'String', 'Float']
columns = [[i[j] for i in tuple_generator(
10)] for j in range(len(columns_names))]
data = {'Integer': columns[0],
'String': columns[1], 'Float': columns[2]}
expected = DataFrame(data, columns=columns_names)
generator = tuple_generator(10)
result = DataFrame.from_records(generator, columns=columns_names)
tm.assert_frame_equal(result, expected)
def test_from_records_lists_generator(self):
def list_generator(length):
for i in range(length):
letters = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
yield [i, letters[i % len(letters)], i / length]
columns_names = ['Integer', 'String', 'Float']
columns = [[i[j] for i in list_generator(
10)] for j in range(len(columns_names))]
data = {'Integer': columns[0],
'String': columns[1], 'Float': columns[2]}
expected = DataFrame(data, columns=columns_names)
generator = list_generator(10)
result = DataFrame.from_records(generator, columns=columns_names)
tm.assert_frame_equal(result, expected)
def test_from_records_columns_not_modified(self):
tuples = [(1, 2, 3),
(1, 2, 3),
(2, 5, 3)]
columns = ['a', 'b', 'c']
original_columns = list(columns)
df = DataFrame.from_records(tuples, columns=columns, index='a') # noqa
assert columns == original_columns
def test_from_records_decimal(self):
from decimal import Decimal
tuples = [(Decimal('1.5'),), (Decimal('2.5'),), (None,)]
df = DataFrame.from_records(tuples, columns=['a'])
assert df['a'].dtype == object
df = DataFrame.from_records(tuples, columns=['a'], coerce_float=True)
assert df['a'].dtype == np.float64
assert np.isnan(df['a'].values[-1])
def test_from_records_duplicates(self):
result = DataFrame.from_records([(1, 2, 3), (4, 5, 6)],
columns=['a', 'b', 'a'])
expected = DataFrame([(1, 2, 3), (4, 5, 6)],
columns=['a', 'b', 'a'])
tm.assert_frame_equal(result, expected)
def test_from_records_set_index_name(self):
def create_dict(order_id):
return {'order_id': order_id, 'quantity': np.random.randint(1, 10),
'price': np.random.randint(1, 10)}
documents = [create_dict(i) for i in range(10)]
# demo missing data
documents.append({'order_id': 10, 'quantity': 5})
result = DataFrame.from_records(documents, index='order_id')
assert result.index.name == 'order_id'
# MultiIndex
result = DataFrame.from_records(documents,
index=['order_id', 'quantity'])
assert result.index.names == ('order_id', 'quantity')
def test_from_records_misc_brokenness(self):
# #2179
data = {1: ['foo'], 2: ['bar']}
result = DataFrame.from_records(data, columns=['a', 'b'])
exp = DataFrame(data, columns=['a', 'b'])
tm.assert_frame_equal(result, exp)
# overlap in index/index_names
data = {'a': [1, 2, 3], 'b': [4, 5, 6]}
result = DataFrame.from_records(data, index=['a', 'b', 'c'])
exp = DataFrame(data, index=['a', 'b', 'c'])
tm.assert_frame_equal(result, exp)
# GH 2623
rows = []
rows.append([datetime(2010, 1, 1), 1])
rows.append([datetime(2010, 1, 2), 'hi']) # test col upconverts to obj
df2_obj = DataFrame.from_records(rows, columns=['date', 'test'])
results = df2_obj.get_dtype_counts()
expected = Series({'datetime64[ns]': 1, 'object': 1})
rows = []
rows.append([datetime(2010, 1, 1), 1])
rows.append([datetime(2010, 1, 2), 1])
df2_obj = DataFrame.from_records(rows, columns=['date', 'test'])
results = df2_obj.get_dtype_counts().sort_index()
expected = Series({'datetime64[ns]': 1, 'int64': 1})
tm.assert_series_equal(results, expected)
def test_from_records_empty(self):
# 3562
result = DataFrame.from_records([], columns=['a', 'b', 'c'])
expected = DataFrame(columns=['a', 'b', 'c'])
tm.assert_frame_equal(result, expected)
result = DataFrame.from_records([], columns=['a', 'b', 'b'])
expected = DataFrame(columns=['a', 'b', 'b'])
tm.assert_frame_equal(result, expected)
def test_from_records_empty_with_nonempty_fields_gh3682(self):
a = np.array([(1, 2)], dtype=[('id', np.int64), ('value', np.int64)])
df = DataFrame.from_records(a, index='id')
tm.assert_index_equal(df.index, Index([1], name='id'))
assert df.index.name == 'id'
tm.assert_index_equal(df.columns, Index(['value']))
b = np.array([], dtype=[('id', np.int64), ('value', np.int64)])
df = DataFrame.from_records(b, index='id')
tm.assert_index_equal(df.index, Index([], name='id'))
assert df.index.name == 'id'
def test_from_records_with_datetimes(self):
# this may fail on certain platforms because of a numpy issue
# related GH6140
if not is_platform_little_endian():
pytest.skip("known failure of test on non-little endian")
# construction with a null in a recarray
# GH 6140
expected = DataFrame({'EXPIRY': [datetime(2005, 3, 1, 0, 0), None]})
arrdata = [np.array([datetime(2005, 3, 1, 0, 0), None])]
dtypes = [('EXPIRY', '<M8[ns]')]
try:
recarray = np.core.records.fromarrays(arrdata, dtype=dtypes)
except ValueError:
pytest.skip("known failure of numpy rec array creation")
result = DataFrame.from_records(recarray)
tm.assert_frame_equal(result, expected)
# coercion should work too
arrdata = [np.array([datetime(2005, 3, 1, 0, 0), None])]
dtypes = [('EXPIRY', '<M8[m]')]
recarray = np.core.records.fromarrays(arrdata, dtype=dtypes)
result = DataFrame.from_records(recarray)
tm.assert_frame_equal(result, expected)
def test_from_records_sequencelike(self):
df = DataFrame({'A': np.array(np.random.randn(6), dtype=np.float64),
'A1': np.array(np.random.randn(6), dtype=np.float64),
'B': np.array(np.arange(6), dtype=np.int64),
'C': ['foo'] * 6,
'D': np.array([True, False] * 3, dtype=bool),
'E': np.array(np.random.randn(6), dtype=np.float32),
'E1': np.array(np.random.randn(6), dtype=np.float32),
'F': np.array(np.arange(6), dtype=np.int32)})
# it is actually tricky to create the record-like arrays and
# keep the dtypes intact
blocks = df._to_dict_of_blocks()
tuples = []
columns = []
dtypes = []
for dtype, b in compat.iteritems(blocks):
columns.extend(b.columns)
dtypes.extend([(c, np.dtype(dtype).descr[0][1])
for c in b.columns])
for i in range(len(df.index)):
tup = []
for _, b in compat.iteritems(blocks):
tup.extend(b.iloc[i].values)
tuples.append(tuple(tup))
recarray = np.array(tuples, dtype=dtypes).view(np.recarray)
recarray2 = df.to_records()
lists = [list(x) for x in tuples]
# tuples (lose the dtype info)
result = (DataFrame.from_records(tuples, columns=columns)
.reindex(columns=df.columns))
# created recarray and with to_records recarray (have dtype info)
result2 = (DataFrame.from_records(recarray, columns=columns)
.reindex(columns=df.columns))
result3 = (DataFrame.from_records(recarray2, columns=columns)
.reindex(columns=df.columns))
# list of tuples (no dtype info)
result4 = (DataFrame.from_records(lists, columns=columns)
.reindex(columns=df.columns))
tm.assert_frame_equal(result, df, check_dtype=False)
tm.assert_frame_equal(result2, df)
tm.assert_frame_equal(result3, df)
tm.assert_frame_equal(result4, df, check_dtype=False)
# tuples are in the order of the columns
result = DataFrame.from_records(tuples)
tm.assert_index_equal(result.columns, pd.Index(lrange(8)))
# test exclude parameter & we are casting the results here (as we don't
# have dtype info to recover)
columns_to_test = [columns.index('C'), columns.index('E1')]
exclude = list(set(range(8)) - set(columns_to_test))
result = DataFrame.from_records(tuples, exclude=exclude)
result.columns = [columns[i] for i in sorted(columns_to_test)]
tm.assert_series_equal(result['C'], df['C'])
tm.assert_series_equal(result['E1'], df['E1'].astype('float64'))
# empty case
result = DataFrame.from_records([], columns=['foo', 'bar', 'baz'])
assert len(result) == 0
tm.assert_index_equal(result.columns,
pd.Index(['foo', 'bar', 'baz']))
result = DataFrame.from_records([])
assert len(result) == 0
assert len(result.columns) == 0
def test_from_records_dictlike(self):
# test the dict methods
df = DataFrame({'A': np.array(np.random.randn(6), dtype=np.float64),
'A1': np.array(np.random.randn(6), dtype=np.float64),
'B': np.array(np.arange(6), dtype=np.int64),
'C': ['foo'] * 6,
'D': np.array([True, False] * 3, dtype=bool),
'E': np.array(np.random.randn(6), dtype=np.float32),
'E1': np.array(np.random.randn(6), dtype=np.float32),
'F': np.array(np.arange(6), dtype=np.int32)})
# columns is in a different order here than the actual items iterated
# from the dict
blocks = df._to_dict_of_blocks()
columns = []
for dtype, b in compat.iteritems(blocks):
columns.extend(b.columns)
asdict = {x: y for x, y in compat.iteritems(df)}
asdict2 = {x: y.values for x, y in compat.iteritems(df)}
# dict of series & dict of ndarrays (have dtype info)
results = []
results.append(DataFrame.from_records(
asdict).reindex(columns=df.columns))
results.append(DataFrame.from_records(asdict, columns=columns)
.reindex(columns=df.columns))
results.append(DataFrame.from_records(asdict2, columns=columns)
.reindex(columns=df.columns))
for r in results:
tm.assert_frame_equal(r, df)
def test_from_records_with_index_data(self):
df = DataFrame(np.random.randn(10, 3), columns=['A', 'B', 'C'])
data = np.random.randn(10)
df1 = DataFrame.from_records(df, index=data)
tm.assert_index_equal(df1.index, Index(data))
def test_from_records_bad_index_column(self):
df = DataFrame(np.random.randn(10, 3), columns=['A', 'B', 'C'])
# should pass
df1 = DataFrame.from_records(df, index=['C'])
tm.assert_index_equal(df1.index, Index(df.C))
df1 = DataFrame.from_records(df, index='C')
tm.assert_index_equal(df1.index, Index(df.C))
# should fail
pytest.raises(ValueError, DataFrame.from_records, df, index=[2])
pytest.raises(KeyError, DataFrame.from_records, df, index=2)
def test_from_records_non_tuple(self):
class Record(object):
def __init__(self, *args):
self.args = args
def __getitem__(self, i):
return self.args[i]
def __iter__(self):
return iter(self.args)
recs = [Record(1, 2, 3), Record(4, 5, 6), Record(7, 8, 9)]
tups = lmap(tuple, recs)
result = DataFrame.from_records(recs)
expected = DataFrame.from_records(tups)
tm.assert_frame_equal(result, expected)
def test_from_records_len0_with_columns(self):
# #2633
result = DataFrame.from_records([], index='foo',
columns=['foo', 'bar'])
expected = Index(['bar'])
assert len(result) == 0
assert result.index.name == 'foo'
tm.assert_index_equal(result.columns, expected)
def test_to_frame_with_falsey_names(self):
# GH 16114
result = Series(name=0).to_frame().dtypes
expected = Series({0: np.float64})
import pandas as pd
import numpy as np
import librosa
import os
import glob
from sphfile import SPHFile
import re
import featuresHelper as featHelper
# Custom headers to use when parsing the annotations
annotationsHeaders=["filename", "_", "speakerName", "start", "end"]
# This annotations pickle was created beforehand by merging all the annotation files.
# We pre-compute this to save time. (See featuresHelper.mergeAnnotationsToPickle)
annotations = pd.read_pickle("TEDLIUM-full_annotations.pkl")
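# For reference, a minimal sketch of how a merged annotations pickle like the
# one above could be built from the per-talk TEDLIUM .stm files. This is only
# an illustration of the idea behind featHelper.mergeAnnotationsToPickle; the
# actual helper and the .stm field layout assumed here may differ:
#
#   rows = []
#   for stm_f in glob.glob(os.path.join(pathToData, "*.stm")):
#       with open(stm_f) as stm:
#           for line in stm:
#               # assumed field order: filename channel speaker start end ...
#               rows.append(line.split()[:5])
#   pd.DataFrame(rows, columns=annotationsHeaders).to_pickle(
#       "TEDLIUM-full_annotations.pkl")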
pathToData = os.path.join(featHelper.BASE_DIR, 'tedlium', 'data')
features = []
# Iterate through each sound file and extract the features
for index, row in annotations.iterrows():
file_name = os.path.join(pathToData,str(row["filename"])) + '.sph'
if re.search(r"^S\d+$", row["speakerName"]):
continue
# Fragments that are not speech are labeled as follows:
# WadeDavis_2003 1 S140 1313.09 1321.34
# Thus, we can recognize them by the third column
# class_label = 'noise' if re.search("^S\d+$", row["speakerName"]) else "speech"
class_label = 'speech'
start = row["start"]
end = row["end"]
data = featHelper.extractMFCC(file_name, float(start), float(end))
features.append([data, class_label])
if index == 1158:
break
# Convert into a pandas DataFrame
featuresDF = pd.DataFrame(features, columns=['feature', 'class_label'])  # column names assumed
# coding: utf-8
# In[30]:
#-----------------------------------------------------------------------------------------------------------------
# Simulation experiment
#
# Network:
# A --> gene set C
#
# Algorithm
# Let gene A be a TF:
# if gene A expression > threshold_A:
# genes in set C are set to some proportion of the expression of gene A
#
# Apply this algorithm for each sample in the compendium (essentially adding a signal to the existing gene expression data in the compendium)
#
# Hyperparameters should include:
# 1. Gene A
# 2. Size of gene sets C
# 3. Proportion of gene A expression
# 4. Thresholds
# 5. Percentage to 1.0 (effect size)
# 6. Log file with hyperparameter selections
#-----------------------------------------------------------------------------------------------------------------
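# A toy walk-through of the algorithm above (made-up numbers): with
# threshold_A = 0.5 and proportion = 1.0, a sample where gene A reads 0.8 has
# every gene in set C overwritten with 1.0 * 0.8 = 0.8, while a sample where
# gene A reads 0.3 (below threshold) keeps its original values for set C.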
get_ipython().run_line_magic('load_ext', 'autoreload')
get_ipython().run_line_magic('autoreload', '2')
import os
import pandas as pd
import numpy as np
import random
import seaborn as sns
from functions import generate_input, vae, def_offset, interpolate, pca, plot
from numpy.random import seed
randomState = 5
seed(randomState)
# In[2]:
# Hyperparameters
# Transcription factors
# If empty, genes will be randomly assigned
geneA = ''
# Size of the gene set that will be regulated by gene A
geneSetC_size = 1000
# Percentage of gene A expression to use to set new value for each gene in set C
proportion = 1.0
# Threshold for activation of gene A
thresholdA = 0.5
# Effect size for genes in gene set C (see hyperparameter 5 above)
effect_sizeA = 0.5
# Name of analysis
analysis_name = 'sim_lin_test'
# In[3]:
# Load arguments
data_file = os.path.join(os.path.dirname(os.getcwd()), "data", "all-pseudomonas-gene-normalized.zip")
# In[4]:
# Read in data
data = pd.read_table(data_file, header=0, sep='\t', index_col=0, compression='zip').T
data.head(5)
# In[5]:
# Randomly select gene A if not specified
# Note: 'replace=False' indicates sampling WITHOUT replacement
if not geneA:
gene_ids = list(data.columns)
[geneA] = np.random.choice(gene_ids, size=1, replace=False)
print(geneA)
# In[6]:
# checkpoint
assert(len(gene_ids)==5549)
# In[7]:
# Randomly select genes for gene set C
# remove() doesn't return a value; it removes the element from the list in place
gene_ids.remove(geneA)
print(len(gene_ids))
# Random sample of genes for set C
geneSetC = random.sample(gene_ids, geneSetC_size)
print(len(geneSetC))
# In[8]:
# checkpoint
assert(geneA not in geneSetC)
# In[9]:
# checkpoint
# print(data[geneA])
# In[10]:
# checkpoint
# data.loc[data[geneA]>thresholdA,geneA]
# In[11]:
# checkpoint: before transformation
# data.loc[data[geneA]<=thresholdA,geneSetC[0]]
# In[12]:
# checkpoint
# plot expression of select gene C across all samples BEFORE transformation
# Randomly from gene set C
geneC = random.sample(geneSetC, 1)[0]
# Dataframe with only gene C and only gene A
geneC_only = pd.DataFrame(data[geneC], index=data.index, columns=[geneC])
geneA_only = pd.DataFrame(data[geneA], index=data.index, columns=[geneA])
# Join
X = pd.merge(geneA_only, geneC_only, left_index=True, right_index=True)
# Plot
sns.regplot(x=geneA, y=geneC, data=X, scatter=True)
# In[13]:
# Loop through all samples
num_samples = data.shape[0]  # samples are rows
for sample_id in data.index:
row = data.loc[sample_id]
if data.loc[sample_id,geneA] > thresholdA:
# Scale genes by some fixed percentage
for gene in geneSetC:
data.loc[sample_id,gene] = proportion*data.loc[sample_id,geneA]
# if any exceed 1 then set to 1 since gene expression is normalized
data[data>=1.0] = 1.0
# In[14]:
# checkpoint
# plot expression of select gene C across all samples AFTER transformation
# Dataframe with only gene C and only gene A
geneC_only = pd.DataFrame(data[geneC], index=data.index, columns=[geneC])
import networkx as nx
import numpy as np
import pandas as pd
from flaky import flaky
from pytest import approx
from dowhy.gcm import AdditiveNoiseModel, distribution_change, distribution_change_of_graphs, fit, \
ProbabilisticCausalModel, EmpiricalDistribution
from dowhy.gcm.ml import create_linear_regressor
from dowhy.gcm.shapley import ShapleyConfig
@flaky(max_runs=5)
def test_distribution_change():
X0 = np.random.uniform(-1, 1, 1000)
X1 = 2 * X0 + np.random.normal(0, 0.1, 1000)
X2 = 0.5 * X0 + np.random.normal(0, 0.1, 1000)
X3 = 0.5 * X2 + np.random.normal(0, 0.1, 1000)
original_observations = pd.DataFrame({'X0': X0, 'X1': X1, 'X2': X2, 'X3': X3})
X0 = np.random.uniform(-1, 1, 1000)
X1 = 2 * X0 + np.random.normal(0, 0.1, 1000)
X2 = 2 * X0 + np.random.normal(0, 0.1, 1000)
X3 = 3 * X2 + np.random.normal(0, 0.1, 1000)
outlier_observations = pd.DataFrame({'X0': X0, 'X1': X1, 'X2': X2, 'X3': X3})
causal_model = ProbabilisticCausalModel(nx.DiGraph([('X0', 'X1'), ('X0', 'X2'), ('X2', 'X3')]))
_assign_causal_mechanisms(causal_model)
results = distribution_change(causal_model,
original_observations,
outlier_observations,
'X3',
shapley_config=ShapleyConfig(n_jobs=1))
assert results['X3'] > results['X2']
assert results['X2'] > results['X0']
assert 'X1' not in results
assert results['X0'] == approx(0, abs=0.15)
@flaky(max_runs=5)
def test_distribution_change_of_graphs():
X0 = np.random.uniform(-1, 1, 1000)
X1 = 2 * X0 + np.random.normal(0, 0.1, 1000)
X2 = 0.5 * X0 + np.random.normal(0, 0.1, 1000)
X3 = 0.5 * X2 + np.random.normal(0, 0.1, 1000)
original_observations = pd.DataFrame({'X0': X0, 'X1': X1, 'X2': X2, 'X3': X3})
X0 = np.random.uniform(-1, 1, 1000)
X1 = 2 * X0 + np.random.normal(0, 0.1, 1000)
X2 = 2 * X0 + np.random.normal(0, 0.1, 1000)
X3 = 3 * X2 + np.random.normal(0, 0.1, 1000)
outlier_observations = pd.DataFrame({'X0': X0, 'X1': X1, 'X2': X2, 'X3': X3})
import hashlib
import inspect
import os.path
import re
import warnings
import h5py
import json
import pprint
import yaml
import uuid
import pandas as pd
import copy
import logging
from pathlib import Path
# import dpath.util
# from . import dpath
from mkpy import dpath
import numpy as np
import matplotlib.pyplot as plt
from mkpy import mkio, pygarv, h5tools
from mkpy.codetagger import CodeTagger
from . import current_function, indent, log_exceptions
from mkpy import get_ver
__version__ = get_ver()
logging.info("Entering " + __name__)
# FIX ME: do something w/ custom exceptions
class BadChannelsError(Exception): # from NJS, deprecated
pass
class BadCalibrateCallError(Exception):
pass
class DuplicateLocationLabelError(Exception):
pass
class MightyWeenieCals(UserWarning):
pass
class EpochSpansBoundary(UserWarning):
pass
class LogRawEventCodeMismatch(UserWarning):
pass
class DigRecordsNotSequential(UserWarning):
pass
class mkh5:
"""Import and prepare ERPSS single-trial data for cross-platform analysis.
This class provides the user API for converting compressed binary
EEG data files into readily accessible HDF5 files.
Parameters
----------
h5_fname : str
Path to a new or existing HDF5 file used as the database.
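Example
-------
A typical workflow sketch; the file and data group names are illustrative
and the create_mkdata() argument order shown here is only indicative:
    from mkpy import mkh5
    myh5 = mkh5.mkh5("sub01.h5")   # create or open the HDF5 database
    myh5.create_mkdata("sub01", "sub01.crw", "sub01.log", "sub01.yhdr")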
"""
# BEGIN DEPRECATED dtypes
# _utc = 'u4' # stub for time stamp in microseconds
# _bin_num = 'u4' # 0-base numeric bin index from blf
# END DEPRECATED dtypes
# _chan_num = 'u4' # 0-base numeric channel index from dig header
# _mkAD = 'i2' # kutaslab AD samples are int16
# _art_flag = 'u2' # bit mask for bad channels 0 = good, 1 = bad
# numpy dtypes for the data coming from dig .crw/.raw
_evtick = "u4" # dig crw sample counter, mis-named clk_tick in log2asci
_evcode = "i2" # numeric event codes from dig marktrack *AND* log
_log_ccode = "u2" # numeric condition code from log
_log_flag = "u2" # numeric condition code from log
_mk_EEG = "f2" # kutaslab 12-bit AD or 16 bits after calibrating
_epoch_t = "i8" # positive and negative samples for interval data
_pygarv = "uint64" # 64-bit column to track up to 64 pygarv data tests
# white list of dig raw (*NOT LOG*) event codes for
# splitting the .raw/.crw into mkh5 dblocks
_dig_pause_marks = (-16384,)
# HDF5 slashpath to where epochs tables are stashed in the mkh5 file
EPOCH_TABLES_PATH = "_epoch_tables"
class Mkh5Error(Exception):
"""general purposes mkh5 error"""
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
class Mkh5CalError(Exception):
"""mkh5 calibration error"""
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
class Mkh5FormatError(Exception):
"""raised on mkh5 format violations"""
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
class YamlHeaderFormatError(Exception):
"""informative errors for bad yhdr YAML files"""
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
class EpochsTableDataError(Exception):
"""raised for pd.Series data we can't or won't directly convert for HDF5
These include mixed num-like and str-like * booleans with missing data
"""
def __init__(self, pd_data_type, series):
if series.name is None:
series.name = "un_named_series"
if series.hasnans:
self.msg = (
"\npandas.Series " + series.name + " {0} data type"
" with missing data/NaN not supported for "
" epochs tables"
).format(pd_data_type)
else:
self.msg = (
"\npandas.Series "
+ series.name
+ " {0} data type not supported for"
" epochs tables"
).format(pd_data_type)
print(self.msg)
class HeaderIO:
"""private-ish helper class for managing mkh5 datablock header information
mkh5 header structures are python dictionaries, serialized for
hdf5 storage as JSON strings, and tucked into the hdf5
attribute so they travel with the datablock.
The dblock header holds information collected/generated from
various sources. Some is read from dig .crw/.log file headers,
some is generated at runtime as the dig data is converted to
mkh5 format. Some is generated/merged in at runtime when the
YAML header info file is processed
* native .crw header from the info dict returned by
mkio._read_header()
* mkh5/hdf5 info added by mkh5_read_raw_log()
- miscellaneous
- data stream specs, 1-1 with the dblock data columns
* supplementary information specified in a YAML format text
file and loaded along with the ``.crw`` and ``.log`` files
when they are converted to the dblock, HDF5 format.
The .crw/.dig header can be extended by loading it from a YAML
file. See _load_yhdr() docstring for specs.
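A minimal sketch of the storage scheme (the dataset path and header values
are illustrative; "json_header" is the attribute key used by this class):
    import json, h5py
    hdr = {"name": "dig", "samplerate": 250.0}
    with h5py.File("example.h5", "a") as h5:
        dblock = h5.require_dataset("sub01/dblock_0", shape=(1,), dtype="i2")
        dblock.attrs["json_header"] = json.dumps(hdr)        # serialize
        roundtrip = json.loads(dblock.attrs["json_header"])  # deserialize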
"""
class HeaderIOError(Exception):
def __init__(self, value):
self.value = value
def __str__(self):
return str(self.value)
class YAMLClobberError(Exception):
"""raised when a YAML header file tries to overwrite an mkh5 header reserved word
"""
def __init__(self, hio, keyword, yhdr_f=None):
msg = "These are mkh5 header reserved words: "
msg += " ".join([k for k in hio._mkh5_header_types.keys()])
msg += "{0} YAML header files are not allowed to change {1}".format(
yhdr_f, keyword
)
# template for minimal mkh5 datablock header
# These are top-level keys. The header checkers enforce these
# keys and data types and the streams keys and data types.
# Everything else in the header is ad lib.
_mkh5_header_types = {
# as returned by mkio._load_header() from the .crw/.raw
"name": str, # hardcoded in mkio as "dig"
"magic": int, # magic number from dig header
"subdesc": str, # header["subdes"]
"expdesc": str, # header["expdes"]
"odelay": int, # header["odelay"], # ms from trigger to stim about 8 @60Hz
"samplerate": float, # hz,
"recordduration": float, # length of each record in seconds
"recordsize": int, # e.g., 256 # ns * nr of samples in each data record
"nrawrecs": int, # number of raw records
"nchans": int, # : header["nchans"], # number of channels
# set during when mkh5._read_raw_log() reads .crw, .log
"eeg_file": str, # .crw file name as passed to _read_raw_log
"eeg_file_md5": str,
"log_file": str, # .log file name as passed to _read_raw_log
"log_file_md5": str,
# ('uuid_file', str), # not implemented
"streams": dict, # items are 1-1 (unordered) for dblock columns
"h5_dataset": str, # set upon construction to the dblock h5py.Dataset.name
}
# minimal stream item upon loading into .crw/.log info into a mkh5.dblock_N
_mkh5_header_stream_types = {
"name": str, # dig channel name, e.g., 'lle', 'MiPf'
"jdx": int, # column index in dblock_N dataset, 0, 1, ...
"source": str, # source pfx[NNNN] where pfx = eeg|log|other, NNNN enumerates
"dt": str, # string np.dtype, e.g., '<f2', '<i4'
# from mkh5._h5_update_eeg() where h5_path + dblock_id is the
}
def __init__(self):
"""wake up"""
self._json_key = (
"json_header" # key used to access h5py.Dataset.attrs[]
)
self._header = None
self._slicer = None
# ------------------------------------------------------------
# PUBLIC CRUD
#
# CRU *D*: Delete not implemented
# ------------------------------------------------------------
@property
def header(self):
"""expose header data like a read-only attribute"""
return self._header
# Create/Update
def new(self, hdr_dict, yhdr_f):
"""merge a dictionary and dict from the YAML file into a well-formed
mkh5 datablock header or die
"""
self._create(hdr_dict, yhdr_f)
self._check_header()
# read
def get(self, dblock):
"""load header info from dblock into self._header
Parameters
----------
dblock : h5py.Dataset
The HDF5 dataset whose attribute 'json_header' holds the header JSON string.
"""
if not isinstance(dblock, h5py.Dataset):
raise TypeError(
"dblock must be an h5py.Dataset not " + dblock.__class__
)
assert self._json_key in dblock.attrs.keys()
json_str = dblock.attrs[self._json_key]
self._header = json.loads(
json_str
) # decode json into the header dict
self._check_streams(
dblock
) # are header streams 1-1 with datablock columns?
self._check_header() # general header check
# update
def set(self, dblock):
"""jsonify the current self._header as value of dblock.attrs[self._json_key]
Parameters
----------
dblock : h5py.Dataset
writeable mkh5 datablock reference
"""
if not isinstance(dblock, h5py.Dataset):
raise TypeError(
"dblock must be an h5py.Dataset not " + dblock.__class__
)
self._header["h5_dataset"] = dblock.name
self._check_streams(dblock)
self._check_header()
# good to go ...jsonify stuff the string into the hdf5 attribute
json_header = json.dumps(self._header)
if len(json_header) > 2 ** 16:
msg = "jsonified header info exceeds 64KB ... too big for hdf5 attribute"
raise ValueError(msg)
dblock.attrs[self._json_key] = json_header
# C *R *UD: header content retrieval
def set_slicer(self, slicer_f):
"""load YAML header slicer for selecting subsets of mkh5 header values
Parameters
----------
slicer_f : str
YAML file in mkh5 header slicer format
Returns
-------
None
side effect: sets self._slicer
* The mkh5 header is a tree structure (dict) with branches
that terminate in data.
* The mkh5 header slicer is an mkh5 header subtree
"template" that contains
- terminating branches only
- string labels as terminals, e.g., col_0, col_1
e.g., ['key_0', 'key_1', ..., 'key_i', col_0]
* Walking through header slicer with dpath.util.get(path)
fetches the data value at the end of the path and we
label it with the slicer column name like so
[ (col_0, val_0), ... (col_n, val_n)]
This converts neatly to wide tabular format
+---------+------+----------+
|col_0 | ... | col_j |
+=========+======+==========+
| value_1 | ... | value_n |
+---------+------+----------+
Examples
.. code-block:: yaml
# here is some YAML header info
---
runsheet:
age: 22
SAT_math: 720
SAT_verbal: 680
handedness: L/L
mood_VAS: 4.5
The YAML header slicer follows matching paths into that header
to pluck out the terminal data values (leafs) and (re-)label them
.. code-block:: yaml
# here is an extractor for the header
---
runsheet:
mood_VAS: mood
handedness: fam_hand
age: age
.. note::
``key:value`` order does not matter
This next slicer specifies the same **paths** into the
header tree and extracts exactly the same **values**
.. code-block:: yaml
---
runsheet:
age: age
handedness: fam_hand
mood_VAS: mood
The slicer paths are the same for both:
runsheet/mood_VAS/mood
runsheet/handedness/fam_hand
runsheet/age/age
Algorithm
* HeaderIO.get_slices() extracts the header values at the end of the path, i.e.,
22, L/L, 4.5 and pairs each datum with its path-matching slicer label like so
[ (sub_age, 22), (fam_hand, 'L/L') ]
* mkh5.get_event_table() converts these to wide-format and
merges them with the rest of the single trial event code
column information it gets from the code tag mapper.
sub_age fam_hand
22 'L/L'
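A typical call pattern (the slicer file name is illustrative):
    hio.get(dblock)                # load a datablock's header first
    hio.set_slicer("slicer.yml")   # then load the YAML slicer
    slices = hio.get_slices()      # -> [(col_name, value), ...]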
"""
self._slicer_f = slicer_f
self._slicer = self._load_yaml_slicer(slicer_f)
return None
def _load_xlsx_slicer(self, slicer_f):
"""load code slicer from Excel .xlsx file and return pandas
DataFrame. Default is first sheet, use slicer_f!sheet_name
to select sheet_name
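e.g., an illustrative "codemap.xlsx!slicer" reads sheet "slicer", while
"codemap.xlsx" alone reads the first sheet.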
"""
slicer = None
# if a sheet is specified w/ slicer!sheet use it, otherwise
# set to 0 for default first sheet
slicer_f_reob = re.match(
r"(?P<xl_f>.+\.xls[xm])[\!]*(?P<sheet_name>.*)$", slicer_f
)
xl_f = slicer_f_reob["xl_f"]
sheet_name = slicer_f_reob["sheet_name"]
if len(sheet_name) == 0:
sheet_name = 0
slicer = pd.read_excel(
xl_f, sheet_name=sheet_name, header=0 # , index_col="Index"
)
if slicer is not None:
return slicer
def _load_txt_slicer(self, slicer_f):
"""load tab-separated UTF-8 text file and return pandas DataFrame"""
raise NotImplementedError
with open(slicer_f, "r") as d:
mapper = pd.read_table(
slicer_f, delimiter="\t", header=0 # , index_col="Index"
)
return mapper
def _load_yaml_slicer(self, slicer_f):
"""load yaml mapper file and return pandas DataFrame"""
# slurp the slicer
slicer_dict, hdocs, md5 = self._load_yaml_docs(slicer_f)
slicers = []
slicer_paths = [
dpath.path.paths_only(x)
for x in dpath.path.paths(slicer_dict, dirs=False, leaves=True)
]
for path in slicer_paths:
slicers.append((path[-1], path[:-1]))
slicers = dict(slicers)
return slicers
def get_slices(self):
"""slice out data values from dblock header for use in event table columns
Uses self._header (loaded by get()) and self._slicer (set by set_slicer()),
where the slicer is a dict of col_name: slash_pattern and
* col_name (string) is the dict key that will appear as a table column heading
* search_path (list of strings) as ['key1', 'key2', ... 'key_n'] probes the header
Returns
-------
slices : list of 2-ples (possibly empty)
each tuple is (col_name, datum) where
datum : object
leaf returned by dpath.util.get(self._header, search_path)
Raises
------
RuntimeError if HeaderIO instance doesn't have self._header or self._slicer dicts
RuntimeError if dpath.util.get finds multiple values
"""
if self._header is None or not isinstance(self._header, dict):
msg = (
"load header from a datablock with HeaderIO.get(dblock) "
"before slicing"
)
raise RuntimeError(msg)
if self._slicer is None or not isinstance(self._slicer, dict):
msg = (
"set self._slicer = HeaderIO._load_yaml_docs(yaml_f) "
"before slicing"
)
raise RuntimeError(msg)
slices = list()
hdr_paths = [
p
for p in dpath.path.paths(
self._header, dirs=False, leaves=False
)
]
for k, v in self._slicer.items():
this_slice = None
try:
datum = dpath.util.get(self._header, v)
this_slice = (k, datum)
except Exception as fail:
if isinstance(fail, KeyError):
this_slice = (k, float("NaN")) # key not found
elif isinstance(
fail, ValueError
): # multiple values ... shouldn't happen
msg = "mutiple leaves match dpath glob ... but how?"
raise ValueError(msg)
else:
print("some horrible error in HeaderIO.get_slices()")
raise fail
slices.append(this_slice)
return slices # possibly empty
# ------------------------------------------------------------
# PRIVATE-ish CRUD
# ------------------------------------------------------------
# this is the only way to form an mkh5 header dict ... seed_dict + YAML
def _create(self, seed_dict, yhdr_f):
"""merges seed_dict ON TOP of the YAML and set self._header"""
# yhdr_f Path should be stringified by mkh5.create_mkdata
if not isinstance(yhdr_f, str):
raise HeaderIOError(
f"please report mkpy bug: {yhdr_f} must be a str not {type(yhdr_f)}"
)
if not isinstance(seed_dict, dict):
msg = "seed_dict is not a dict"
raise TypeError(msg)
self._header = seed_dict
# mkh5 headers know about the h5_path/block_id but
# dicts pulled from .crw headers don't, so add a stub
for k in ["name", "h5_dataset"]:
if k not in seed_dict:
self._header[k] = ""
# load YAML info and merge it in
yhdr = self._load_yhdr(yhdr_f)
assert isinstance(yhdr, dict)
self._update_from_dict(yhdr, keep_existing=True)
self._check_header()
def _update_from_dict(self, new_dict, keep_existing=True):
"""Update current header dictionary from other dictionary.
Parameters
----------
keep_existing : bool, default is True
``True`` protects self._header[k] from being overwritten
by new_dict[k]. ``False`` allows ``new_dict[k]`` to overwrite
``self._header[k]``
Notes
-----
dpath.util.merge(A,B) merges B **ON TOP** of A, so
B[key]:value sets (overwrites) A[key]:value.
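Example (illustrative values): with self._header == {"a": 1} and
new_dict == {"a": 2, "b": 3}, keep_existing=True leaves "a" at 1 and
adds "b": 3, while keep_existing=False overwrites "a" with 2.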
"""
self._check_header()
if keep_existing:
# merge existing on top of the new to preserve existing
dpath.util.merge(new_dict, self._header)
self._header = copy.deepcopy(new_dict)
else:
# merge new on top of existing to clobber the new
dpath.util.merge(self._header, new_dict)
self._header = copy.deepcopy(self._header)
self._check_header()
def _update_from_slashpaths(self, slash_vals):
""" header date via dpath (slash_path:value) syntax
Parameters:
slash_vals (slashpath, value) 2ple or iteratble of them
"""
self._check_header()
for sv in slash_vals:
# gate keeper
if not (
isinstance(sv, tuple)
and len(sv) == 2
and isinstance(sv[0], str)
):
msg = (
"to set header with slashpath use a ('slash/path', value) 2-ple"
+ " or a list of them"
)
raise TypeError(msg)
# FIX ME: protect reserved keys??
old_val = dpath.util.search(self._header, sv[0])
if len(old_val) == 0 and isinstance(old_val, dict):
# print('new key:value ', sv[0], sv[1])
dpath.util.new(self._header, sv[0], sv[1])
else:
# print('setting existing key ', sv[0], 'old: ', old_val, 'new: ', sv[1])
nset = dpath.util.set(self._header, sv[0], sv[1])
if nset is None:
raise RuntimeError(
"failed to set " + sv[0] + " = " + sv[1]
)
self._check_header()
def _load_yaml_docs(self, yml_f):
"""generic multi-doc YAML loader for header data and extractor files"""
# FIX ME: add YAML linter
# check for legal yaml
with open(yml_f, "r") as f:
yml_str = f.read()
hdocs = yaml.load_all(yml_str, Loader=yaml.SafeLoader)
yml_f_md5 = hashlib.md5(yml_str.encode("utf8")).hexdigest()
# load up the docs w/ modicum of error checking
yml = dict()
doc_names = [] #
for i, hdoc in enumerate(hdocs):
if hdoc is None:
msg = "uh oh ... empty YAML document in "
msg += "{0} perhaps a stray ---".format(yml_f)
raise mkh5.YamlHeaderFormatError(msg)
if "name" not in hdoc.keys():
msg = "\n{0}\n".format(self._load_yaml_docs.__doc__)
msg += "{0} document {1} does not have a name".format(
yml_f, i
)
raise mkh5.YamlHeaderFormatError(msg)
if len(hdoc["name"]) == 0:
msg = "{0} document {1}: length of name appears to be 0".format(
yml_f, i
)
raise mkh5.YamlHeaderFormatError(msg)
if hdoc["name"] in doc_names:
msg = "{0} duplicate document name {1}".format(
yml_f, hdoc["name"]
)
raise mkh5.YamlHeaderFormatError(msg)
else:
# the "dig" keys are not nested under the document name
if hdoc["name"] == "dig":
dpath.util.merge(yml, hdoc)
else:
doc_names.append(hdoc["name"])
yml[hdoc["name"]] = hdoc
return (yml, doc_names, yml_f_md5)
def _load_yhdr(self, yhdr_f):
"""load a YAML format header extension
Syntax:
* Must conform to YAML spec (?2.0)
* There MUST be at least one YAML document
* EACH YAML docs must contain a ``name`` key and string
value
OPTIONAL
* Additional YAML documents may be added to the file ad lib provided
each document is named with a key-value pair like so
---
name: doc_name
where doc_name is a string and not used for any other
document in the file.
* Additional YAML data may be specified ad lib. to extend
any document or data.
* Apparatus doc is a fixed-format map with these keys and
values
name: "apparatus"
space:
fiducials:
sensors:
streams:
The ``fiducials``, ``sensors``, and ``streams`` data are
each given as 2-level maps where the top level key gives
the "name", e.g., lle, MiPf, HEOG, nasion and the nested
key:value pairs give the value of the key, e.g., gain:
10000 for an amplifier channel or x: 18.9 for 3D
coordinate x. This affords easy conversion to tabular
format where top level keys index rows and, nested keys
are column labels, and nested values are column data.
* fiducials must contain top-level keys "nasion", "lpa",
"rpa" and nested keys x, y, z
* sensors must contain top-level keys naming electrodes,
e.g. lle, MiPf and nested keys x, y, z
* streams must contain top-level keys naming digital data
channels, e.g., lle, MiPf, HEOG (note these are
recordings/data streams *NOT* electrodes). Nested keys
must contain pos, neg indicating, respectively, the
positive polarity sensor and its reference (a string
name) which may be another electrode (A1, lhz) or not
(avg).
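A minimal apparatus document sketch (coordinate values, channel names,
and the contents of the space map are illustrative only):
    ---
    name: apparatus
    space:
      coordinates: cartesian
    fiducials:
      lpa: {x: -6.9, y: 0.0, z: 0.0}
      nasion: {x: 0.0, y: 9.5, z: 0.0}
      rpa: {x: 6.9, y: 0.0, z: 0.0}
    sensors:
      MiPf: {x: 0.0, y: 8.0, z: 5.0}
    streams:
      MiPf: {pos: MiPf, neg: A1}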
"""
yhdr, doc_names, yhdr_md5 = self._load_yaml_docs(yhdr_f)
# make sure the YAML dict doesn't step on basic top-level header info
new_keys = yhdr.keys()
for (h_key, h_type) in self._mkh5_header_types.items():
if h_key in new_keys:
raise self.YAMLClobberError(self, h_key, yhdr_f)
# enforce special case requirements here
if "apparatus" in doc_names:
for m in ["space", "fiducials", "sensors", "streams"]:
if m not in yhdr["apparatus"].keys():
msg = "{0} apparatus document {1} map not found".format(
yhdr_f, m
)
raise mkh5.YamlHeaderFormatError(msg)
# self-identify
yhdr["yhdr_file"] = yhdr_f
yhdr["yhdr_file_md5"] = yhdr_md5
return yhdr
def _check_header(self):
"""enforce mandatory minimum mkh5 header data structure"""
# check for mandatory keys and values of the right type
header_keys = self._header.keys()
for h_key, dtype in self._mkh5_header_types.items():
if h_key not in header_keys:
msg = (
f'uh oh ... mandatory key "{h_key}" is missing from '
"mkh5 dblock header:\n"
)
msg += pprint.pformat(self._header.keys())
raise RuntimeError(msg)
if not isinstance(self._header[h_key], dtype):
msg = "uh oh ... bad header value datatype: "
msg += "{0} should be {1} not {2}".format(
h_key, dtype, self._header[h_key].__class__
)
raise RuntimeError(msg)
# check the stream items
mand_stream_keys = self._mkh5_header_stream_types.keys()
mand_stream_types = self._mkh5_header_stream_types.values()
for (k, v) in self._header["streams"].items():
for sk, sv in v.items():
if sk in mand_stream_keys and not isinstance(
sv, self._mkh5_header_stream_types[sk]
):
msg = 'uh oh ... stream {0}["{1}"] bad value datatype: '.format(
sk, sv
)
msg += "should be {1} not {2}".format(
self._mkh5_header_stream_types[sk], sv.__class__
)
raise RuntimeError(msg)
def _check_streams(self, dblock):
"""enforce agreement between self._header streams and the data block
Parameters
----------
dblock (h5py.Dataset) readble mkh5 data block (reference)
* checks the fields in _mkh5_header_stream_types
_mkh5_header_stream_types = {
'name': str, # dig channel name, e.g., 'lle', 'MiPf'
'jdx': int, # column index in dblock_N dataset, 0, 1, ...
'source': str, # source pfx[NNNN] where pfx = eeg|log|other, NNNN enumerates
'dt': str, # string np.dtype, e.g., '<f2', '<i4'
# from mkh5._h5_update_eeg() where h5_path + dblock_id is the
Raises:
RuntimeError on mismatch
columns: labels, column order=jdx, and data type
"""
self._check_header() # first things first
for jdx, col in enumerate(dblock.dtype.names):
try:
assert col in self.header["streams"].keys()
assert jdx == self.header["streams"][col]["jdx"]
assert (
dblock.dtype[col] == self.header["streams"][col]["dt"]
)
except:
msg = "uh oh ... header['streams'] is missing a data block column"
raise TypeError(msg)
for k, v in self.header["streams"].items():
try:
assert k in dblock.dtype.names
this_jdx = v["jdx"]
assert dblock.dtype.names[this_jdx] == k
assert dblock.dtype[this_jdx] == v["dt"]
except:
msg = "uh oh ... header['streams'] has an extra stream"
raise TypeError(msg)
# log data types ... ticks are uint64, everything else can be int16
# _log_dtype = np.dtype([
# ("log_evticks", _evtick),
# ("log_evcodes", _evcode),
# ("log_ccodes", _log_ccode),
# ("log_flags", _log_flag),
# ])
# structure to merge dig marktrack and log info
# FIX ME FOR EPOCHS
_event_dtype = np.dtype(
[
("evticks", _evtick),
("raw_evcodes", _evcode),
("log_evcodes", _evcode),
("log_ccodes", _log_ccode),
("log_flags", _log_flag),
]
)
_dblock_slicer_dtype = np.dtype(
[
("start_samps", _epoch_t),
("anchor_samps", _epoch_t),
("stop_samps", _epoch_t),
]
)
# define a datatype for the bin-event table ... essentially a decorated log
_bin_event_dtype = np.dtype(
[
("evticks", _evtick),
("raw_evcodes", "i2"),
("log_evcodes", "i2"),
("log_ccodes", "u2"),
("log_flags", "u2"),
("bin_nums", "u2"),
("bin_descs", "S64"), # icky ... hardcoded string lengths
]
)
# FIX ME ... non-event type epoch?
_epoch_dtype = np.dtype(
[
("anchor_ticks", _evtick),
("raw_evcodes", _evcode),
("epoch_starts", _evtick),
("epoch_stops", _evtick),
]
)
# for dumping instance info ...
# pp = pprint.PrettyPrinter(indent=4)
def __init__(self, h5name):
"""initialize and set mkh5 file name for this instance. If the file
doesn't exist, create it. If it exists, test read/writeability
Parameters:
h5name (string) file path to the mkh5 format hdf5 file.
"""
if isinstance(h5name, Path):
h5name = str(h5name)
# each mkh5 instance is tied to one and only one hdf5 format file
logging.info(indent(2, "h5name is " + h5name))
self.h5_fname = h5name
# if file doesn't exist open, an empty hdf5 file
if not os.path.isfile(self.h5_fname):
self.reset_all()
else:
# file exists, warn if not readable or read-writable
with h5py.File(self.h5_fname, "r") as h5:
source_ver = __version__.split(r".")[0] # major version
if "version" in h5.attrs.keys():
file_ver = h5.attrs["version"].split(r".")[0]
else:
file_ver = None
if file_ver is not None and source_ver != file_ver:
msg = "version mismatch: source=={0} file {1}=={2}".format(
source_ver, self.h5_fname, file_ver
)
warnings.warn(msg)
# try to open for read-write and let h5py deal with problems
try:
with h5py.File(self.h5_fname, "r+") as h5:
pass
except:
msg = "{0} is read only".format(self.h5_fname)
warnings.warn(msg)
# h5 introspection
def _headers_equal(self, header1, header2):
"""returns boolean if all header dict keys and values are =="""
raise NotImplementedError("FIX ME WITH dpath.util NOW")
k1 = sorted(header1.keys())
k2 = sorted(header2.keys())
# if k1==k2 can iterate on either set of keys
if k1 != k2 or any([header1[k] != header2[k] for k in k1]):
return False
else:
return True
# ------------------------------------------------------------
# Public artifact tagging
# ------------------------------------------------------------
def garv_data_group(self, h5_data_group_path, skip_ccodes=[0]):
"""Run `pygarv` on all the dblocks under the `h5_data_group_path`.
Parameters
----------
h5_data_group_path : str
name of the h5 datagroup containing the dblocks to screen
skip_ccodes : list of uint, None ([0])
dblocks with log_ccodes in the list are not scanned. Default
is [0] to skip calibration blocks. Setting to None disables
the skipper and scans all dblocks in the data group
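Example (the mkh5 file and data group names are illustrative):
    myh5 = mkh5.mkh5("expt.h5")
    myh5.garv_data_group("sub01")                    # skips cal blocks (ccode 0)
    myh5.garv_data_group("sub01", skip_ccodes=None)  # screens every dblock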
"""
dblock_paths = h5tools.get_dblock_paths(
self.h5_fname, h5_data_group_path
)
for dbp in dblock_paths:
hdr, dblock = self.get_dblock(dbp)
# ccodes can only change on dig start or pause so
# homogenous on unless there is some goofy log
# poking going on ...
log_event_idxs = np.where(dblock["log_evcodes"] != 0)[0]
assert (
dblock["log_ccodes"][log_event_idxs].max()
== dblock["log_ccodes"][log_event_idxs].min()
)
log_ccode = dblock["log_ccodes"][log_event_idxs[0]]
if skip_ccodes is not None:
if log_ccode in skip_ccodes:
msg = "pygarv skipping {0} with " "log_ccode {1}".format(
dbp, log_ccode
)
print(msg)
continue
print("pygarving {0} log_ccode {1}".format(dbp, log_ccode))
with h5py.File(self.h5_fname, "r+") as h5:
h5[dbp]["pygarv"] = pygarv._garv_dblock(hdr, dblock)
# ------------------------------------------------------------
# Public event code tag mapping and epoching utilities
# ------------------------------------------------------------
def get_event_table(self, code_map_f, header_map_f=None):
"""Reads the code tag and header extractor and returns an event lookup table
Parameters
----------
code_map_f : str
Excel, YAML, or tab-separated text, see mkh5 docs for
format details.
header_map_f : str
YAML header extractor file, keys match header keys, values specify
name of the event table column to put the header data
Returns
-------
event_table : pandas.DataFrame
See Note.
Note
----
1. This sweeps the code tag map across the data to generate a lookup
table for specific event (sequence patterns) where the rows specify:
* slashpath to the mkh5 dblock data set and sample index
for pattern-matching events.
* all the additional information for that pattern given in
the code tag map
The event table generated from mkh5 data and the code_map
specification is in lieu of .blf (for EEG epoching and
time-locking), .rts (for event-to-event timing), and .hdr
(for experimental design specification).
2. ``ccode`` Special column. If the code tag map has a column
named ``ccode`` the code finder finds events that match the
code sequence given by the regex pattern **AND** the
log_ccode == ccode. This emulates Kutas Lab `cdbl` event
lookup and to support, e.g., the condition code == 0 for
cals convention and blocked designs where the `ccode`
varies block to block. If the code map does not specify
a ``ccode`` column, the `log_ccode` column is ignored for
pattern matching.
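Example (the code map and header map file names are illustrative):
    myh5 = mkh5.mkh5("expt.h5")
    event_table = myh5.get_event_table("codemap.xlsx", header_map_f="hdr_map.yml")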
"""
# instantiate the codemapper w/ its map and code finder
ctagger = CodeTagger(code_map_f)
if "ccode" in ctagger.code_map.columns:
msg = (
f"\nAs of mkpy 0.2.0 to match events with a codemap regexp pattern, the\n"
f"ccode column in {Path(code_map_f).name} must also match the log_ccode\n"
f"in the datablock. If this behavior is not desired, delete or rename\n"
f"the ccode column in the codemap."
)
warnings.warn(msg)
# set up to extract info from the header
hio = self.HeaderIO()
if header_map_f is not None:
hio.set_slicer(header_map_f)
# fetch all data that have at least one mkh5 datablock (dblock_0)
match_list = []
dgroup_paths = h5tools.get_data_group_paths(self.h5_fname)
with h5py.File(self.h5_fname, "r") as h5:
for dgp in dgroup_paths:
dblock_paths = h5tools.get_dblock_paths(self.h5_fname, dgp)
for dbp in dblock_paths:
assert dgp in dbp # group and data block must agree
hio.get(h5[dbp]) # need this for srate at least
# slice the header if there is an extractor
if hio._slicer is not None:
hdr_data = hio.get_slices()
else:
hdr_data = []
print("searching codes in: " + dbp)
event_idxs = (
h5[dbp]["log_evcodes"] != 0
) # samples w/ non-zero events
dblock_ticks = h5[dbp]["dblock_ticks"][event_idxs]
crw_ticks = h5[dbp]["crw_ticks"][event_idxs]
raw_evcodes = h5[dbp]["raw_evcodes"][event_idxs]
log_evcodes = h5[dbp]["log_evcodes"][event_idxs]
log_ccodes = h5[dbp]["log_ccodes"][event_idxs]
log_flags = h5[dbp]["log_flags"][event_idxs]
# iterate on keys which are the code patterns
for idx, cm in ctagger.code_map.iterrows():
# matches is a list of lists of dict, one dict for each group
code_pattern_matches = ctagger._find_evcodes(
cm["regexp"], dblock_ticks, log_evcodes
)
if code_pattern_matches is not None:
for m in code_pattern_matches:
for mm in m:
match_tick, anchor_tick, is_anchor = (
None,
None,
None,
)
for k, v in mm:
if k == "match_tick":
match_tick = v
if k == "anchor_tick":
anchor_tick = v
if k == "is_anchor":
is_anchor = v
assert all(
[
v is not None
for v in [
match_tick,
anchor_tick,
is_anchor,
]
]
)
if is_anchor:
assert anchor_tick == match_tick
else:
assert anchor_tick != match_tick
# ok, this is the tick of the pattern match
# and it must be unique
tick_idx = np.where(
dblock_ticks == match_tick
)[0]
assert len(tick_idx) == 1
sample_data = [
# ("Index", idx),
("data_group", dgp),
("dblock_path", dbp),
("dblock_tick_idx", tick_idx[0]),
(
"dblock_ticks",
dblock_ticks[tick_idx][0],
),
("crw_ticks", crw_ticks[tick_idx][0]),
(
"raw_evcodes",
raw_evcodes[tick_idx][0],
),
(
"log_evcodes",
log_evcodes[tick_idx][0],
),
(
"log_ccodes",
log_ccodes[tick_idx][0],
),
("log_flags", log_flags[tick_idx][0]),
(
"epoch_match_tick_delta",
0,
), # an event is a one sample epoch
("epoch_ticks", 1),
(
"dblock_srate",
hio.header["samplerate"],
), # for conversion to times
]
# extend sample data w/ the header information
# which may be None
sample_data = sample_data + hdr_data
# extend sample_data w/ the match info and code map row
sample_data = (
sample_data
+ mm
+ list(zip(cm.index, cm))
)
match_list.append(
(sample_data)
) # list of tuples
# pprint.pprint(match_list)
# handle no matches ...
if len(match_list) > 0:
event_table = pd.DataFrame([dict(m) for m in match_list])
# codemap ccode triggers backwards compatibility
# with Kutas Lab ERPSS cdbl
if "ccode" in ctagger.code_map.columns:
event_table = event_table.query("ccode == log_ccodes")
# event_table.set_index("Index", inplace=True)
self._h5_check_events(self.h5_fname, event_table)
return event_table
else:
raise RuntimeError(
"uh oh ... no events found for {0}".format(code_map_f)
)
def _h5_check_events(self, h5_f, e_table):
"""check the match event in event or epoch table agrees with the
dblock data
Parameters
----------
h5_f : str
path to mkh5 format hdf5 file
e_table: (pandas.DataFrame, np.ndarray)
as returned by mkh5.get_event_table()
Returns
-------
None for success
Raises
------
RuntimeError on event_table[e] vs. dblock[e] mismatch or missing columns
The minimum mandatory column names for an event table are
* data_group: full slashpath to the h5py.Group covering a
sequence of dblocks, e.g.,
S001
Expt1/Session1/S047
* dblock_path: full slashpath from the hdf5 root to one of the
daughter dblock_N h5py.Datasets (without leading /), e.g.,
S001/dblock_0
Expt1/Session1/S047/dblock_12
* dblock_ticks: the A/D sample counter which is also the row
index of the dblock where the *matched* event appearing in
the event table occurred.
* match_code: the event code of the regexp pattern-matched
group for this event table row. There is one match code for
each capture group in the regular expression pattern, so
the match code need not be the anchor code
* All anchors are matches. Some matches may not be anchors
* log_evcodes: the sequence of integer event codes occuring at
each dblock tick in the original crw/log file
"""
if isinstance(e_table, np.ndarray):
e_table = pd.DataFrame(e_table)
min_cols = [
"data_group",
"dblock_path",
"dblock_ticks",
"log_evcodes",
"match_code",
]
for c in min_cols:
if not c in e_table.columns:
msg = 'mkh5 event table column "{0}"'.format(c)
msg += " is missing, all these are mandatory:" + " ".join(
min_cols
)
raise RuntimeError(msg)
with h5py.File(h5_f, "r") as h5:
for index, e in e_table.iterrows():
# These should only fail if the datablocks or event table have
# been monkeyed with. Anyone who can do that can chase down
# the assertion exception.
data = h5[e["dblock_path"]][e["dblock_ticks"]]
assert e["data_group"] in e["dblock_path"]
# the log event code must be an anchor or a match
if e["match_code"] != e["anchor_code"]:
assert e["match_code"] == data["log_evcodes"]
else:
assert e["anchor_code"] == data["log_evcodes"]
check_cols = [
col for col in e.index if col in data.dtype.names
]
for col in check_cols:
assert data[col] == e[col]
def _check_epochs_table(self, epochs_table):
"""check a set epochs table for event codes, epoch length, and offset
Parameters
----------
epochs_table : np.ndarray
structured array in event table format with extra columns:
epoch_ticks = fixed epoch duration in units of samples
epoch_match_tick_delta = number of samples from epoch start to matched event code
Returns
-------
None
Raises
------
ValueError
if epoch event codes and data blocks column values do not match the datablocks or
epoch length and start offset values are not uniform across the epochs.
"""
epoch_required_columns = ["epoch_ticks", "epoch_match_tick_delta"]
# epochs tables are an extension of event tables, check the events first
self._h5_check_events(self.h5_fname, epochs_table)
# then epoch length and matched code offset (in samples)
for c in epoch_required_columns:
if c not in epochs_table.dtype.names:
msg = "epochs table missing required column {0}".format(c)
raise ValueError(msg)
vals = np.unique(epochs_table[c])
if len(vals) > 1:
msg = "epochs table column {0} values must be the same: {1}".format(
c, vals
)
raise ValueError(msg)
def _h5_check_epochs_table_name(self, h5_f, epochs_table_name):
"""look up and check a previously set epochs table
Parameters
----------
h5_f : str
name of an mkh5 format HDF5 file, e.g., self.h5_fname or other
epochs_table_name : str
name of an epochs table, must exist in h5_f
Returns
-------
None
Raises
------
ValueError
if something is wrong with the lookup or table itself
"""
eptbl = self.get_epochs_table(epochs_table_name, format="numpy")
self._check_epochs_table(eptbl)
def set_epochs(self, epochs_table_name, event_table, tmin_ms, tmax_ms):
"""construct and store a named EEG epochs lookup-table in self['epcochs']
For storing in hdf5 the columns must be one of these:
string-like (unicode, bytes)
int-like (int, np.int, np.uint32, np.uint64)
float-like (float, np.float32, np.float64)
Parameters
----------
epochs_table_name : string
name of the epochs table to store
event_table : pandas.DataFrame
as returned by mkh5.get_event_table()
tmin_ms : float
epoch start in milliseconds relative to the event, e.g., -500
tmax_ms : float
epoch end in milliseconds relative to the event, e.g.,
1500, strictly greater than tmin_ms
Returns
-------
None
updates h5_f/EPOCH_TABLES_PATH/ with the named epoch table h5py.Dataset
The epochs table is a lightweight lookup table specific to
this mkh5 instance's hdf5 file,
h5['epochs'][epochs_table_name] = epochs_table
Event tables by default are "epochs" 1 sample long with 0
prestimulus.
This simply updates the prestimulus interval and length
accordingly, adds the peri-event time interval information for
slicing mkh5 datablocks and massages the event table
(pandas.DataFrame) into a numpy ndarray for hdf5 storage.
For reproducibility, by design epochs tables can be added to
an mkh5 file but not overwritten or deleted. If you need to
revise the epochs, rebuild the mkh5 file from crws/logs
with the ones you want.
"""
with h5py.File(self.h5_fname, mode="r") as h5:
if (
mkh5.EPOCH_TABLES_PATH in h5.keys()
and epochs_table_name in h5[mkh5.EPOCH_TABLES_PATH].keys()
):
msg = (
f"epochs name {epochs_table_name} is in use, "
f"pick another name or use reset_all() to "
f"completely wipe the mkh5 file: {self.h5_fname}"
)
raise RuntimeError(msg)
# event_table = self.get_event_table(code_map_f)
if event_table is None:
raise ValueError("uh oh, event_table is empty")
self._h5_check_events(self.h5_fname, event_table)
# ------------------------------------------------------------
# 1. sanitize the pandas.Dataframe columns
# ------------------------------------------------------------
print("Sanitizing event table data types for mkh5 epochs table ...")
# enforce Index data type is str or int
# try:
# msg = None
# if event_table.index.values.dtype == np.dtype("O"):
# maxbytes = max(
# [len(x) for x in event_table.index.values.astype(bytes)]
# )
# index_dt_type = "S" + str(maxbytes)
# elif event_table.index.values.dtype == np.dtype(int):
# index_dt_type = "int"
# else:
# msg = "uh oh, cannot convert event table index column to bytes or integer"
# except Exception as err:
# print(msg)
# raise err
# # move Index into columns for santizing
# event_table = event_table.reset_index("Index")
# remap pandas 'O' dtype columns to hdf5 friendly np.arrays if possible
tidy_table = pd.DataFrame()
for c in event_table.columns:
# do by column so failures are diagnostic. Pass in a copy
# so nan handling can mod the series in place without
# pd warning "setting value on copy"
tidy_table[c] = self._pd_series_to_hdf5(event_table[c].copy())
event_table = tidy_table
# 2. define a numpy compound data type to hold the event_table
# info and region refs
# start with epoch_id
epoch_dt_names = ["epoch_id"]
epoch_dt_types = ["uint64"]
# continue new dtype for event info columns, mapped to hdf5 compatible np.dtype
event_table_types = [event_table[c].dtype for c in event_table.columns]
for i, c in enumerate(event_table.columns):
epoch_dt_names.append(c)
epoch_dt_types.append(event_table[c].dtype.__str__())
# construct the new dtype and initialize the epoch np.array
epoch_dt = np.dtype(list(zip(epoch_dt_names, epoch_dt_types)))
epochs = np.ndarray(shape=(len(event_table),), dtype=epoch_dt)
# set the epoch_id counting index and copy the tidied event table
epochs["epoch_id"] = [idx for idx in range(len(event_table))]
for c in event_table.columns:
epochs[c] = event_table[c]
# 3. time lock each epoch to the match tick, and set the
# interval from the function arguments
hio = self.HeaderIO()
# boolean mask: an epoch is kept only if its interval is in bounds
is_in_bounds = np.zeros(len(epochs)).astype(bool)
with h5py.File(self.h5_fname, "r+") as h5:
# for i,e in event_table.iterrows():
for i, e in enumerate(epochs):
srate = e["dblock_srate"]
# check event table sampling rate agrees w/ dblock
dbp = e["dblock_path"]
hio.get(h5[dbp])
if srate != hio.header["samplerate"]:
msg = (
"{0}['samplerate']: {1} does not match "
"event table[{2}]['dblock_samplerate': "
"{3}"
).format(dbp, hio.header["samplerate"], i, srate)
raise ValueError(msg)
epoch_match_tick_delta = mkh5._ms2samp(tmin_ms, srate)
start_samp = e["match_tick"] + epoch_match_tick_delta
duration_samp = mkh5._ms2samp(
tmax_ms - tmin_ms, srate
) # must be non-negative
if duration_samp <= 0:
msg = (
"epoch interval {0} {1} is less than one sample at "
"{3} ... increase the interval"
).format(tmin_ms, tmax_ms, srate)
raise ValueError(msg)
# move on after bounds check
if start_samp < 0:
warnings.warn(
"data error: pre-stimulus interval is out of bounds left ... "
+ "skipping epoch {0}".format(e)
)
continue
elif start_samp + duration_samp > len(h5[dbp]):
warnings.warn(
"data error: post-stimulus interval is out of bounds right ... "
+ "skipping epoch {0}".format(e)
)
continue
else:
# if in bounds, overwrite np.nan with the epoch start and duration
is_in_bounds[i] = True
e["epoch_match_tick_delta"] = epoch_match_tick_delta
e["epoch_ticks"] = duration_samp
# drop out of bounds epochs and check epochs are consistent
epochs = epochs[is_in_bounds]
# check the epochs for consistency
self._check_epochs_table(epochs)
# 4. add epoch table in the mkh5 file under /EPOCH_TABLES_PATH/epochs_table_name
with h5py.File(self.h5_fname, "r+") as h5:
epochs_path = f"{mkh5.EPOCH_TABLES_PATH}/{epochs_table_name}"
ep = h5.create_dataset(epochs_path, data=epochs)
attrs = {"tmin_ms": tmin_ms, "tmax_ms": tmax_ms}
for k, v in attrs.items():
ep.attrs[k] = v
return None # ok
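# ------------------------------------------------------------------
# Hedged usage sketch (added; not part of the original class). It shows
# the intended call order per the docstrings above; the instance
# construction, code map file name, and epoch bounds are illustrative
# assumptions only.
#
#   myh5 = mkh5("sub01.h5")                            # hypothetical instance
#   events = myh5.get_event_table("p3_codemap.xlsx")   # pandas.DataFrame
#   myh5.set_epochs("p3", events, tmin_ms=-500, tmax_ms=1500)
#   epochs = myh5.get_epochs_table("p3", format="pandas")
# ------------------------------------------------------------------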
def export_event_table(self, event_table, event_table_f, format="feather"):
"""fetch the specified event table and save it in the specified format"""
known_formats = ["feather", "txt"] # txt is tab-separated
if format not in known_formats:
msg = "event_table export format must be 'feather' or 'txt'"
raise ValueError(msg)
# event_table = self.get_event_table(code_map_f)
if event_table is None:
msg = (
"uh oh ... event_table is None ... nothing to export"
)
raise ValueError(msg)
if format == "feather":
event_table.reset_index(inplace=True)
event_table.to_feather(event_table_f)
elif format == "txt":
event_table.to_csv(event_table_f, sep="\t")
else:
raise RuntimeError()
def get_epochs_table_names(self):
"""returns a list, possibly empty of previously named epochs tables
"""
epochs_names = []
try:
with h5py.File(self.h5_fname, "r") as h5:
epochs_names = [t for t in h5[mkh5.EPOCH_TABLES_PATH].keys()]
except Exception:
# no mkh5 file or no epochs tables set yet ... return the empty list
pass
return epochs_names
def _pd_series_to_hdf5(self, series):
"""normalize pandas.Series +/- missing or nans to array for hdf5 serialization
Parameters
----------
series : pandas.Series
Returns
-------
arry_hdf5 : np.array, shape (1,), dtype=column scalar dtype
Raises
------
TypeError if series is not pandas.Series
ValueError if series is empty
EpochsTableDataError if series data doesn't convert to hdf5
Supported data types
* a single, homogenous scalar data type drawn from these
float-like: float, np.float32, np.float64, etc.
int-like: int, np.int32, np.int64, etc.
uint-like: np.uint32, np.uint64, etc.
boolean-like: bool, np.bool
string-like: str, bytes, unicode
* missing data/NaNs are supported **except for boolean-like**
NaN, None conversions as follows:
Series type | from | to hdf5
------------ | -------------- | ------------
float-like | np.NaN, None | np.nan
int-like | np.nan, None | np.nan, int coerced to float_
uint-like | np.nan, None | np.nan, int coerced to float_
string-like | np.nan, None | b'NaN'
boolean-like | np.nan, None | not allowed
Known dtypes according to pandas 0.21.0 return by infer_dtype
empty (returned when all are None, undocumented in pandas)
string, unicode, bytes, floating, integer,
mixed-integer, mixed-integer-float, decimal,
complex, categorical, boolean, datetime64,
datetime, date, timedelta64, timedelta, time,
period, mixed
Approach: for mkh5 supported dtypes the pd.Series dtype 'O' has 2 cases:
- not hasnans
- values are str_like -> to bytes -> np.array
- values are mixed types -> illegal, die
- hasnans: two cases
- the non-missing values are mixed types: illegal, die
- the non-missing values are homogeneous: handle NaNs by type as above
- float_like -> missing/None are already np.nan -> np.array float
- int_like -> replace nans w/ max int of dtype -> np.array float
- uint_like -> replace nans w/ max uint of dtype -> np.array float
- str_like -> replace nans w/ 'NaN' -> to bytes -> np.array bytes
- bool_like -> NaN/missing illegal, die
"""
if not isinstance(series, pd.Series):
msg = "wrong type {0}: must be pandas.Series".format(type(series))
raise TypeError(msg)
if not len(series) > 0:
msg = "empty series"
raise ValueError(msg)
pd_num_like = ["floating", "integer", "decimal", "complex"]
pd_bytes_like = [
"string",
"unicode",
"bytes",
# 'categorical', # ??
]
pd_bool_like = ["boolean"]
pd_type = pd.api.types.infer_dtype(
series, skipna=False
) # mixed if missing values present
# pd_data_type = pd.api.types.infer_dtype(series.dropna()) # mixed if mixed data
pd_data_type = pd.api.types.infer_dtype(
series, skipna=True
) # mixed if mixed data
series_types = pd.unique([type(i) for i in series.values])
data_types = pd.unique([type(i) for i in series.dropna().values])
# homogeneous data w/ no missing values
if series.dtype != "O": #
try:
arry = np.array(series)
except Exception as fail:
print("column ", series.name)
raise fail
assert arry.dtype != "O"
return arry
else:
# white-list the allowed conversions, all else fails
# any combination of str-like values +/- missing data -> bytes +/- 'NaN'
if all([pd.api.types.is_string_dtype(dt) for dt in data_types]):
if series.hasnans:
series.fillna(".NAN", inplace=True)
# try each value individually to diagnose which one is the problem
for v in series.values:
try:
np.array([v]).astype(np.string_)
except Exception as fail:
msg = ("\nvalue: {0}\n" "column: {1}").format(
v, series.name
)
print(msg)
raise fail
# now try whole series
try:
arry = np.array(series.values.astype(np.string_))
except Exception as fail:
print("column ", series.name)
raise fail
assert arry.dtype != "O"
return arry
# handle num-like +/- missing data
elif pd_data_type in pd_num_like:
try:
arry = np.array(series)
except Exception as fail:
print("column ", series.name)
raise fail
assert arry.dtype != "O"
return arry
else:
# fail: this blocks mixed numerics and boolean+NaN
raise mkh5.EpochsTableDataError(pd_data_type, series)
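# Hedged, stand-alone illustration (added; not in the original module) of the
# string-column rule documented above: fill missing values with a placeholder,
# then cast the whole column to fixed-width bytes for hdf5 storage.
#
#   import numpy as np
#   import pandas as pd
#   s = pd.Series(["cal", None, "S01"])
#   s = s.fillna(".NAN")                    # same placeholder used above
#   arry = s.values.astype(np.string_)      # dtype('S4'), hdf5-friendly bytes
#   assert arry.dtype.kind == "S"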
def get_epochs_table(self, epochs_name, format="pandas"):
"""look up a previously set epochs table by name
Parameters
----------
epochs_name : str
name of a previously defined epochs table as set with an
mkh5.set_epochs(event_table)
format : str {'pandas', 'numpy'}
pandas.DataFrame or numpy.ndarray
Returns
-------
epochs_table : pandas.DataFrame or numpy.ndarray
Bytestrings from the hdf5 file are converted to unicode in the
returned epochs_table
"""
if format not in ["pandas", "numpy"]:
msg = "format must be 'pandas' or 'numpy'"
raise ValueError(msg)
epochs_table = None
with h5py.File(self.h5_fname, "r") as h5:
epochs_path = f"{mkh5.EPOCH_TABLES_PATH}/{epochs_name}"
epochs_table = h5[epochs_path][...]
if epochs_table is None:
msg = "epochs table not found: {0}".format(epochs_name)
raise RuntimeError(msg)
# clean up hdf5 bytestrings
# FIX ME for NANs?
dts = []
for n in epochs_table.dtype.names:
if epochs_table.dtype[n].kind == "S":
dts.append((n, "U" + str(epochs_table.dtype[n].itemsize)))
else:
dts.append((n, epochs_table.dtype[n]))
dts = np.dtype(dts)
eptbl = np.empty(shape=epochs_table.shape, dtype=dts)
for idx in range(epochs_table.shape[0]):
for c in dts.names:
value = copy.deepcopy(epochs_table[c][idx])
if hasattr(value, "decode"):
eptbl[c][idx] = value.decode("utf8")
else:
eptbl[c][idx] = value
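# Hedged worked example of the dtype rebuild above (added comment):
# a field like ("dblock_path", "S13") holding b"S001/dblock_0" becomes
# ("dblock_path", "U13") holding "S001/dblock_0"; numeric fields keep
# their original dtype and are copied through unchanged.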
# run consistency check
self._check_epochs_table(eptbl)
if format == "pandas":
eptbl = | pd.DataFrame(eptbl) | pandas.DataFrame |
import os
from collections import OrderedDict
from functools import partial
from time import time, strftime, gmtime
from adjustText import adjust_text
from multipledispatch import dispatch
from matplotlib import rc
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import pandas as pd
import seaborn as sns
import numpy as np
from matplotlib.ticker import NullFormatter
from sklearn.decomposition import PCA
import umap
from sklearn import manifold
from sklearn.decomposition import FastICA
from sklearn.manifold import Isomap
def pca_visualization(df, feature_ids, savedir):
inliers = np.array(df.loc[df['is_anomaly'] == 0, 'is_anomaly'].index).tolist()
outliers = np.array(df.loc[df['is_anomaly'] == 1, 'is_anomaly'].index).tolist()
zindex_points = [*inliers, *outliers]
X = df.drop(columns=['is_anomaly'])
# Fixing z-index to be higher for outliers (they will be rendered last)
X = pd.concat([X.iloc[inliers], X.iloc[outliers]], axis=0)
n_components = 2
plt.suptitle('Features = ' + str(feature_ids), fontsize=12, wrap=True)
colors = get_colors(len(inliers), len(outliers))
pca = PCA(n_components=n_components, random_state=0)
t0 = time()
X_tr = pca.fit_transform(X)
t1 = time()
plt.scatter(X_tr[:, 0], X_tr[:, 1], c=colors, cmap='Spectral')
plt.title("%s (%.2g sec)" % ('PCA', t1 - t0))
plt.xlabel('Principal Component 1')
plt.ylabel('Principal Component 2')
texts = []
for j, p in enumerate(zindex_points):
if p in outliers:
texts.append(plt.text(X_tr[j, 0], X_tr[j, 1], p, color='brown', fontweight='bold', size=9))
adjust_text(texts, autoalign='')
dt_string = strftime("%d%m%Y%H%M%S", gmtime())
if not os.path.exists(savedir):
os.makedirs(savedir)
final_output = os.path.join(savedir, 'pca_' + dt_string + '.png')
plt.savefig(final_output, dpi=300, bbox_inches='tight', pad_inches=0.1)
# plt.show()
plt.clf()
def ica_visualization(df, savedir):
ica = FastICA(n_components=2, random_state=0)
ica_trans = ica.fit_transform(df.drop(columns=['is_anomaly']))
principalDf = pd.DataFrame(data=ica_trans
, columns=['ica1', 'ica2'])
finalDf = pd.concat([principalDf, df[['is_anomaly']]], axis=1)
fig = plt.figure(figsize=(8, 8))
ax = fig.add_subplot(1, 1, 1)
ax.set_xlabel('ICA 1', fontsize=15)
ax.set_ylabel('ICA 2', fontsize=15)
# ax.set_title('2 component PCA', fontsize=20)
targets = [0, 1]
colors = ['b', 'r']
for target, color in zip(targets, colors):
indicesToKeep = finalDf['is_anomaly'] == target
ax.scatter(finalDf.loc[indicesToKeep, 'ica1']
, finalDf.loc[indicesToKeep, 'ica2']
, c=color
)
ax.legend(targets)
ax.grid()
plt.show()
plt.clf()
def umap_visualization(df, savedir):
um = umap.UMAP(n_neighbors=25, random_state=0, n_components=2)
X_train = df.drop(columns=['is_anomaly'])
Y_train = df['is_anomaly']
trans = um.fit(X_train)
plt.scatter(trans.embedding_[:, 0], trans.embedding_[:, 1], s=25, c=Y_train, cmap='Spectral')
plt.show()
def tsne_visualization(df, feature_ids, savedir):
inliers = np.array(df.loc[df['is_anomaly'] == 0, 'is_anomaly'].index).tolist()
outliers = np.array(df.loc[df['is_anomaly'] == 1, 'is_anomaly'].index).tolist()
zindex_points = [*inliers, *outliers]
X = df.drop(columns=['is_anomaly'])
# Fixing z-index to be higher for outliers (they will be rendered last)
X = pd.concat([X.iloc[inliers], X.iloc[outliers]], axis=0)
n_components = 2
plt.suptitle('Features = ' + str(feature_ids), fontsize=12, wrap=True)
colors = get_colors(len(inliers), len(outliers))
tsne = manifold.TSNE(n_components=n_components, init='pca', perplexity=40, random_state=0)
t0 = time()
X_tr = tsne.fit_transform(X)
t1 = time()
plt.scatter(X_tr[:, 0], X_tr[:, 1], c=colors, cmap='Spectral')
plt.title("%s (%.2g sec)" % ('t-SNE', t1 - t0))
plt.xlabel('t-SNE Embedding 1')
plt.ylabel('t-SNE Embedding 2')
texts = []
for j, p in enumerate(zindex_points):
if p in outliers:
texts.append(plt.text(X_tr[j, 0], X_tr[j, 1], p, color='brown', fontweight='bold', size=9))
adjust_text(texts, autoalign='')
dt_string = strftime("%d%m%Y%H%M%S", gmtime())
if not os.path.exists(savedir):
os.makedirs(savedir)
final_output = os.path.join(savedir, 'tsne_' + dt_string + '.png')
plt.savefig(final_output, dpi=300, bbox_inches='tight', pad_inches=0.1)
plt.clf()
def visualize_selected_features(df, feature_ids, savedir):
if len(feature_ids) > 3:
dim_reduction_vizualizations(df, feature_ids, savedir)
elif len(feature_ids) == 3:
dim_reduction_vizualizations(df, feature_ids, savedir)
actual_features_vizualizations(df, feature_ids, savedir)
elif len(feature_ids) == 2:
actual_features_vizualizations(df, feature_ids, savedir)
else:
print('Only one feature to visualize...')
def actual_features_vizualizations(df, feature_ids, savedir):
inliers = np.array(df.loc[df['is_anomaly'] == 0, 'is_anomaly'].index).tolist()
outliers = np.array(df.loc[df['is_anomaly'] == 1, 'is_anomaly'].index).tolist()
zindex_points = [*inliers, *outliers]
X = df.drop(columns=['is_anomaly'])
# Fixing z-index to be higher for outliers (they will be rendered last)
X = pd.concat([X.iloc[inliers], X.iloc[outliers]], axis=0)
fig = plt.figure()
colors = get_colors(len(inliers), len(outliers))
if len(feature_ids) == 2:
ax = fig.add_subplot(1, 1, 1)
ax.scatter(X.iloc[:, 0], X.iloc[:, 1], c=colors, cmap='Spectral')
ax.set_title('Visualization 2D')
ax.set_xlabel('F' + str(feature_ids[0]))
ax.set_ylabel('F' + str(feature_ids[1]))
texts = []
for j, p in enumerate(zindex_points):
if p in outliers:
texts.append(ax.text(X.iloc[j, 0], X.iloc[j, 1], p,
color='brown',
fontweight='bold',
# bbox={'facecolor': 'red', 'alpha': 0.8, 'pad': 1},
size=9))
adjust_text(texts, autoalign='') # , arrowprops=dict(arrowstyle="->", color='black', lw=0.5, shrinkA=5))
if len(feature_ids) == 3:
ax = fig.gca(projection='3d')
ax.scatter(X.iloc[:, 0], X.iloc[:, 1], X.iloc[:, 2], c=colors, cmap='Spectral')
ax.set_title('Visualization 3D')
ax.set_xlabel('F' + str(feature_ids[0]))
ax.set_ylabel('F' + str(feature_ids[1]))
ax.set_zlabel('F' + str(feature_ids[2]))
for j, p in enumerate(zindex_points):
if p in outliers:
ax.text(X.iloc[j, 0], X.iloc[j, 1], X.iloc[j, 2], p,
color='brown',
fontweight='bold',
# bbox={'facecolor': 'red', 'alpha': 0.8, 'pad': 1},
size=9)
# plt.tight_layout()
# plt.show()
dt_string = strftime("%d%m%Y%H%M%S", gmtime())
final_output = os.path.join(savedir, 'actual_viz_' + dt_string + '.png')
plt.savefig(final_output, dpi=300, bbox_inches='tight', pad_inches=0.1)
plt.clf()
def dim_reduction_vizualizations(df, feature_ids, savedir):
inliers = np.array(df.loc[df['is_anomaly'] == 0, 'is_anomaly'].index).tolist()
outliers = np.array(df.loc[df['is_anomaly'] == 1, 'is_anomaly'].index).tolist()
zindex_points = [*inliers, *outliers]
X = df.drop(columns=['is_anomaly'])
# Fixing z-index to be higher for outliers (they will be rendered last)
X = | pd.concat([X.iloc[inliers], X.iloc[outliers]], axis=0) | pandas.concat |
from PIL import Image, ImageDraw, ImageFont
import numpy as np
import pandas as pd
import arrow
from pathlib import Path
from loguru import logger
class DrawImage:
def __init__(self):
self.root = Path(__file__).parent.parent.absolute()
self.all_data_path = self.root / "rt" / Path("all_data.csv")
self.datetime_tag_path = self.root / "rt" / Path("datetime.tag")
self.image_path = self.root / "rt" / f"{self.datetime_tag_path.read_text()}.png"
self.wuxi_data_path = self.root / "rt" / Path("wuxi_data.csv")
self.suzhou_data_path = self.root / "rt" / Path("suzhou_data.csv")
self.columns_width = np.array([8, 12, 10, 10, 10, 10, 10, 10, 10, 10]) * 60
self.row_height = [180, 360, 180, *[180] * 16]
self.fonesize = 140
self.font = ImageFont.truetype(font=str(self.root / 'static/kjgong.ttf'), size=self.fonesize)
self.linewidth = 6
self.spacing = self.fonesize * 0.1
self.start = (10, 10)
def create_figure(self):
logger.info("创建画布")
width = sum(self.columns_width)
height = sum(self.row_height)
self.image = Image.new(mode="RGB", size=(width + 20, height + 20), color="white")
self.draw = ImageDraw.Draw(self.image)
x, y, w, h = *self.start, width, height
logger.info("画大矩形")
self.draw.rectangle(xy=((x, y), (x + w, y + h)),
fill=None, outline="black", width=12)
def draw_rec_text(self, loc, text, fill="black"):
x, y, w, h = self.get_location(*loc)
self.draw.rectangle(xy=((x, y), (x + w, y + h)), fill=None,
outline="black", width=self.linewidth)
if isinstance(text, str):
text = [text]
for i, t in enumerate(text):
fsize = self.font.getsize(t)
height_offset = (i - len(text) / 2 + 0.5) * (self.spacing + self.fonesize)
self.draw.text(xy=(
x + w / 2 - fsize[0] / 2, y + h / 2 - fsize[1] / 2 + height_offset), text=t, fill=fill, font=self.font)
def get_location(self, row_start, col_start, row_end=None, col_end=None, ):
if not (row_end or col_end):
row_end = row_start
col_end = col_start
x = sum(self.columns_width[:col_start]) + self.start[0]
y = sum(self.row_height[:row_start]) + self.start[1]
w = sum(self.columns_width[col_start:col_end + 1])
h = sum(self.row_height[row_start:row_end + 1])
return x, y, w, h
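# Hedged worked example (added comment, not in the original): with
# columns_width = [8, 12, 10, ...] * 60 -> [480, 720, 600, ...],
# row_height starting [180, 360, 180, ...] and start = (10, 10):
#   get_location(0, 1)       -> x=490, y=10, w=720, h=180 (single header cell)
#   get_location(0, 2, 0, 9) -> one cell spanning columns 2..9 of row 0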
def draw_data(self):
self.create_figure()
logger.info("正在绘制表头")
self.draw_rec_text((0, 0, 2, 0), "序号")
self.draw_rec_text((0, 1, 2, 1), "点位")
datetime = arrow.get(self.datetime_tag_path.read_text(), "YYYY-MM-DDTHH")
self.draw_rec_text((0, 2, 0, 9), datetime.format("YYYY-MM-DD HH:mm"))
self.draw_rec_text((1, 2, 1, 3), ["PM2.5", "(微克/立方米)"])
self.draw_rec_text((2, 2), "实时")
self.draw_rec_text((2, 3), "当日累计")
self.draw_rec_text((1, 4, 1, 5), ["PM10", "(微克/立方米)"])
self.draw_rec_text((2, 4), "实时")
self.draw_rec_text((2, 5), "当日累计")
self.draw_rec_text((1, 6, 1, 7), ["NO2", "(微克/立方米)"])
self.draw_rec_text((2, 6), "实时")
self.draw_rec_text((2, 7), "当日累计")
self.draw_rec_text((1, 8, 1, 9), ["O3", "(微克/立方米)"])
self.draw_rec_text((2, 8), "实时")
self.draw_rec_text((2, 9), "MDA8")
df = pd.read_csv(self.all_data_path)
for i, d in df.iterrows():
self.draw_rec_text((i + 3, 0), str(i + 1))
self.draw_rec_text((i + 3, 1), d["STATION_NAME"])
self.draw_rec_text((16, 0, 16, 1), "全市")
self.draw_rec_text((17, 0, 17, 1), "无锡")
self.draw_rec_text((18, 0, 18, 1), "苏州")
# read the Wuxi data
wuxi_data = pd.read_csv(self.wuxi_data_path, index_col=0, parse_dates=True, na_values=["-", "—", ""])
wuxi_data = wuxi_data.loc[:, ["PM2_5", "PM10", "NO2","O3"]]
wuxi_p_series = pd.concat([wuxi_data.iloc[-1], wuxi_data.mean().astype(int)])
wuxi_p_series.index = ["PM25", "PM10", "NO2", "O3", "PM25_CUM", "PM10_CUM", "NO2_CUM", "O3_CUM"]
wuxi_p_series["O3_CUM"] = wuxi_data["O3"].rolling(8,8).mean().max()
# read the Suzhou data
suzhou_data = | pd.read_csv(self.suzhou_data_path, index_col=0, parse_dates=True, na_values=["-", "—", ""]) | pandas.read_csv |
import math
import functools
import collections
import numpy as np
def _return_or_close_fig(fig, return_fig):
from matplotlib import pyplot as plt
if return_fig:
return fig
else:
plt.show()
plt.close(fig)
def plot_trials(
self,
y='score',
color_scheme='Set2',
figsize=(8, 3),
return_fig=False,
):
from matplotlib import pyplot as plt
import seaborn as sns
df = self.to_df()
if y is None:
y = self.minimize
if y == 'score':
best_y = min(self.scores)
ylabel = 'Score'
if y == 'size':
best_y = math.log2(self.best[y])
ylabel = 'log2[SIZE]'
if y == 'flops':
best_y = math.log10(self.best[y])
ylabel = 'log10[FLOPS]'
if y == 'write':
best_y = math.log10(self.best[y])
ylabel = 'log10[WRITE]'
if y == 'combo':
best_y = math.log2(self.best['flops']) + math.log2(self.best['size'])
ylabel = 'log10[FLOPS + 256 * WRITE]'
df['combo'] = [math.log10(f + 256 * m)
for f, m in zip(self.costs_flops, self.costs_write)]
fig, ax = plt.subplots(1, 1, figsize=figsize)
ax.axhline(best_y, color=(0, 0, 1, 0.1), linestyle=':')
sns.scatterplot(
y=y,
x='run',
data=df,
ax=ax,
style='method',
hue='method',
palette=color_scheme,
)
ax.set(ylabel=ylabel)
ax.grid(True, c=(0.98, 0.98, 0.98))
ax.set_axisbelow(True)
handles, labels = ax.get_legend_handles_labels()
ax.legend(
handles=handles,
labels=labels,
bbox_to_anchor=(0.5, 1.0),
ncol=(len(labels) * 6) // len(labels),
loc='lower center',
columnspacing=1,
handletextpad=0,
frameon=False,
)
return _return_or_close_fig(fig, return_fig)
def plot_trials_alt(self, y=None, width=800, height=300):
"""Plot the trials interactively using altair.
"""
import altair as alt
import pandas as pd
df = self.to_df()
if y is None:
y = self.minimize
if y == 'size':
best_y = math.log2(self.best[y])
ylabel = 'log2[SIZE]'
if y == 'flops':
best_y = math.log10(self.best[y])
ylabel = 'log10[FLOPS]'
hline = alt.Chart(
| pd.DataFrame({'best': [best_y]}) | pandas.DataFrame |
import ipyvuetify as v
import pandas as pd
from ipywidgets import Output
from matplotlib import pyplot as plt
from component import parameter as cp
class LayerFull(v.Layout):
COLORS = cp.gradient(5) + ['grey']
def __init__(self, layer_name, values, aoi_names, colors):
# read the layer list and find the layer information based on the layer name
layer_list = | pd.read_csv(cp.layer_list) | pandas.read_csv |
import pandas as pd
import glob
import os
from pandasgui import show #pip install pypiwin32
import numpy as np
import matplotlib.pyplot as plt
import sqlite3
# link to the assignment (in Polish): https://jug.dpieczynski.pl/lab-ead/Lab%2004%20-%20Projekt%20blok1_2021.html
def load_data():
path = os.getcwd()
#path = os.getcwd()+'\Data_test' #_test
all_files = glob.glob(path + "/*.txt")
# print(all_files)  # check which files get loaded
df0 = pd.DataFrame()
years=[]
for filename in all_files:
df = pd.read_csv(filename, header=None)
year = filename.replace(path+"\yob", '')
year = year.replace(".txt", '')
years.append(year)
df['Year'] = year
df0 = df0.append(df)
df0 = df0.rename(columns={0: 'Name', 1: 'Sex', 2: 'Number'})
return df0, years
def task2_3(df0):
print("Zad2: ")
print(f'Ilosc nadanych unikalnych imion bez rozrozniania na meskie i zenskie: {df0.nunique()[0]} ')
print("===================")
df_m = df0[df0['Sex'] == 'M']
df_f = df0[df0['Sex'] == 'F']
print("Zad3: ")
print(f'Ilosc nadanych unikalnych imion meskich: {df_m.nunique()[0]} ')
print(f'Ilosc nadanych unikalnych imion zenskich: {df_f.nunique()[0]} ')
print("===================")
def task4_f(df):
df.groupby(['Year', 'Sex']).sum().unstack('Sex')
fem_births = df[df['Sex'] == 'F'].Number
df['freq fem']=fem_births/fem_births.sum()
mal_births=df[df['Sex'] == 'M'].Number
df['freq m'] = mal_births/mal_births.sum()
return df
def task4(df0, ifprint):
# TODO: the solution is based on: https://pandasguide.readthedocs.io/en/latest/Pandas/babyname.html
results = df0.groupby(['Year', 'Sex']).apply(task4_f)
if ifprint==True:
print("Zad 4:")
print(results)
print("===================")
return results
def task5(df0, years):
df_p = pd.pivot_table(df0, index=['Name'], columns=['Year', 'Sex'])
df_p = df_p['Number']
n_births = []
for i in years:
n_births.append(df_p[i].sum())
total_n_births = []
births_ratio_f_to_m = []
for i in range(0, len(n_births)):
# [x][y]: x - sample index (year), y - sex (0 = F, 1 = M)
births_ratio_f_to_m.append((n_births[i][0]/n_births[i][1]-1)*100)
total_n_births.append((n_births[i][0]+n_births[i][1]))
fixed_years = list(map(int, years))
f, axes = plt.subplots(2)
axes[0].bar(fixed_years,total_n_births)
axes[0].set_title("Zad 5_1 urodzenia(lata)")
axes[1].bar(fixed_years,births_ratio_f_to_m)
axes[1].set_ylabel("% (wiecej/mniej) zenskich imion wzgledem meskich") #0% = tyle samo zenskich imion i meskich, 100% = 100% wiecej zenskich, -30% = o 30% mniej zenskich imion niz meskich
axes[1].set_title("Zad 5_2 f/m ratio")
plt.xticks(np.arange(min(fixed_years), max(fixed_years) + 1, 20.0))
plt.show()
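# Worked example for the ratio plotted above (added comment): 110 female vs.
# 100 male births in a year gives (110/100 - 1) * 100 = 10, i.e. 10% more
# female names than male; 70 vs. 100 gives -30, i.e. 30% fewer.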
def task6(df, years, ifprint):
many_F_dfs = []
many_M_dfs = []
fem_df = pd.DataFrame()
mal_df = | pd.DataFrame() | pandas.DataFrame |
import boto3
import json
import os
import requests
import pandas as pd
import warnings
from pandas import json_normalize
from github import Github
warnings.filterwarnings('ignore')
class Get_GW_Releases:
def __init__(self):
git_token = os.getenv('GIT_TOKEN')
self.git_headers = {'Authorization': f'token {git_token}'}
g = Github(os.getenv('GIT_TOKEN'))
org = g.get_organization("k8-proxy")
self.all_repos = org.get_repos()
self.onerepo = ['GW-Releases']
def get_all(self):
allrepos = []
for repo in self.all_repos:
myrepo = repo.id, repo.name, repo.html_url
allrepos.append(myrepo)
k8repos = pd.DataFrame(allrepos)
k8repos.columns = ['id', 'name', 'repo_url']
return k8repos
# Get content function
def get_content(self,repos):
data = []
for repo in repos:
url = f'https://api.github.com/repos/k8-proxy/{repo}/contents'
req = requests.get(url, headers=self.git_headers).json()
temp_data = json_normalize(req, max_level=1)
temp_df = pd.DataFrame(temp_data)
data.append(temp_df )
content = | pd.concat(data, ignore_index=True) | pandas.concat |
import pytest
import numpy as np
import pandas as pd
from pandas import Categorical, Series, CategoricalIndex
from pandas.core.dtypes.concat import union_categoricals
from pandas.util import testing as tm
class TestUnionCategoricals(object):
def test_union_categorical(self):
# GH 13361
data = [
(list('abc'), list('abd'), list('abcabd')),
([0, 1, 2], [2, 3, 4], [0, 1, 2, 2, 3, 4]),
([0, 1.2, 2], [2, 3.4, 4], [0, 1.2, 2, 2, 3.4, 4]),
(['b', 'b', np.nan, 'a'], ['a', np.nan, 'c'],
['b', 'b', np.nan, 'a', 'a', np.nan, 'c']),
(pd.date_range('2014-01-01', '2014-01-05'),
pd.date_range('2014-01-06', '2014-01-07'),
pd.date_range('2014-01-01', '2014-01-07')),
(pd.date_range('2014-01-01', '2014-01-05', tz='US/Central'),
pd.date_range('2014-01-06', '2014-01-07', tz='US/Central'),
pd.date_range('2014-01-01', '2014-01-07', tz='US/Central')),
(pd.period_range('2014-01-01', '2014-01-05'),
pd.period_range('2014-01-06', '2014-01-07'),
pd.period_range('2014-01-01', '2014-01-07')),
]
for a, b, combined in data:
for box in [Categorical, CategoricalIndex, Series]:
result = union_categoricals([box(Categorical(a)),
box( | Categorical(b) | pandas.Categorical |
#!/usr/bin/env python3
# coding: utf-8
"""Global sequencing data for the home page
Author: <NAME> - Vector Engineering Team (<EMAIL>)
"""
import argparse
import pandas as pd
import numpy as np
import json
from pathlib import Path
def main():
parser = argparse.ArgumentParser()
parser.add_argument(
"--case-data", type=str, required=True, help="Path to case data CSV file",
)
parser.add_argument(
"--location-map",
type=str,
required=True,
help="Path to location map JSON file",
)
parser.add_argument(
"-o", "--output", type=str, required=True, help="Path to output directory",
)
args = parser.parse_args()
out_path = Path(args.output)
# Load case counts by country
case_count_df = pd.read_csv(
"https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_confirmed_global.csv"
)
case_count_df.rename(columns={"Country/Region": "country"}, inplace=True)
# Upgrade some province/states to country/regions
upgrade_provinces = [
"Hong Kong",
"Macau",
"Faroe Islands",
"Greenland",
"French Guiana",
"French Polynesia",
"Guadeloupe",
"Martinique",
"Mayotte",
"New Caledonia",
"Reunion",
"Saint Barthelemy",
"Saint Pierre and Miquelon",
"St Martin",
"Aruba",
"Bonaire, Sint Eustatius and Saba",
"Curacao",
"Sint Maarten",
"Anguilla",
"Bermuda",
"British Virgin Islands",
"Cayman Islands",
"Falkland Islands (Malvinas)",
"Gibraltar",
"Isle of Man",
"Channel Islands",
"Montserrat",
"Turks and Caicos Islands",
"American Samoa",
"Guam",
"Northern Mariana Islands",
"Virgin Islands",
"Puerto Rico",
]
upgrade_province_inds = case_count_df["Province/State"].isin(upgrade_provinces)
case_count_df.loc[upgrade_province_inds, "country"] = case_count_df.loc[
upgrade_province_inds, "Province/State"
]
# Group by country/region
case_count_df = (
case_count_df.drop(columns=["Lat", "Long"])
.groupby("country")
.agg(np.sum)
.reset_index()
)
# Unpivot table
case_count_df = pd.melt(
case_count_df,
id_vars=["country"],
var_name="date",
value_name="cumulative_cases",
)
# Convert date strings to datetime objects
case_count_df["date"] = | pd.to_datetime(case_count_df["date"]) | pandas.to_datetime |
import pandas as pd
import numpy as np
def merge_edge_lists(edge_list_dfs):
"""Merge edge lists. Weights of overlapping edges are summed.
Parameters
----------
edge_list_dfs: a sequence of DataFrame
The DataFrame containing the edge lists.
"""
# Concatenate DataFrame
concat_edge_list_df = | pd.concat(edge_list_dfs, ignore_index=True) | pandas.concat |
import pandas as pd
import numpy as np
# Read the csv file
df = pd.read_csv('../../data/original_dataset/preprocessed_data.csv', header=0, index_col=0)
df.reset_index(drop=True,inplace=True)
# Set tables reading options
pd.set_option('display.max_columns', 500)
| pd.set_option('display.max_rows', 500) | pandas.set_option |
import pandas
import sklearn
import numpy
from sklearn import preprocessing
from sklearn.cluster import KMeans
from sklearn.metrics import silhouette_score
from sklearn.feature_selection import chi2
import json
import ast
import os
import glob
import operator
import argparse
import sys
def createModelList(modelNameFilePath, dataFile):
''' Appends the model name to the existing list of model names
dataFile = name of data input file
modelNameFilePath = path where models.txt is stored
'''
try:
treeList = []
write = True
if (os.path.exists(modelNameFilePath)):
fh = open(modelNameFilePath, "r")
treeList = ast.literal_eval(fh.read())
fh.close()
if (str(dataFile)) not in treeList:
treeList.append(dataFile)
else:
write = False
else:
treeList.append(dataFile)
if (write):
fh = open(modelNameFilePath, "w")
fh.write(json.dumps(treeList, ensure_ascii='False'))
fh.close()
except Exception as e:
print(e)
return False
return True
def writeCluster(fileName, clusterList, globalCount):
''' Writes cluster structure to json file
fileName = name of data input file
clusterList = hierarchical cluster created
globalCount = dictionary with key as level and value as number of nodes at that level
'''
try:
maximumKey = 0
maximumValue = 0
for key, value in globalCount.items():
if (key > maximumKey):
maximumKey = key
if (value > maximumValue):
maximumValue = value
clusterList["height"] = maximumKey
clusterList["width"] = maximumValue
clusterListJson = json.dumps(clusterList, ensure_ascii='False', indent=4)
fh = open(fileName + ".json", "w")
fh.write("[" + clusterListJson + "]")
fh.close()
except Exception as e:
print(e)
return False
return True
#recursive hierarchical clustering function
def hierarchicalClustering(parentCluster, level, positions, parent, clusterList,
silhouetteThreshold, statsPath, user, attributes, globalCount):
''' Recursive Binary Hierarchical Clustering Function
parentCluster = cluster to be subclustered
level = next level to be clustered
positions = list of entities in parentCluster by index
parent = name of parent cluster
clusterList = hierarchical cluster structure to which subsequent clusters are appended to
silhouetteThreshold = silhouette score threshold range in [-1 to 1]
statsPath = path to statistics directory
user = list of all user ids
attributes = feature names
globalCount = dictionary with key as level and value as number of nodes at that level
'''
print("At node " + parent)
silhouetteScore = -1
#tree node count dictionary at each level for labelling
if (len(parentCluster) < 2):
return False
clusterCount = 2
#clustering
clusterer = KMeans(n_clusters=clusterCount, random_state=0)
kmeans = clusterer.fit(parentCluster)
clusterLabels = clusterer.fit_predict(parentCluster)
# The silhouette_score gives the average value for all the samples.
# This gives a perspective into the density and separation of the formed
# clusters
if (1 < len(set(clusterLabels)) < len(parentCluster)):
silhouetteScore = silhouette_score(parentCluster, clusterLabels)
#silhouette score threshold check
if (silhouetteScore < silhouetteThreshold):
return False
indeX = chi2(parentCluster, clusterLabels)[0].tolist()
#finding max chi score and its feature index
chi_max = 0
iterator = 0
index_ = -1
featureSignificance = {}
while (iterator < len(indeX)):
featureSignificance[attributes[iterator]] = indeX[iterator]
if (indeX[iterator] > chi_max):
chi_max = indeX[iterator]
index_ = iterator
iterator = iterator + 1
featureSignificanceSorted = sorted(featureSignificance.items(),
key=lambda x: x[1],
reverse=True)
try:
globalCount[level]
except:
globalCount[level] = 0
final_ = []
childCluster = []
{
iterator: final_.append(numpy.where(kmeans.labels_ == iterator)[0])
for iterator in range(kmeans.n_clusters)
}
clusterIterator = 0
#for each cluster formed
while clusterIterator < kmeans.n_clusters:
iterator = 0
childCluster = []
new_positions = []
ids = []
while iterator < len(final_[clusterIterator]):
childCluster.append(parentCluster[final_[clusterIterator][iterator]])
new_positions.append(positions[final_[clusterIterator][iterator]])
ids.append(user[positions[final_[clusterIterator][iterator]]])
iterator = iterator + 1
#cluster data interpretation
clusterSummary = {}
clusterName = 'L' + str(level) + 'G' + str(globalCount[level])
dataIds = pandas.DataFrame(ids, columns=["IDS"])
dataStats = pandas.DataFrame(childCluster, columns=attributes)
stats = dataStats[attributes].head(n=len(childCluster)).describe()
clusterSummary['ClusterId'] = clusterName
clusterSummary['Size'] = len(childCluster)
clusterSummary['Primary feature cluster created by'] = attributes[index_]
clusterSummary['Features chi score'] = featureSignificanceSorted
clusterSummary['Stats on cluster by each feature'] = stats.to_dict()
clusterSummary['Ids'] = ids
clusterSummaryJson = json.dumps(clusterSummary, ensure_ascii='False', indent=4)
fh = open(statsPath + clusterName + 'clusterDescription' + ".json", "w")
fh.write("[" + clusterSummaryJson + "]")
fh.close()
clusterIterator = clusterIterator + 1
processingList = {}
processingList["name"] = clusterName
processingList["desc"] = clusterName + ",Cluster Size :" + str(
len(childCluster)) + ',Split by :' + str(attributes[index_]) + ",Mean : " + str(
round(dataStats[attributes[index_]].mean(), 4))
processingList["parent"] = parent
processingList['size'] = len(childCluster)
if (float(len(childCluster) / len(parentCluster)) < 0.1):
processingList['line_color'] = 'red'
processingList['alert_color'] = 'red'
else:
processingList['line_color'] = '#000'
processingList['alert_color'] = '#000'
processingList["children"] = []
globalCount[level] = globalCount[level] + 1
hierarchicalClustering(childCluster, level + 1, new_positions, clusterName,
processingList["children"], silhouetteThreshold, statsPath, user,
attributes, globalCount)
clusterList.append(processingList)
return True
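# Hedged, stand-alone sketch (added; not part of the original script) of the
# split test used in hierarchicalClustering above: accept a binary KMeans
# split only if its silhouette score clears the threshold, then rank features
# by chi2 against the cluster labels. Toy data only; chi2 needs non-negative
# features.
#
#   import numpy as np
#   from sklearn.cluster import KMeans
#   from sklearn.metrics import silhouette_score
#   from sklearn.feature_selection import chi2
#   X = np.abs(np.random.RandomState(0).randn(40, 3))
#   labels = KMeans(n_clusters=2, random_state=0).fit_predict(X)
#   if silhouette_score(X, labels) >= 0.65:          # default threshold above
#       chi_scores, _ = chi2(X, labels)
#       split_feature = int(np.argmax(chi_scores))   # most discriminative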
def initializeClusterList(transformedValues, clusterList):
'''Initialises clusterList
transformedValues = Input feature vector
clusterList = hierarchical cluster structure to which subsequent clusters are appended to
'''
try:
clusterList["name"] = 'L0G0'
clusterList["parent"] = "null"
clusterList["desc"] = "Cluster Size :" + str(len(transformedValues))
clusterList["size"] = len(transformedValues)
clusterList["line_color"] = '#000'
clusterList["alert_color"] = '#000'
clusterList["children"] = []
clusterList["clusterCreated"] = False
except Exception as e:
print(e)
return False
return True
def clustering(dataFilePath, silhouetteThreshold=0.65):
''' Clustering function
dataFilePath = Path of input csv file
silhouetteThreshold = silhouette score threshold
'''
user = []
transformedValues = []
attributes = []
clusterList = {}
positions = []
globalCount = {}
if (not os.path.exists(dataFilePath)):
print(dataFilePath + " Not found")
sys.exit(-1)
dataFile = os.path.splitext(os.path.basename(dataFilePath))[0]
statsPath = "Statistics/Hierarchical/" + str(dataFile) + "/"
visualisationDirectory = "Visualisation/"
modelNameFilePath = visualisationDirectory + 'models.txt'
vector = | pandas.read_csv(dataFilePath) | pandas.read_csv |
#!/home/knielbo/virtenvs/ndhl/bin/python
"""
Extract content and metadata for NYT collection
@author: <EMAIL>
"""
import os
import sys
import glob
import re
from xml.dom.minidom import parse, Node
from pandas import DataFrame
# parser
def tag2list(fname, tag = "p"):
"""
Get text node content based on tags in xml file
- store every element in list
"""
xmltree=parse(fname)
tag_list = []
for node1 in xmltree.getElementsByTagName(tag):
for node2 in node1.childNodes:
if node2.nodeType == Node.TEXT_NODE:
tag_list.append(node2.data)
return tag_list
# metadata to dat file with fname as key
def gen_metadata(text, meta_tags = ["publication_day_of_month", "publication_month", "publication_year","publication_day_of_week","dsk","print_page_number","print_section","print_column"]):
"""
Create metadata file with objects in rows and features in columns
- text: vanilla nitf xml file
- meta_tags: tags from text to be treated as metadata
"""
match = re.findall(r'<meta content=(.*)/>',text)
colname = []
metadata = []
colname = []
metadata = []
for i in range(len(match)):
tmp = match[i]
tmp = re.sub(r'"','',tmp)
tmp = tmp.split(" name=")
metadata.append(tmp[0])
colname.append(tmp[1])
result = []
for tag in meta_tags:# try: result.append(metadata[colname.index(tag)]); except: result.append(NA)
result.append(metadata[colname.index(tag)])
return result
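# Hedged illustration (added comment, not in the original script) of what the
# regex in gen_metadata extracts from one NITF meta line:
#   line = '<meta content="1996" name="publication_year"/>'
#   re.findall(r'<meta content=(.*)/>', line)
#     -> ['"1996" name="publication_year"']
# stripping the quotes and splitting on ' name=' then yields the value '1996'
# under the column 'publication_year'.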
# title, and content to dat file with fname as key
## TODO: remove repetitions of content
def get_content(fname):
"""
Extract content from paragraphs in xml file
- fname: filename for xml
- remove LEAD paragraphs
- keep paragraph structure with line char
"""
content = tag2list(fname)
title = tag2list(fname, tag = "title")[0]
pat = re.compile(r"LEAD:")
idx = []
for i, p in enumerate(content):
if pat.match(p):
idx.append(i)
if idx:
for i in sorted(idx, reverse = True):
del content[i]
return " \n".join(content), title
def main():
#fpath = sys.argv[1]
#fpath = os.path.join("..","dat","sample")# sample
fpath = os.path.join("..","..","..","data","nyt","xml")# full data set
fnames = sorted(glob.glob(os.path.join(fpath,"*.xml")))
META, DATA = list(), list()
for fname in fnames:
try:
fileid = fname.split("/")[-1].split(".")[0]
print(fileid)
with open(fname, "r") as f:
text = f.read()
#text = open(fname).read()
META.append([fileid] + gen_metadata(text))
DATA.append([fileid, tag2list(fname, tag = "title")[0], get_content(fname)])
except:
pass
DF_meta = DataFrame(META)
DF_content = | DataFrame(DATA) | pandas.DataFrame |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Nov 21 14:08:43 2019
to produce X and y use combine_pos_neg_from_nc_file or
prepare_X_y_for_holdout_test
@author: ziskin
"""
from PW_paths import savefig_path
from PW_paths import work_yuval
from pathlib import Path
cwd = Path().cwd()
hydro_path = work_yuval / 'hydro'
axis_path = work_yuval/'axis'
gis_path = work_yuval / 'gis'
ims_path = work_yuval / 'IMS_T'
hydro_ml_path = hydro_path / 'hydro_ML'
gnss_path = work_yuval / 'GNSS_stations'
# 'tela': 17135
hydro_pw_dict = {'nizn': 25191, 'klhv': 21105, 'yrcm': 55165,
'ramo': 56140, 'drag': 48125, 'dsea': 48192,
'spir': 56150, 'nrif': 60105, 'elat': 60190
}
hydro_st_name_dict = {25191: 'Lavan - new nizana road',
21105: 'Shikma - Tel milcha',
55165: 'Mamsheet',
56140: 'Ramon',
48125: 'Draga',
48192: 'Chiemar - down the cliff',
46150: 'Nekrot - Top',
60105: 'Yaelon - Kibutz Yahel',
60190: 'Solomon - Eilat'}
best_hp_models_dict = {'SVC': {'kernel': 'rbf', 'C': 1.0, 'gamma': 0.02,
'coef0': 0.0, 'degree': 1},
'RF': {'max_depth': 5, 'max_features': 'auto',
'min_samples_leaf': 1, 'min_samples_split': 2,
'n_estimators': 400},
'MLP': {'alpha': 0.1, 'activation': 'relu',
'hidden_layer_sizes': (10,10,10), 'learning_rate': 'constant',
'solver': 'lbfgs'}}
scorer_order = ['precision', 'recall', 'f1', 'accuracy', 'tss', 'hss']
tsafit_dict = {'lat': 30.985556, 'lon': 35.263056,
'alt': -35.75, 'dt_utc': '2018-04-26T10:15:00'}
axis_southern_stations = ['Dimo', 'Ohad', 'Ddse', 'Yotv', 'Elat', 'Raha', 'Yaha']
soi_axis_dict = {'yrcm': 'Dimo',
'slom': 'Ohad',
'dsea': 'Ddse',
'nrif': 'Yotv',
'elat': 'Elat',
'klhv': 'Raha',
'spir': 'Yaha'}
def plot_mean_abs_shap_values_features(SV, fix_xticklabels=True):
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
from natsort import natsorted
features = ['pwv', 'pressure', 'DOY']
# sns.set_palette('Dark2', 6)
sns.set_theme(style='ticks', font_scale=1.5)
# sns.set_style('whitegrid')
# sns.set_style('ticks')
sv = np.abs(SV).mean('sample').sel(clas=0).reset_coords(drop=True)
gr_spec = [20, 20, 1]
fig, axes = plt.subplots(1, 3, sharey=True, figsize=(17, 5), gridspec_kw={'width_ratios': gr_spec})
try:
axes.flatten()
except AttributeError:
axes = [axes]
for i, f in enumerate(features):
fe = [x for x in sv['feature'].values if f in x]
dsf = sv.sel(feature=fe).reset_coords(drop=True).to_dataframe()
title = '{}'.format(f.upper())
dsf.plot.bar(ax=axes[i], title=title, rot=0, legend=False, zorder=20,
width=.8, color='k', alpha=0.8)
axes[i].set_title(title)
dsf_sum = dsf.sum().tolist()
handles, labels = axes[i].get_legend_handles_labels()
labels = [
'{} ({:.1f} %)'.format(
x, y) for x, y in zip(
labels, dsf_sum)]
# axes[i].legend(handles=handles, labels=labels, prop={'size': fontsize-3}, loc='upper center')
axes[i].set_ylabel('mean(|SHAP value|)\n(average impact\non model output magnitude)')
axes[i].grid(axis='y', zorder=1)
if fix_xticklabels:
# n = sum(['pwv' in x for x in sv.feature.values])
axes[2].xaxis.set_ticklabels('')
axes[2].set_xlabel('')
hrs = np.arange(-1, -25, -1)
axes[0].set_xticklabels(hrs, rotation=30, ha="center", fontsize=12)
axes[1].set_xticklabels(hrs, rotation=30, ha="center", fontsize=12)
axes[2].tick_params()
axes[0].set_xlabel('Hours prior to flood')
axes[1].set_xlabel('Hours prior to flood')
fig.tight_layout()
filename = 'RF_shap_values_{}.png'.format('+'.join(features))
plt.savefig(savefig_path / filename, bbox_inches='tight')
return fig
def read_binary_classification_shap_values_to_pandas(shap_values, X):
import xarray as xr
SV0 = X.copy(data=shap_values[0])
SV1 = X.copy(data=shap_values[1])
SV = xr.concat([SV0, SV1], dim='clas')
SV['clas'] = [0, 1]
return SV
def get_shap_values_RF_classifier(plot=True):
import shap
X, y = combine_pos_neg_from_nc_file()
ml = ML_Classifier_Switcher()
rf = ml.pick_model('RF')
rf.set_params(**best_hp_models_dict['RF'])
X = select_doy_from_feature_list(X, features=['pwv', 'pressure', 'doy'])
rf.fit(X, y)
explainer = shap.TreeExplainer(rf)
shap_values = explainer.shap_values(X.values)
if plot:
shap.summary_plot(shap_values, X, feature_names=[
x for x in X.feature.values], max_display=49, sort=False)
return shap_values
def interpolate_pwv_to_tsafit_event(path=work_yuval, savepath=work_yuval):
import pandas as pd
import xarray as xr
from PW_stations import produce_geo_gnss_solved_stations
from interpolation_routines import interpolate_var_ds_at_multiple_dts
from aux_gps import save_ncfile
# get gnss soi-apn pwv data and geo-meta data:
geo_df = produce_geo_gnss_solved_stations(plot=False)
pw = xr.load_dataset(work_yuval/'GNSS_PW_thresh_50.nc')
pw = pw[[x for x in pw if '_error' not in x]]
pw = pw.sel(time=slice('2018-04-25', '2018-04-26'))
pw = pw.drop_vars(['elat', 'elro', 'csar', 'slom'])
# get tsafit data:
predict_df = pd.DataFrame(tsafit_dict, index=['tsafit'])
df_inter = interpolate_var_ds_at_multiple_dts(pw, geo_df, predict_df)
da=df_inter['interpolated_lr_fixed'].to_xarray()
da.name = 'pwv'
da.attrs['operation'] = 'interploated from SOI-APN PWV data'
da.attrs['WV scale height'] = 'variable from SOI-APN data'
da.attrs.update(**tsafit_dict)
if savepath is not None:
filename = 'Tsafit_PWV_event.nc'
save_ncfile(da, savepath, filename)
return da
def plot_tsafit_event(path=work_yuval):
import xarray as xr
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
sns.set_theme(style='ticks', font_scale=1.5)
da = xr.load_dataarray(path / 'Tsafit_PWV_event.nc')
fig, ax = plt.subplots(figsize=(11, 8))
da_sliced = da.sel(time=slice('2018-04-26T00:00:00', '2018-04-26T12:00:00'))
# da_sliced.name = 'PWV [mm]'
da_sliced = da_sliced.rename({'time': 'Time [UTC]'})
da_sliced.to_dataframe().plot(ax=ax, ylabel='PWV [mm]', linewidth=2, marker='o', legend=False)
dt = pd.to_datetime(da.attrs['dt_utc'])
ax.axvline(dt, color='r', linestyle='--', linewidth=2, label='T')
handles, labels = ax.get_legend_handles_labels()
plt.legend(handles=handles, labels=['PWV', 'Tsafit Flood Event'])
ax.grid(True)
# ax.set_xlabel('Time [UTC]')
fig.tight_layout()
fig.suptitle('PWV from SOI-APN over Tsafit area on 2018-04-26')
fig.subplots_adjust(top=0.941)
return fig
# TODO: treat all pwv from events as follows:
# For each station:
# 0) rolling mean to all pwv 1 hour
# 1) take 288 points before events, if < 144 gone then drop
# 2) interpolate them 12H using spline/other
# 3) then, check if dts coincide 1 day before, if not concat all dts+pwv for each station
# 4) prepare features, such as pressure, doy, try to get pressure near the stations and remove the longterm hour dayofyear
# pressure in BD anoms is highly correlated with SEDOM (0.9) and ELAT (0.88) so no need for local pressure features
# fixed filling with jerusalem centre since 2 drag events dropped due to lack of data 2018-11 2019-02 in pressure
# 5) feature addition: should be like pwv steps 1-3,
# 6) negative events should be sampled separately, for
# 7) now prepare pwv and pressure to single ds with 1 hourly sample rate
# 8) produce positives and save them to file!
# 9) produce a way to get negatives considering the positives
# maybe implement permutation importance for pwv ? see what is more important to
# the model in 24 hours ? only on SVC and MLP ?
# implement TSS and HSS scores and test them (make_scorer from confusion matrix); see the sketch below
# redo results but with inner and outer splits of 4, 4
# plot and see best_score per refit-scorer - this is the best score of GridSearchCV on the entire
# train/validation subset per each outerfold - basically see if the test_metric increased after the gridsearchcv as it should
# use holdout set
# implement repeatedstratifiedkfold and run it...
# check for stability of the gridsearch CV...also run with 4-folds ?
# finalize the permutation_importances and permutation_test_scores
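# Hedged sketch (added; not in the original module) of the TSS and HSS scorers
# mentioned in the TODO above, built from the confusion matrix and wrapped with
# sklearn's make_scorer. Formulas follow the standard definitions
# (TSS = sensitivity + specificity - 1; HSS = Heidke skill score).
#
#   import numpy as np
#   from sklearn.metrics import confusion_matrix, make_scorer
#
#   def tss(y_true, y_pred):
#       tn, fp, fn, tp = confusion_matrix(y_true, y_pred).ravel()
#       return tp / (tp + fn) - fp / (fp + tn)
#
#   def hss(y_true, y_pred):
#       tn, fp, fn, tp = confusion_matrix(y_true, y_pred).ravel()
#       return 2 * (tp * tn - fn * fp) / (
#           (tp + fn) * (fn + tn) + (tp + fp) * (fp + tn))
#
#   tss_scorer = make_scorer(tss)
#   hss_scorer = make_scorer(hss)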
def prepare_tide_events_GNSS_dataset(hydro_path=hydro_path):
import xarray as xr
import pandas as pd
import numpy as np
from aux_gps import xr_reindex_with_date_range
feats = xr.load_dataset(
hydro_path/'hydro_tides_hourly_features_with_positives.nc')
ds = feats['Tides'].to_dataset('GNSS').rename({'tide_event': 'time'})
da_list = []
for da in ds:
time = ds[da].dropna('time')
daa = time.copy(data=np.ones(time.shape))
daa['time'] = pd.to_datetime(time.values)
daa.name = time.name + '_tide'
da_list.append(daa)
ds = xr.merge(da_list)
li = [xr_reindex_with_date_range(ds[x], freq='H') for x in ds]
ds = xr.merge(li)
return ds
def select_features_from_X(X, features='pwv'):
if isinstance(features, str):
f = [x for x in X.feature.values if features in x]
X = X.sel(feature=f)
elif isinstance(features, list):
fs = []
for f in features:
fs += [x for x in X.feature.values if f in x]
X = X.sel(feature=fs)
return X
def combine_pos_neg_from_nc_file(hydro_path=hydro_path,
negative_sample_num=1,
seed=1, std=True):
from aux_gps import path_glob
from sklearn.utils import resample
import xarray as xr
import numpy as np
# import pandas as pd
if std:
file = path_glob(
hydro_path, 'hydro_tides_hourly_features_with_positives_negatives_std*.nc')[-1]
else:
file = path_glob(
hydro_path, 'hydro_tides_hourly_features_with_positives_negatives_*.nc')[-1]
ds = xr.open_dataset(file)
# get the positive features and produce target:
X_pos = ds['X_pos'].rename({'positive_sample': 'sample'})
y_pos = xr.DataArray(np.ones(X_pos['sample'].shape), dims=['sample'])
y_pos['sample'] = X_pos['sample']
# choose at random y_pos size of negative class:
X_neg = ds['X_neg'].rename({'negative_sample': 'sample'})
pos_size = y_pos['sample'].size
np.random.seed(seed)
# negatives = []
for n_samples in [x for x in range(negative_sample_num)]:
# dts = np.random.choice(X_neg['sample'], size=y_pos['sample'].size,
# replace=False)
# print(np.unique(dts).shape)
# negatives.append(X_neg.sel(sample=dts))
negative = resample(X_neg, replace=False,
n_samples=pos_size * negative_sample_num,
random_state=seed)
negatives = np.split(negative, negative_sample_num, axis=0)
Xs = []
ys = []
for X_negative in negatives:
y_neg = xr.DataArray(np.zeros(X_negative['sample'].shape), dims=['sample'])
y_neg['sample'] = X_negative['sample']
# now concat all X's and y's:
X = xr.concat([X_pos, X_negative], 'sample')
y = xr.concat([y_pos, y_neg], 'sample')
X.name = 'X'
Xs.append(X)
ys.append(y)
if len(negatives) == 1:
return Xs[0], ys[0]
else:
return Xs, ys
def drop_hours_in_pwv_pressure_features(X, last_hours=7, verbose=True):
import numpy as np
Xcopy = X.copy()
pwvs_to_drop = ['pwv_{}'.format(x) for x in np.arange(24-last_hours + 1, 25)]
if set(pwvs_to_drop).issubset(set(X.feature.values)):
if verbose:
print('dropping {} from X.'.format(', '.join(pwvs_to_drop)))
Xcopy = Xcopy.drop_sel(feature=pwvs_to_drop)
pressures_to_drop = ['pressure_{}'.format(x) for x in np.arange(24-last_hours + 1, 25)]
if set(pressures_to_drop).issubset(set(X.feature.values)):
if verbose:
print('dropping {} from X.'.format(', '.join(pressures_to_drop)))
Xcopy = Xcopy.drop_sel(feature=pressures_to_drop)
return Xcopy
def check_if_negatives_are_within_positives(neg_da, hydro_path=hydro_path):
import xarray as xr
import pandas as pd
pos_da = xr.open_dataset(
hydro_path / 'hydro_tides_hourly_features_with_positives.nc')['X']
dt_pos = pos_da.sample.to_dataframe()
dt_neg = neg_da.sample.to_dataframe()
dt_all = dt_pos.index.union(dt_neg.index)
dff = pd.DataFrame(dt_all, index=dt_all)
dff = dff.sort_index()
samples_within = dff[(dff.diff()['sample'] <= pd.Timedelta(1, unit='D'))]
num = samples_within.size
print('samples that are within a day of each other: {}'.format(num))
print('samples are: {}'.format(samples_within))
return dff
def produce_negatives_events_from_feature_file(hydro_path=hydro_path, seed=42,
batches=1, verbose=1, std=True):
# do the same thing for pressure (as for pwv), but not for
import xarray as xr
import numpy as np
import pandas as pd
from aux_gps import save_ncfile
feats = xr.load_dataset(hydro_path / 'hydro_tides_hourly_features.nc')
feats = feats.rename({'doy': 'DOY'})
if std:
pos_filename = 'hydro_tides_hourly_features_with_positives_std.nc'
else:
pos_filename = 'hydro_tides_hourly_features_with_positives.nc'
all_tides = xr.open_dataset(
hydro_path / pos_filename)['X_pos']
# pos_tides = xr.open_dataset(hydro_path / 'hydro_tides_hourly_features_with_positives.nc')['tide_datetimes']
tides = xr.open_dataset(
hydro_path / pos_filename)['Tides']
# get the positives (tide events) for each station:
df_stns = tides.to_dataset('GNSS').to_dataframe()
# get all positives (tide events) for all stations:
df = all_tides.positive_sample.to_dataframe()['positive_sample']
df.columns = ['sample']
stns = [x for x in hydro_pw_dict.keys()]
other_feats = ['DOY', 'doy_sin', 'doy_cos']
# main stns df features (pwv)
pwv_df = feats[stns].to_dataframe()
pressure = feats['bet-dagan'].to_dataframe()['bet-dagan']
# define the initial no_choice_dt_range from the positive dt_range:
no_choice_dt_range = [pd.date_range(
start=dt, periods=48, freq='H') for dt in df]
no_choice_dt_range = pd.DatetimeIndex(
np.unique(np.hstack(no_choice_dt_range)))
dts_to_choose_from = pwv_df.index.difference(no_choice_dt_range)
# dts_to_choose_from_pressure = pwv_df.index.difference(no_choice_dt_range)
# loop over all stns and produce negative events:
np.random.seed(seed)
neg_batches = []
for i in np.arange(1, batches + 1):
if verbose >= 0:
print('preparing batch {}:'.format(i))
neg_stns = []
for stn in stns:
dts_df = df_stns[stn].dropna()
pwv = pwv_df[stn].dropna()
# loop over all events in on stn:
negatives = []
negatives_pressure = []
# neg_samples = []
if verbose >= 1:
print('finding negatives for station {}, events={}'.format(
stn, len(dts_df)))
# print('finding negatives for station {}, dt={}'.format(stn, dt.strftime('%Y-%m-%d %H:%M')))
cnt = 0
while cnt < len(dts_df):
# get random number from each stn pwv:
# r = np.random.randint(low=0, high=len(pwv.index))
# random_dt = pwv.index[r]
random_dt = np.random.choice(dts_to_choose_from)
negative_dt_range = pd.date_range(
start=random_dt, periods=24, freq='H')
if not (no_choice_dt_range.intersection(negative_dt_range)).empty:
# print('#')
if verbose >= 2:
print('Overlap!')
continue
# get the actual pwv and check it is full (24hours):
negative = pwv.loc[pwv.index.intersection(negative_dt_range)]
neg_pressure = pressure.loc[pwv.index.intersection(
negative_dt_range)]
if len(negative.dropna()) != 24 or len(neg_pressure.dropna()) != 24:
# print('!')
if verbose >= 2:
print('NaNs!')
continue
if verbose >= 2:
print('number of dts that are already chosen: {}'.format(
len(no_choice_dt_range)))
negatives.append(negative)
negatives_pressure.append(neg_pressure)
                # now add to the no_choice_dt_range the negative dt_range we just acquired:
negative_dt_range_with_padding = pd.date_range(
start=random_dt-pd.Timedelta(24, unit='H'), end=random_dt+pd.Timedelta(23, unit='H'), freq='H')
no_choice_dt_range = pd.DatetimeIndex(
np.unique(np.hstack([no_choice_dt_range, negative_dt_range_with_padding])))
dts_to_choose_from = dts_to_choose_from.difference(
no_choice_dt_range)
if verbose >= 2:
print('number of dts to choose from: {}'.format(
len(dts_to_choose_from)))
cnt += 1
neg_da = xr.DataArray(negatives, dims=['sample', 'feature'])
neg_da['feature'] = ['{}_{}'.format(
'pwv', x) for x in np.arange(1, 25)]
neg_samples = [x.index[0] for x in negatives]
neg_da['sample'] = neg_samples
neg_pre_da = xr.DataArray(
negatives_pressure, dims=['sample', 'feature'])
neg_pre_da['feature'] = ['{}_{}'.format(
'pressure', x) for x in np.arange(1, 25)]
neg_pre_samples = [x.index[0] for x in negatives_pressure]
neg_pre_da['sample'] = neg_pre_samples
neg_da = xr.concat([neg_da, neg_pre_da], 'feature')
neg_da = neg_da.sortby('sample')
neg_stns.append(neg_da)
da_stns = xr.concat(neg_stns, 'sample')
da_stns = da_stns.sortby('sample')
        # now loop over the remaining features (which are station-agnostic)
        # and add them with the same negative datetimes as the pwv already acquired:
dts = [pd.date_range(x.item(), periods=24, freq='H')
for x in da_stns['sample']]
dts_samples = [x[0] for x in dts]
other_feat_list = []
for feat in feats[other_feats]:
# other_feat_sample_list = []
da_other = xr.DataArray(feats[feat].sel(time=dts_samples).values, dims=['sample'])
# for dt in dts_samples:
# da_other = xr.DataArray(feats[feat].sel(
# time=dt).values, dims=['feature'])
da_other['sample'] = dts_samples
other_feat_list.append(da_other)
# other_feat_da = xr.concat(other_feat_sample_list, 'feature')
da_other_feats = xr.concat(other_feat_list, 'feature')
da_other_feats['feature'] = other_feats
da_stns = xr.concat([da_stns, da_other_feats], 'feature')
neg_batches.append(da_stns)
neg_batch_da = xr.concat(neg_batches, 'sample')
# neg_batch_da['batch'] = np.arange(1, batches + 1)
neg_batch_da.name = 'X_neg'
feats['X_neg'] = neg_batch_da
feats['X_pos'] = all_tides
feats['X_pwv_stns'] = tides
# feats['tide_datetimes'] = pos_tides
feats = feats.rename({'sample': 'negative_sample'})
if std:
filename = 'hydro_tides_hourly_features_with_positives_negatives_std_{}.nc'.format(
batches)
else:
filename = 'hydro_tides_hourly_features_with_positives_negatives_{}.nc'.format(
batches)
save_ncfile(feats, hydro_path, filename)
return neg_batch_da
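# A minimal, self-contained sketch of the rejection sampling used above: draw a
# random start hour, reject it if its 24-hour window overlaps the exclusion
# index (positives plus already-chosen negatives), otherwise accept it and pad
# the exclusion index around it. The toy date ranges are illustrative only.
def _example_negative_rejection_sampling(n_events=3, seed=42):
    import numpy as np
    import pandas as pd
    np.random.seed(seed)
    full_index = pd.date_range('2015-01-01', '2015-03-01', freq='H')
    no_choice = pd.date_range('2015-01-10', periods=48, freq='H')
    to_choose_from = full_index.difference(no_choice)
    accepted = []
    while len(accepted) < n_events:
        random_dt = np.random.choice(to_choose_from)
        window = pd.date_range(start=random_dt, periods=24, freq='H')
        if not no_choice.intersection(window).empty:
            continue  # overlaps positives or previous negatives -> reject
        accepted.append(pd.Timestamp(random_dt))
        padded = pd.date_range(start=random_dt - pd.Timedelta(24, unit='H'),
                               end=random_dt + pd.Timedelta(23, unit='H'),
                               freq='H')
        no_choice = pd.DatetimeIndex(
            np.unique(np.hstack([no_choice, padded])))
        to_choose_from = to_choose_from.difference(no_choice)
    return accepted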
def produce_positives_from_feature_file(hydro_path=hydro_path, std=True):
import xarray as xr
import pandas as pd
import numpy as np
from aux_gps import save_ncfile
# load features:
if std:
file = hydro_path / 'hydro_tides_hourly_features_std.nc'
else:
file = hydro_path / 'hydro_tides_hourly_features.nc'
feats = xr.load_dataset(file)
feats = feats.rename({'doy': 'DOY'})
# load positive event for each station:
dfs = [read_station_from_tide_database(hydro_pw_dict.get(
x), rounding='1H') for x in hydro_pw_dict.keys()]
dfs = check_if_tide_events_from_stations_are_within_time_window(
dfs, days=1, rounding=None, return_hs_list=True)
da_list = []
positives_per_station = []
for i, feat in enumerate(feats):
try:
_, _, pr = produce_pwv_days_before_tide_events(feats[feat], dfs[i],
plot=False, rolling=None,
days_prior=1,
drop_thresh=0.75,
max_gap='6H',
verbose=0)
print('getting positives from station {}'.format(feat))
positives = [pd.to_datetime(
(x[-1].time + pd.Timedelta(1, unit='H')).item()) for x in pr]
da = xr.DataArray(pr, dims=['sample', 'feature'])
da['sample'] = positives
positives_per_station.append(positives)
da['feature'] = ['pwv_{}'.format(x) for x in np.arange(1, 25)]
da_list.append(da)
except IndexError:
continue
da_pwv = xr.concat(da_list, 'sample')
da_pwv = da_pwv.sortby('sample')
# now add more features:
da_list = []
for feat in ['bet-dagan']:
print('getting positives from feature {}'.format(feat))
positives = []
for dt_end in da_pwv.sample:
dt_st = pd.to_datetime(dt_end.item()) - pd.Timedelta(24, unit='H')
dt_end_end = pd.to_datetime(
dt_end.item()) - pd.Timedelta(1, unit='H')
positive = feats[feat].sel(time=slice(dt_st, dt_end_end))
positives.append(positive)
da = xr.DataArray(positives, dims=['sample', 'feature'])
da['sample'] = da_pwv.sample
if feat == 'bet-dagan':
feat_name = 'pressure'
else:
feat_name = feat
da['feature'] = ['{}_{}'.format(feat_name, x)
for x in np.arange(1, 25)]
da_list.append(da)
da_f = xr.concat(da_list, 'feature')
da_list = []
for feat in ['DOY', 'doy_sin', 'doy_cos']:
print('getting positives from feature {}'.format(feat))
positives = []
for dt in da_pwv.sample:
positive = feats[feat].sel(time=dt)
positives.append(positive)
da = xr.DataArray(positives, dims=['sample'])
da['sample'] = da_pwv.sample
# da['feature'] = feat
da_list.append(da)
da_ff = xr.concat(da_list, 'feature')
da_ff['feature'] = ['DOY', 'doy_sin', 'doy_cos']
da = xr.concat([da_pwv, da_f, da_ff], 'feature')
if std:
filename = 'hydro_tides_hourly_features_with_positives_std.nc'
else:
filename = 'hydro_tides_hourly_features_with_positives.nc'
feats['X_pos'] = da
# now add positives per stations:
pdf = pd.DataFrame(positives_per_station).T
pdf.index.name = 'tide_event'
pos_da = pdf.to_xarray().to_array('GNSS')
pos_da['GNSS'] = [x for x in hydro_pw_dict.keys()]
pos_da.attrs['info'] = 'contains the datetimes of the tide events per GNSS station.'
feats['Tides'] = pos_da
# rename sample to positive sample:
feats = feats.rename({'sample': 'positive_sample'})
save_ncfile(feats, hydro_path, filename)
return feats
def prepare_features_and_save_hourly(work_path=work_yuval, ims_path=ims_path,
savepath=hydro_path, std=True):
import xarray as xr
from aux_gps import save_ncfile
import numpy as np
# pwv = xr.load_dataset(
if std:
pwv_filename = 'GNSS_PW_thresh_0_hour_dayofyear_anoms_sd.nc'
pre_filename = 'IMS_BD_hourly_anoms_std_ps_1964-2020.nc'
else:
pwv_filename = 'GNSS_PW_thresh_0_hour_dayofyear_anoms.nc'
pre_filename = 'IMS_BD_hourly_anoms_ps_1964-2020.nc'
# work_path / 'GNSS_PW_thresh_0_hour_dayofyear_anoms.nc')
pwv = xr.load_dataset(work_path / pwv_filename)
pwv_stations = [x for x in hydro_pw_dict.keys()]
pwv = pwv[pwv_stations]
# pwv = pwv.rolling(time=12, keep_attrs=True).mean(keep_attrs=True)
pwv = pwv.resample(time='1H', keep_attrs=True).mean(keep_attrs=True)
# bd = xr.load_dataset(ims_path / 'IMS_BD_anoms_5min_ps_1964-2020.nc')
bd = xr.load_dataset(ims_path / pre_filename)
# min_time = pwv.dropna('time')['time'].min()
# bd = bd.sel(time=slice('1996', None)).resample(time='1H').mean()
bd = bd.sel(time=slice('1996', None))
pressure = bd['bet-dagan']
doy = pwv['time'].copy(data=pwv['time'].dt.dayofyear)
doy.name = 'doy'
doy_sin = np.sin(doy * np.pi / 183)
doy_sin.name = 'doy_sin'
doy_cos = np.cos(doy * np.pi / 183)
doy_cos.name = 'doy_cos'
ds = xr.merge([pwv, pressure, doy, doy_sin, doy_cos])
if std:
filename = 'hydro_tides_hourly_features_std.nc'
else:
filename = 'hydro_tides_hourly_features.nc'
save_ncfile(ds, savepath, filename)
return ds
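# A small sketch of the cyclic day-of-year encoding used above: dividing by 183
# (half of ~366) makes sin/cos complete one full cycle per year, so day 366 and
# day 1 land next to each other in feature space instead of maximally apart.
def _example_doy_cyclic_encoding():
    import numpy as np
    doy = np.array([1, 92, 183, 275, 366])
    doy_sin = np.sin(doy * np.pi / 183)
    doy_cos = np.cos(doy * np.pi / 183)
    # doy=1 and doy=366 map to nearly the same (sin, cos) point:
    return np.column_stack([doy, doy_sin, doy_cos])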
def plot_all_decompositions(X, y, n=2):
import xarray as xr
models = [
'PCA',
'LDA',
'ISO_MAP',
'LLE',
'LLE-modified',
'LLE-hessian',
'LLE-ltsa',
'MDA',
'RTE',
'SE',
'TSNE',
'NCA']
names = [
'Principal Components',
'Linear Discriminant',
'Isomap',
'Locally Linear Embedding',
'Modified LLE',
'Hessian LLE',
'Local Tangent Space Alignment',
'MDS embedding',
'Random forest',
'Spectral embedding',
't-SNE',
'NCA embedding']
name_dict = dict(zip(models, names))
da = xr.DataArray(models, dims=['model'])
da['model'] = models
fg = xr.plot.FacetGrid(da, col='model', col_wrap=4,
sharex=False, sharey=False)
for model_str, ax in zip(da['model'].values, fg.axes.flatten()):
model = model_str.split('-')[0]
method = model_str.split('-')[-1]
if model == method:
method = None
try:
ax = scikit_decompose(X, y, model=model, n=n, method=method, ax=ax)
except ValueError:
pass
ax.set_title(name_dict[model_str])
ax.set_xlabel('')
ax.set_ylabel('')
fg.fig.suptitle('various decomposition projections (n={})'.format(n))
return
def scikit_decompose(X, y, model='PCA', n=2, method=None, ax=None):
from sklearn import (manifold, decomposition, ensemble,
discriminant_analysis, neighbors)
import matplotlib.pyplot as plt
import pandas as pd
# from mpl_toolkits.mplot3d import Axes3D
n_neighbors = 30
if model == 'PCA':
X_decomp = decomposition.TruncatedSVD(n_components=n).fit_transform(X)
elif model == 'LDA':
X2 = X.copy()
X2.values.flat[::X.shape[1] + 1] += 0.01
        X_decomp = discriminant_analysis.LinearDiscriminantAnalysis(
            n_components=n).fit_transform(X2, y)
elif model == 'ISO_MAP':
X_decomp = manifold.Isomap(
n_neighbors, n_components=n).fit_transform(X)
elif model == 'LLE':
# method = 'standard', 'modified', 'hessian' 'ltsa'
if method is None:
method = 'standard'
        clf = manifold.LocallyLinearEmbedding(n_neighbors, n_components=n,
                                              method=method)
X_decomp = clf.fit_transform(X)
elif model == 'MDA':
clf = manifold.MDS(n_components=n, n_init=1, max_iter=100)
X_decomp = clf.fit_transform(X)
elif model == 'RTE':
hasher = ensemble.RandomTreesEmbedding(n_estimators=200, random_state=0,
max_depth=5)
X_transformed = hasher.fit_transform(X)
pca = decomposition.TruncatedSVD(n_components=n)
X_decomp = pca.fit_transform(X_transformed)
elif model == 'SE':
embedder = manifold.SpectralEmbedding(n_components=n, random_state=0,
eigen_solver="arpack")
X_decomp = embedder.fit_transform(X)
elif model == 'TSNE':
tsne = manifold.TSNE(n_components=n, init='pca', random_state=0)
X_decomp = tsne.fit_transform(X)
elif model == 'NCA':
nca = neighbors.NeighborhoodComponentsAnalysis(init='random',
n_components=n, random_state=0)
X_decomp = nca.fit_transform(X, y)
df = pd.DataFrame(X_decomp)
    df.columns = ['{}_{}'.format(model, x + 1)
                  for x in range(X_decomp.shape[1])]
df['flood'] = y
df['flood'] = df['flood'].astype(int)
df_1 = df[df['flood'] == 1]
df_0 = df[df['flood'] == 0]
if X_decomp.shape[1] == 1:
if ax is not None:
df_1.plot.scatter(ax=ax,
x='{}_1'.format(model),
y='{}_1'.format(model),
color='b', marker='s', alpha=0.3,
label='1',
s=50)
else:
ax = df_1.plot.scatter(
x='{}_1'.format(model),
y='{}_1'.format(model),
color='b',
label='1',
s=50)
df_0.plot.scatter(
ax=ax,
x='{}_1'.format(model),
y='{}_1'.format(model),
color='r', marker='x',
label='0',
s=50)
elif X_decomp.shape[1] == 2:
if ax is not None:
df_1.plot.scatter(ax=ax,
x='{}_1'.format(model),
y='{}_2'.format(model),
color='b', marker='s', alpha=0.3,
label='1',
s=50)
else:
ax = df_1.plot.scatter(
x='{}_1'.format(model),
y='{}_2'.format(model),
color='b',
label='1',
s=50)
df_0.plot.scatter(
ax=ax,
x='{}_1'.format(model),
y='{}_2'.format(model),
color='r',
label='0',
s=50)
elif X_decomp.shape[1] == 3:
ax = plt.figure().gca(projection='3d')
# df_1.plot.scatter(x='{}_1'.format(model), y='{}_2'.format(model), z='{}_3'.format(model), color='b', label='1', s=50, ax=threedee)
ax.scatter(df_1['{}_1'.format(model)],
df_1['{}_2'.format(model)],
df_1['{}_3'.format(model)],
color='b',
label='1',
s=50)
ax.scatter(df_0['{}_1'.format(model)],
df_0['{}_2'.format(model)],
df_0['{}_3'.format(model)],
color='r',
label='0',
s=50)
ax.set_xlabel('{}_1'.format(model))
ax.set_ylabel('{}_2'.format(model))
ax.set_zlabel('{}_3'.format(model))
return ax
def permutation_scikit(X, y, cv=False, plot=True):
import matplotlib.pyplot as plt
from sklearn.svm import SVC
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.model_selection import KFold
from sklearn.model_selection import StratifiedKFold
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import permutation_test_score
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report, confusion_matrix
import numpy as np
if not cv:
clf = SVC(C=0.01, break_ties=False, cache_size=200, class_weight=None, coef0=0.0,
decision_function_shape='ovr', degree=3, gamma=0.032374575428176434,
kernel='poly', max_iter=-1, probability=False, random_state=None,
shrinking=True, tol=0.001, verbose=False)
        # NOTE: this overrides the tuned poly-kernel SVC defined just above:
        clf = SVC(kernel='linear')
# clf = LinearDiscriminantAnalysis()
cv = StratifiedKFold(4, shuffle=True)
# cv = KFold(4, shuffle=True)
n_classes = 2
score, permutation_scores, pvalue = permutation_test_score(
clf, X, y, scoring="f1", cv=cv, n_permutations=1000, n_jobs=-1, verbose=2)
print("Classification score %s (pvalue : %s)" % (score, pvalue))
plt.hist(permutation_scores, 20, label='Permutation scores',
edgecolor='black')
ylim = plt.ylim()
plt.plot(2 * [score], ylim, '--g', linewidth=3,
label='Classification Score'
' (pvalue %s)' % pvalue)
plt.plot(2 * [1. / n_classes], ylim, '--k', linewidth=3, label='Luck')
plt.ylim(ylim)
plt.legend()
plt.xlabel('Score')
plt.show()
else:
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.2, shuffle=True, random_state=42)
param_grid = {
'C': np.logspace(-2, 3, 50), 'gamma': np.logspace(-2, 3, 50),
'kernel': ['rbf', 'poly', 'sigmoid']}
grid = GridSearchCV(SVC(), param_grid, refit=True, verbose=2)
grid.fit(X_train, y_train)
print(grid.best_estimator_)
grid_predictions = grid.predict(X_test)
print(confusion_matrix(y_test, grid_predictions))
print(classification_report(y_test, grid_predictions))
return
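# A minimal, self-contained sketch of the permutation test used above, on toy
# data; the classifier, scorer and n_permutations are illustrative choices:
def _example_permutation_test():
    from sklearn.datasets import make_classification
    from sklearn.model_selection import StratifiedKFold, permutation_test_score
    from sklearn.svm import SVC
    X, y = make_classification(n_samples=100, n_features=10, random_state=42)
    cv = StratifiedKFold(4, shuffle=True, random_state=42)
    score, perm_scores, pvalue = permutation_test_score(
        SVC(kernel='linear'), X, y, scoring='f1', cv=cv, n_permutations=100)
    # a small p-value means the score is unlikely under shuffled labels:
    return score, pvalue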
def grab_y_true_and_predict_from_sklearn_model(model, X, y, cv,
kfold_name='inner_kfold'):
from sklearn.model_selection import GridSearchCV
import xarray as xr
import numpy as np
if isinstance(model, GridSearchCV):
model = model.best_estimator_
ds_list = []
for i, (train, val) in enumerate(cv.split(X, y)):
model.fit(X[train], y[train])
y_true = y[val]
y_pred = model.predict(X[val])
try:
lr_probs = model.predict_proba(X[val])
# keep probabilities for the positive outcome only
lr_probs = lr_probs[:, 1]
except AttributeError:
lr_probs = model.decision_function(X[val])
y_true_da = xr.DataArray(y_true, dims=['sample'])
y_pred_da = xr.DataArray(y_pred, dims=['sample'])
y_prob_da = xr.DataArray(lr_probs, dims=['sample'])
ds = xr.Dataset()
ds['y_true'] = y_true_da
ds['y_pred'] = y_pred_da
ds['y_prob'] = y_prob_da
ds['sample'] = np.arange(0, len(X[val]))
ds_list.append(ds)
ds = xr.concat(ds_list, kfold_name)
ds[kfold_name] = np.arange(1, cv.n_splits + 1)
return ds
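# A small, self-contained sketch of the probability / decision-score fallback
# used above: classifiers without predict_proba (e.g. SVC with
# probability=False) fall back to decision_function, and only the
# positive-class column is kept when probabilities are available. Toy data and
# models are illustrative only.
def _example_positive_class_scores():
    from sklearn.datasets import make_classification
    from sklearn.linear_model import LogisticRegression
    from sklearn.svm import SVC
    X, y = make_classification(n_samples=60, n_features=5, random_state=1)
    scores = {}
    for model in [LogisticRegression().fit(X, y), SVC().fit(X, y)]:
        try:
            scores[type(model).__name__] = model.predict_proba(X)[:, 1]
        except AttributeError:
            scores[type(model).__name__] = model.decision_function(X)
    return scores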
def produce_ROC_curves_from_model(model, X, y, cv, kfold_name='inner_kfold'):
import numpy as np
import xarray as xr
from sklearn.metrics import roc_curve
from sklearn.metrics import roc_auc_score
from sklearn.model_selection import GridSearchCV
from sklearn.metrics import precision_recall_curve
from sklearn.metrics import average_precision_score
# TODO: collect all predictions and y_tests from this, also predict_proba
    # and save, then calculate everything elsewhere.
if isinstance(model, GridSearchCV):
model = model.best_estimator_
tprs = []
aucs = []
pr = []
pr_aucs = []
mean_fpr = np.linspace(0, 1, 100)
for i, (train, val) in enumerate(cv.split(X, y)):
model.fit(X[train], y[train])
y_pred = model.predict(X[val])
try:
lr_probs = model.predict_proba(X[val])
# keep probabilities for the positive outcome only
lr_probs = lr_probs[:, 1]
except AttributeError:
lr_probs = model.decision_function(X[val])
        # NOTE: roc_curve/roc_auc_score (and average_precision_score below) are
        # computed here from the hard predictions y_pred; lr_probs would yield
        # threshold-resolved curves.
        fpr, tpr, _ = roc_curve(y[val], y_pred)
interp_tpr = np.interp(mean_fpr, fpr, tpr)
interp_tpr[0] = 0.0
tprs.append(interp_tpr)
aucs.append(roc_auc_score(y[val], y_pred))
precision, recall, _ = precision_recall_curve(y[val], lr_probs)
pr.append(recall)
average_precision = average_precision_score(y[val], y_pred)
pr_aucs.append(average_precision)
# mean_tpr = np.mean(tprs, axis=0)
# mean_tpr[-1] = 1.0
# mean_auc = auc(mean_fpr, mean_tpr)
# std_auc = np.std(aucs)
# std_tpr = np.std(tprs, axis=0)
tpr_da = xr.DataArray(tprs, dims=[kfold_name, 'fpr'])
auc_da = xr.DataArray(aucs, dims=[kfold_name])
ds = xr.Dataset()
ds['TPR'] = tpr_da
ds['AUC'] = auc_da
ds['fpr'] = mean_fpr
ds[kfold_name] = np.arange(1, cv.n_splits + 1)
# variability for each tpr is ds['TPR'].std('kfold')
return ds
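# A small sketch of the TPR interpolation used above: each fold's ROC curve is
# resampled onto a common FPR grid with np.interp so the folds can be stacked
# along one dimension (and their spread computed later). Toy labels/scores only.
def _example_interp_tpr_on_common_grid():
    import numpy as np
    from sklearn.metrics import roc_curve
    y_true = np.array([0, 0, 1, 1, 0, 1])
    y_score = np.array([0.1, 0.4, 0.35, 0.8, 0.2, 0.7])
    fpr, tpr, _ = roc_curve(y_true, y_score)
    mean_fpr = np.linspace(0, 1, 100)
    interp_tpr = np.interp(mean_fpr, fpr, tpr)
    interp_tpr[0] = 0.0
    return mean_fpr, interp_tpr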
def cross_validation_with_holdout(X, y, model_name='SVC', features='pwv',
n_splits=3, test_ratio=0.25,
scorers=['f1', 'recall', 'tss', 'hss',
'precision', 'accuracy'],
seed=42, savepath=None, verbose=0,
param_grid='normal', n_jobs=-1,
n_repeats=None):
# from sklearn.model_selection import cross_validate
from sklearn.model_selection import StratifiedKFold
from sklearn.model_selection import RepeatedStratifiedKFold
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import train_test_split
from sklearn.metrics import make_scorer
# from string import digits
import numpy as np
# import xarray as xr
scores_dict = {s: s for s in scorers}
if 'tss' in scorers:
scores_dict['tss'] = make_scorer(tss_score)
if 'hss' in scorers:
scores_dict['hss'] = make_scorer(hss_score)
X = select_doy_from_feature_list(X, model_name, features)
if param_grid == 'light':
print(np.unique(X.feature.values))
# first take out the hold-out set:
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=test_ratio,
random_state=seed,
stratify=y)
if n_repeats is None:
# configure the cross-validation procedure
cv = StratifiedKFold(n_splits=n_splits, shuffle=True,
random_state=seed)
print('CV StratifiedKfolds of {}.'.format(n_splits))
# define the model and search space:
else:
cv = RepeatedStratifiedKFold(n_splits=n_splits, n_repeats=n_repeats,
random_state=seed)
print('CV RepeatedStratifiedKFold of {} with {} repeats.'.format(n_splits, n_repeats))
ml = ML_Classifier_Switcher()
print('param grid group is set to {}.'.format(param_grid))
sk_model = ml.pick_model(model_name, pgrid=param_grid)
search_space = ml.param_grid
# define search
gr_search = GridSearchCV(estimator=sk_model, param_grid=search_space,
cv=cv, n_jobs=n_jobs,
scoring=scores_dict,
verbose=verbose,
refit=False, return_train_score=True)
    # NOTE: the grid search is fitted on the full X, y; the holdout split
    # (X_train/X_test) computed above is not used further in this function.
    gr_search.fit(X, y)
if isinstance(features, str):
features = [features]
if savepath is not None:
filename = 'GRSRCHCV_holdout_{}_{}_{}_{}_{}_{}_{}.pkl'.format(
model_name, '+'.join(features), '+'.join(scorers), n_splits,
int(test_ratio*100), param_grid, seed)
save_gridsearchcv_object(gr_search, savepath, filename)
# gr, _ = process_gridsearch_results(
# gr_search, model_name, split_dim='kfold', features=X.feature.values)
# remove_digits = str.maketrans('', '', digits)
# features = list(set([x.translate(remove_digits).split('_')[0]
# for x in X.feature.values]))
# # add more attrs, features etc:
# gr.attrs['features'] = features
return gr_search
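# The tss_score and hss_score scorers referenced above are defined elsewhere in
# this module; for reference, the sketch below shows one common way these skill
# scores are computed from the binary confusion matrix (an assumed form, not
# necessarily identical to the module's implementation):
def _example_tss_hss_from_confusion_matrix(y_true, y_pred):
    from sklearn.metrics import confusion_matrix
    tn, fp, fn, tp = confusion_matrix(y_true, y_pred).ravel()
    # True Skill Statistic (Peirce skill score): sensitivity + specificity - 1
    tss = tp / (tp + fn) - fp / (fp + tn)
    # Heidke Skill Score:
    hss = 2 * (tp * tn - fp * fn) / ((tp + fn) * (fn + tn) +
                                     (tp + fp) * (fp + tn))
    return tss, hss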
def select_doy_from_feature_list(X, model_name='RF', features='pwv'):
# first if RF chosen, replace the cyclic coords of DOY (sin and cos) with
# the DOY itself.
if isinstance(features, list):
feats = features.copy()
else:
feats = features
if model_name == 'RF' and 'doy' in features:
if isinstance(features, list):
feats.remove('doy')
feats.append('DOY')
elif isinstance(features, str):
feats = 'DOY'
elif model_name != 'RF' and 'doy' in features:
if isinstance(features, list):
feats.remove('doy')
feats.append('doy_sin')
feats.append('doy_cos')
        elif isinstance(features, str):
            feats = ['doy_sin', 'doy_cos']
X = select_features_from_X(X, feats)
return X
def single_cross_validation(X_val, y_val, model_name='SVC', features='pwv',
n_splits=4, scorers=['f1', 'recall', 'tss', 'hss',
'precision', 'accuracy'],
seed=42, savepath=None, verbose=0,
param_grid='normal', n_jobs=-1,
n_repeats=None, outer_split='1-1'):
# from sklearn.model_selection import cross_validate
from sklearn.model_selection import StratifiedKFold
from sklearn.model_selection import RepeatedStratifiedKFold
from sklearn.model_selection import GridSearchCV
# from sklearn.model_selection import train_test_split
from sklearn.metrics import make_scorer
# from string import digits
import numpy as np
# import xarray as xr
scores_dict = {s: s for s in scorers}
if 'tss' in scorers:
scores_dict['tss'] = make_scorer(tss_score)
if 'hss' in scorers:
scores_dict['hss'] = make_scorer(hss_score)
X = select_doy_from_feature_list(X_val, model_name, features)
y = y_val
if param_grid == 'light':
print(np.unique(X.feature.values))
if n_repeats is None:
# configure the cross-validation procedure
cv = StratifiedKFold(n_splits=n_splits, shuffle=True,
random_state=seed)
print('CV StratifiedKfolds of {}.'.format(n_splits))
# define the model and search space:
else:
cv = RepeatedStratifiedKFold(n_splits=n_splits, n_repeats=n_repeats,
random_state=seed)
print('CV RepeatedStratifiedKFold of {} with {} repeats.'.format(
n_splits, n_repeats))
ml = ML_Classifier_Switcher()
print('param grid group is set to {}.'.format(param_grid))
if outer_split == '1-1':
cv_type = 'holdout'
print('holdout cv is selected.')
else:
cv_type = 'nested'
print('nested cv {} out of {}.'.format(
outer_split.split('-')[0], outer_split.split('-')[1]))
sk_model = ml.pick_model(model_name, pgrid=param_grid)
search_space = ml.param_grid
# define search
gr_search = GridSearchCV(estimator=sk_model, param_grid=search_space,
cv=cv, n_jobs=n_jobs,
scoring=scores_dict,
verbose=verbose,
refit=False, return_train_score=True)
gr_search.fit(X, y)
if isinstance(features, str):
features = [features]
if savepath is not None:
        filename = 'GRSRCHCV_{}_{}_{}_{}_{}_{}_{}_{}.pkl'.format(
            cv_type, model_name, '+'.join(features), '+'.join(scorers),
            n_splits, outer_split, param_grid, seed)
save_gridsearchcv_object(gr_search, savepath, filename)
return gr_search
def save_cv_params_to_file(cv_obj, path, name):
import pandas as pd
di = vars(cv_obj)
splitter_type = cv_obj.__repr__().split('(')[0]
di['splitter_type'] = splitter_type
(pd.DataFrame.from_dict(data=di, orient='index')
.to_csv(path / '{}.csv'.format(name), header=False))
print('{}.csv saved to {}.'.format(name, path))
return
def read_cv_params_and_instantiate(filepath):
import pandas as pd
from sklearn.model_selection import StratifiedKFold
df = pd.read_csv(filepath, header=None, index_col=0)
d = {}
for row in df.iterrows():
dd = pd.to_numeric(row[1], errors='ignore')
        if dd.item() == 'True' or dd.item() == 'False':
            # parse booleans explicitly: astype(bool) on the string 'False' would give True
            d[dd.to_frame().columns.item()] = dd.item() == 'True'
        else:
            d[dd.to_frame().columns.item()] = dd.item()
s_type = d.pop('splitter_type')
if s_type == 'StratifiedKFold':
cv = StratifiedKFold(**d)
return cv
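# A hypothetical round-trip of the two helpers above: persist the splitter
# parameters to a CSV file and re-instantiate an equivalent splitter from it.
# The temporary directory is illustrative only.
def _example_cv_params_roundtrip():
    import tempfile
    from pathlib import Path
    from sklearn.model_selection import StratifiedKFold
    cv = StratifiedKFold(n_splits=4, shuffle=True, random_state=42)
    tmp = Path(tempfile.mkdtemp())
    save_cv_params_to_file(cv, tmp, 'cv_params')
    cv2 = read_cv_params_and_instantiate(tmp / 'cv_params.csv')
    return cv2.get_n_splits()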
def nested_cross_validation_procedure(X, y, model_name='SVC', features='pwv',
outer_splits=4, inner_splits=2,
refit_scorer='roc_auc',
scorers=['f1', 'recall', 'tss', 'hss',
'roc_auc', 'precision',
'accuracy'],
seed=42, savepath=None, verbose=0,
param_grid='normal', n_jobs=-1):
from sklearn.model_selection import cross_validate
from sklearn.model_selection import StratifiedKFold
from sklearn.model_selection import GridSearchCV
from sklearn.metrics import make_scorer
from sklearn.inspection import permutation_importance
from string import digits
import numpy as np
import xarray as xr
assert refit_scorer in scorers
scores_dict = {s: s for s in scorers}
if 'tss' in scorers:
scores_dict['tss'] = make_scorer(tss_score)
if 'hss' in scorers:
scores_dict['hss'] = make_scorer(hss_score)
X = select_doy_from_feature_list(X, model_name, features)
# if model_name == 'RF':
# doy = X['sample'].dt.dayofyear
# sel_doy = [x for x in X.feature.values if 'doy_sin' in x]
# doy_X = doy.broadcast_like(X.sel(feature=sel_doy))
# doy_X['feature'] = [
# 'doy_{}'.format(x) for x in range(
# doy_X.feature.size)]
# no_doy = [x for x in X.feature.values if 'doy' not in x]
# X = X.sel(feature=no_doy)
# X = xr.concat([X, doy_X], 'feature')
# else:
# # first slice X for features:
# if isinstance(features, str):
# f = [x for x in X.feature.values if features in x]
# X = X.sel(feature=f)
# elif isinstance(features, list):
# fs = []
# for f in features:
# fs += [x for x in X.feature.values if f in x]
# X = X.sel(feature=fs)
if param_grid == 'light':
print(np.unique(X.feature.values))
# configure the cross-validation procedure
cv_inner = StratifiedKFold(n_splits=inner_splits, shuffle=True,
random_state=seed)
print('Inner CV StratifiedKfolds of {}.'.format(inner_splits))
# define the model and search space:
ml = ML_Classifier_Switcher()
if param_grid == 'light':
        print('diagnostic mode light.')
sk_model = ml.pick_model(model_name, pgrid=param_grid)
search_space = ml.param_grid
# define search
gr_search = GridSearchCV(estimator=sk_model, param_grid=search_space,
cv=cv_inner, n_jobs=n_jobs,
scoring=scores_dict,
verbose=verbose,
refit=refit_scorer, return_train_score=True)
# gr.fit(X, y)
# configure the cross-validation procedure
cv_outer = StratifiedKFold(
n_splits=outer_splits, shuffle=True, random_state=seed)
# execute the nested cross-validation
scores_est_dict = cross_validate(gr_search, X, y,
scoring=scores_dict,
cv=cv_outer, n_jobs=n_jobs,
return_estimator=True, verbose=verbose)
# perm = []
# for i, (train, val) in enumerate(cv_outer.split(X, y)):
# gr_model = scores_est_dict['estimator'][i]
# gr_model.fit(X[train], y[train])
# r = permutation_importance(gr_model, X[val], y[val],scoring='f1',
# n_repeats=30, n_jobs=-1,
# random_state=0)
# perm.append(r)
# get the test scores:
test_keys = [x for x in scores_est_dict.keys() if 'test' in x]
ds = xr.Dataset()
for key in test_keys:
ds[key] = xr.DataArray(scores_est_dict[key], dims=['outer_kfold'])
preds_ds = []
gr_ds = []
for est in scores_est_dict['estimator']:
gr, _ = process_gridsearch_results(
est, model_name, split_dim='inner_kfold', features=X.feature.values)
# somehow save gr:
gr_ds.append(gr)
preds_ds.append(
grab_y_true_and_predict_from_sklearn_model(est, X, y, cv_inner))
# tpr_ds.append(produce_ROC_curves_from_model(est, X, y, cv_inner))
dss = xr.concat(preds_ds, 'outer_kfold')
gr_dss = xr.concat(gr_ds, 'outer_kfold')
dss['outer_kfold'] = np.arange(1, cv_outer.n_splits + 1)
gr_dss['outer_kfold'] = np.arange(1, cv_outer.n_splits + 1)
    # aggregate results:
dss = xr.merge([ds, dss])
dss = xr.merge([dss, gr_dss])
dss.attrs = gr_dss.attrs
dss.attrs['outer_kfold_splits'] = outer_splits
remove_digits = str.maketrans('', '', digits)
features = list(set([x.translate(remove_digits).split('_')[0]
for x in X.feature.values]))
# add more attrs, features etc:
dss.attrs['features'] = features
# rename major data_vars with model name:
# ys = [x for x in dss.data_vars if 'y_' in x]
# new_ys = [y + '_{}'.format(model_name) for y in ys]
# dss = dss.rename(dict(zip(ys, new_ys)))
# new_test_keys = [y + '_{}'.format(model_name) for y in test_keys]
# dss = dss.rename(dict(zip(test_keys, new_test_keys)))
# if isinstance(X.attrs['pwv_id'], list):
# dss.attrs['pwv_id'] = '-'.join(X.attrs['pwv_id'])
# else:
# dss.attrs['pwv_id'] = X.attrs['pwv_id']
# if isinstance(y.attrs['hydro_station_id'], list):
# dss.attrs['hs_id'] = '-'.join([str(x) for x in y.attrs['hydro_station_id']])
# else:
# dss.attrs['hs_id'] = y.attrs['hydro_station_id']
# dss.attrs['hydro_max_flow'] = y.attrs['max_flow']
# dss.attrs['neg_pos_ratio'] = y.attrs['neg_pos_ratio']
# save results to file:
if savepath is not None:
save_cv_results(dss, savepath=savepath)
return dss
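# A minimal, self-contained sketch of the nested-CV structure used above: an
# inner GridSearchCV tunes hyper-parameters while an outer StratifiedKFold (via
# cross_validate) estimates the generalization of the tuned model. The toy data
# and the tiny SVC grid are illustrative only.
def _example_nested_cv():
    from sklearn.datasets import make_classification
    from sklearn.model_selection import (StratifiedKFold, GridSearchCV,
                                         cross_validate)
    from sklearn.svm import SVC
    X, y = make_classification(n_samples=120, n_features=8, random_state=0)
    inner_cv = StratifiedKFold(n_splits=2, shuffle=True, random_state=0)
    outer_cv = StratifiedKFold(n_splits=4, shuffle=True, random_state=0)
    gr = GridSearchCV(SVC(), {'C': [0.1, 1, 10]}, cv=inner_cv,
                      scoring='f1', refit=True)
    scores = cross_validate(gr, X, y, cv=outer_cv, scoring='f1',
                            return_estimator=True)
    return scores['test_score']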
# def ML_main_procedure(X, y, estimator=None, model_name='SVC', features='pwv',
# val_size=0.18, n_splits=None, test_size=0.2, seed=42, best_score='f1',
# savepath=None, plot=True):
# """split the X,y for train and test, either do HP tuning using HP_tuning
# with val_size or use already tuned (or not) estimator.
# models to play with = MLP, RF and SVC.
# n_splits = 2, 3, 4.
# features = pwv, pressure.
# best_score = f1, roc_auc, accuracy.
# can do loop on them. RF takes the most time to tune."""
# X = select_features_from_X(X, features)
# X_train, X_test, y_train, y_test = train_test_split(X, y,
# test_size=test_size,
# shuffle=True,
# random_state=seed)
# # do HP_tuning:
# if estimator is None:
# cvr, model = HP_tuning(X_train, y_train, model_name=model_name, val_size=val_size, test_size=test_size,
# best_score=best_score, seed=seed, savepath=savepath, n_splits=n_splits)
# else:
# model = estimator
# if plot:
# ax = plot_many_ROC_curves(model, X_test, y_test, name=model_name,
# ax=None)
# return ax
# else:
# return model
def plot_hyper_parameters_heatmaps_from_nested_CV_model(dss, path=hydro_path, model_name='MLP',
features='pwv+pressure+doy', save=True):
import matplotlib.pyplot as plt
ds = dss.sel(features=features).reset_coords(drop=True)
non_hp_vars = ['mean_score', 'std_score',
'test_score', 'roc_auc_score', 'TPR']
if model_name == 'RF':
non_hp_vars.append('feature_importances')
ds = ds[[x for x in ds if x not in non_hp_vars]]
seq = 'Blues'
cat = 'Dark2'
cmap_hp_dict = {
'alpha': seq, 'activation': cat,
'hidden_layer_sizes': cat, 'learning_rate': cat,
'solver': cat, 'kernel': cat, 'C': seq,
'gamma': seq, 'degree': seq, 'coef0': seq,
'max_depth': seq, 'max_features': cat,
'min_samples_leaf': seq, 'min_samples_split': seq,
'n_estimators': seq
}
# fix stuff for SVC:
if model_name == 'SVC':
ds['degree'] = ds['degree'].where(ds['kernel']=='poly')
ds['coef0'] = ds['coef0'].where(ds['kernel']=='poly')
# da = ds.to_arrray('hyper_parameters')
# fg = xr.plot.FacetGrid(
# da,
# col='hyper_parameters',
# sharex=False,
# sharey=False, figsize=(16, 10))
fig, axes = plt.subplots(5, 1, sharex=True, figsize=(4, 10))
for i, da in enumerate(ds):
df = ds[da].reset_coords(drop=True).to_dataset('scorer').to_dataframe()
df.index.name = 'Outer Split'
try:
df = df.astype(float).round(2)
except ValueError:
pass
cmap = cmap_hp_dict.get(da, 'Set1')
plot_heatmap_for_hyper_parameters_df(df, ax=axes[i], title=da, cmap=cmap)
fig.tight_layout()
if save:
filename = 'Hyper-parameters_nested_{}.png'.format(
model_name)
plt.savefig(savefig_path / filename, bbox_inches='tight')
return
def plot_heatmaps_for_hyper_parameters_data_splits(df1, df2, axes=None,
cmap='colorblind',
title=None, fig=None,
cbar_params=[.92, .12, .03, .75],
fontsize=12,
val_type='float'):
import pandas as pd
import seaborn as sns
import numpy as np
# from mpl_toolkits.axes_grid1 import make_axes_locatable
from matplotlib.colors import Normalize
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.cm as cm
sns.set_style('ticks')
sns.set_style('whitegrid')
sns.set(font_scale=1.2)
df1 = df1.astype(eval(val_type))
df2 = df2.astype(eval(val_type))
arr = pd.concat([df1, df2], axis=0).values.ravel()
value_to_int = {j: i for i, j in enumerate(
np.unique(arr))} # like you did
# try:
# sorted_v_to_i = dict(sorted(value_to_int.items()))
# except TypeError:
# sorted_v_to_i = value_to_int
# print(value_to_int)
n = len(value_to_int)
# discrete colormap (n samples from a given cmap)
cmap_list = sns.color_palette(cmap, n)
if val_type == 'float':
# print([value_to_int.keys()])
cbar_ticklabels = ['{:.2g}'.format(x) for x in value_to_int.keys()]
elif val_type == 'int':
cbar_ticklabels = [int(x) for x in value_to_int.keys()]
elif val_type == 'str':
cbar_ticklabels = [x for x in value_to_int.keys()]
if 'nan' in value_to_int.keys():
cmap_list[-1] = (0.5, 0.5, 0.5)
new_value_to_int = {}
for key, val in value_to_int.items():
try:
new_value_to_int[str(int(float(key)))] = val
except ValueError:
new_value_to_int['NR'] = val
cbar_ticklabels = [x for x in new_value_to_int.keys()]
# u1 = np.unique(df1.replace(value_to_int)).astype(int)
# cmap1 = [cmap_list[x] for x in u1]
# u2 = np.unique(df2.replace(value_to_int)).astype(int)
# cmap2 = [cmap_list[x] for x in u2]
# prepare normalizer
## Prepare bins for the normalizer
norm_bins = np.sort([*value_to_int.values()]) + 0.5
norm_bins = np.insert(norm_bins, 0, np.min(norm_bins) - 1.0)
# print(norm_bins)
## Make normalizer and formatter
norm = matplotlib.colors.BoundaryNorm(norm_bins, n, clip=True)
# normalizer = Normalize(np.array([x for x in value_to_int.values()])[0],np.array([x for x in value_to_int.values()])[-1])
# im=cm.ScalarMappable(norm=normalizer)
if axes is None:
fig, axes = plt.subplots(2, 1, sharex=True, sharey=False)
# divider = make_axes_locatable([axes[0], axes[1]])
# cbar_ax = divider.append_axes('right', size='5%', pad=0.05)
cbar_ax = fig.add_axes(cbar_params)
sns.heatmap(df1.replace(value_to_int), cmap=cmap_list, cbar=False,
ax=axes[0], linewidth=0.7, linecolor='k', square=True,
cbar_kws={"shrink": .9}, cbar_ax=cbar_ax, norm=norm)
sns.heatmap(df2.replace(value_to_int), cmap=cmap_list, cbar=False,
ax=axes[1], linewidth=0.7, linecolor='k', square=True,
cbar_kws={"shrink": .9}, cbar_ax=cbar_ax, norm=norm)
# else:
# ax = sns.heatmap(df.replace(sorted_v_to_i), cmap=cmap,
# ax=ax, linewidth=1, linecolor='k',
# square=False, cbar_kws={"shrink": .9})
if title is not None:
axes[0].set_title(title, fontsize=fontsize)
for ax in axes:
ax.set_xticklabels(ax.get_xticklabels(), ha='right', va='top', rotation=45)
ax.set_yticklabels(ax.get_yticklabels(), rotation=0)
ax.tick_params(labelsize=fontsize, direction='out', bottom=True,
left=True, length=2)
ax.set_ylabel(ax.get_ylabel(), fontsize=fontsize)
ax.set_xlabel(ax.get_xlabel(), fontsize=fontsize)
# colorbar = axes[0].collections[0].colorbar
# diff = norm_bins[1:] - norm_bins[:-1]
# tickz = norm_bins[:-1] + diff / 2
colorbar = fig.colorbar(cm.ScalarMappable(norm=norm, cmap=matplotlib.colors.ListedColormap(cmap_list)), ax=[axes[0], axes[1]],
shrink=1, pad=0.05, cax=cbar_ax)
# colorbar = plt.gca().images[-1].colorbar
r = colorbar.vmax - colorbar.vmin
colorbar.set_ticks([colorbar.vmin + r / n * (0.5 + i) for i in range(n)])
colorbar.ax.set_yticklabels(cbar_ticklabels, fontsize=fontsize-2)
return axes
def plot_hyper_parameters_heatmap_data_splits_per_model(dss4, dss5, fontsize=14,
save=True, model_name='SVC',
features='pwv+pressure+doy'):
import matplotlib.pyplot as plt
# import seaborn as sns
fig, axes = plt.subplots(2, 5, sharex=True, sharey=False ,figsize=(16, 5))
ds4 = dss4.sel(features=features).reset_coords(drop=True)
ds5 = dss5.sel(features=features).reset_coords(drop=True)
ds4 = ds4.reindex(scorer=scorer_order)
ds5 = ds5.reindex(scorer=scorer_order)
non_hp_vars = ['mean_score', 'std_score',
'test_score', 'roc_auc_score', 'TPR']
if model_name == 'RF':
non_hp_vars.append('feature_importances')
if model_name == 'MLP':
adj_dict=dict(
top=0.946,
bottom=0.145,
left=0.046,
right=0.937,
hspace=0.121,
wspace=0.652)
cb_st = 0.167
cb_mul = 0.193
else:
adj_dict=dict(
wspace = 0.477,
top=0.921,
bottom=0.17,
left=0.046,
right=0.937,
hspace=0.121)
cb_st = 0.18
cb_mul = 0.19
ds4 = ds4[[x for x in ds4 if x not in non_hp_vars]]
ds5 = ds5[[x for x in ds5 if x not in non_hp_vars]]
seq = 'Blues'
cat = 'Dark2'
hp_dict = {
'alpha': ['Reds', 'float'], 'activation': ['Set1_r', 'str'],
'hidden_layer_sizes': ['Paired', 'str'], 'learning_rate': ['Spectral_r', 'str'],
'solver': ['Dark2', 'str'], 'kernel': ['Dark2', 'str'], 'C': ['Blues', 'float'],
'gamma': ['Oranges', 'float'], 'degree': ['Greens', 'str'], 'coef0': ['Spectral', 'str'],
'max_depth': ['Blues', 'int'], 'max_features': ['Dark2', 'str'],
'min_samples_leaf': ['Greens', 'int'], 'min_samples_split': ['Reds', 'int'],
'n_estimators': ['Oranges', 'int']
}
# fix stuff for SVC:
if model_name == 'SVC':
ds4['degree'] = ds4['degree'].where(ds4['kernel']=='poly')
ds4['coef0'] = ds4['coef0'].where(ds4['kernel']=='poly')
ds5['degree'] = ds5['degree'].where(ds5['kernel']=='poly')
ds5['coef0'] = ds5['coef0'].where(ds5['kernel']=='poly')
for i, (da4, da5) in enumerate(zip(ds4, ds5)):
df4 = ds4[da4].reset_coords(drop=True).to_dataset('scorer').to_dataframe()
df5 = ds5[da5].reset_coords(drop=True).to_dataset('scorer').to_dataframe()
df4.index.name = 'Outer Split'
df5.index.name = 'Outer Split'
# try:
# df4 = df4.astype(float).round(2)
# df5 = df5.astype(float).round(2)
# except ValueError:
# pass
cmap = hp_dict.get(da4, 'Set1')[0]
val_type = hp_dict.get(da4, 'int')[1]
cbar_params = [cb_st + cb_mul*float(i), .175, .01, .71]
plot_heatmaps_for_hyper_parameters_data_splits(df4,
df5,
axes=[axes[0, i], axes[1, i]],
fig=fig,
title=da4,
cmap=cmap,
cbar_params=cbar_params,
fontsize=fontsize,
val_type=val_type)
if i > 0 :
axes[0, i].set_ylabel('')
axes[0, i].yaxis.set_tick_params(labelleft=False)
axes[1, i].set_ylabel('')
axes[1, i].yaxis.set_tick_params(labelleft=False)
fig.tight_layout()
fig.subplots_adjust(**adj_dict)
if save:
filename = 'Hyper-parameters_nested_{}.png'.format(
model_name)
plt.savefig(savefig_path / filename, bbox_inches='tight')
return fig
def plot_heatmap_for_hyper_parameters_df(df, ax=None, cmap='colorblind',
title=None, fontsize=12):
import pandas as pd
import seaborn as sns
import numpy as np
sns.set_style('ticks')
sns.set_style('whitegrid')
sns.set(font_scale=1.2)
value_to_int = {j: i for i, j in enumerate(
sorted(pd.unique(df.values.ravel())))} # like you did
# for key in value_to_int.copy().keys():
# try:
# if np.isnan(key):
# value_to_int['NA'] = value_to_int.pop(key)
# df = df.fillna('NA')
# except TypeError:
# pass
try:
sorted_v_to_i = dict(sorted(value_to_int.items()))
except TypeError:
sorted_v_to_i = value_to_int
n = len(value_to_int)
# discrete colormap (n samples from a given cmap)
cmap = sns.color_palette(cmap, n)
if ax is None:
ax = sns.heatmap(df.replace(sorted_v_to_i), cmap=cmap,
linewidth=1, linecolor='k', square=False,
cbar_kws={"shrink": .9})
else:
ax = sns.heatmap(df.replace(sorted_v_to_i), cmap=cmap,
ax=ax, linewidth=1, linecolor='k',
square=False, cbar_kws={"shrink": .9})
if title is not None:
ax.set_title(title, fontsize=fontsize)
ax.set_xticklabels(ax.get_xticklabels(), rotation=30)
ax.set_yticklabels(ax.get_yticklabels(), rotation=0)
ax.tick_params(labelsize=fontsize)
ax.set_ylabel(ax.get_ylabel(), fontsize=fontsize)
ax.set_xlabel(ax.get_xlabel(), fontsize=fontsize)
colorbar = ax.collections[0].colorbar
r = colorbar.vmax - colorbar.vmin
colorbar.set_ticks([colorbar.vmin + r / n * (0.5 + i) for i in range(n)])
colorbar.set_ticklabels(list(value_to_int.keys()))
return ax
# def plot_ROC_curves_for_all_models_and_scorers(dss, save=False,
# fontsize=24, fig_split=1,
# feat=['pwv', 'pwv+pressure', 'pwv+pressure+doy']):
# import xarray as xr
# import seaborn as sns
# import matplotlib.pyplot as plt
# import pandas as pd
# cmap = sns.color_palette('tab10', len(feat))
# sns.set_style('whitegrid')
# sns.set_style('ticks')
# if fig_split == 1:
# dss = dss.sel(scorer=['precision', 'recall', 'f1'])
# elif fig_split == 2:
# dss = dss.sel(scorer=['accuracy', 'tss', 'hss'])
# fg = xr.plot.FacetGrid(
# dss,
# col='model',
# row='scorer',
# sharex=True,
# sharey=True, figsize=(20, 20))
# for i in range(fg.axes.shape[0]): # i is rows
# for j in range(fg.axes.shape[1]): # j is cols
# ax = fg.axes[i, j]
# modelname = dss['model'].isel(model=j).item()
# scorer = dss['scorer'].isel(scorer=i).item()
# chance_plot = [False for x in feat]
# chance_plot[-1] = True
# for k, f in enumerate(feat):
# # name = '{}-{}-{}'.format(modelname, scoring, feat)
# # model = dss.isel({'model': j, 'scoring': i}).sel(
# # {'features': feat})
# model = dss.isel({'model': j, 'scorer': i}
# ).sel({'features': f})
# # return model
# title = 'ROC of {} model ({})'.format(modelname.replace('SVC', 'SVM'), scorer)
# try:
# ax = plot_ROC_curve_from_dss_nested_CV(model, outer_dim='outer_split',
# plot_chance=[k],
# main_label=f,
# ax=ax,
# color=cmap[k], title=title,
# fontsize=fontsize)
# except ValueError:
# ax.grid('on')
# continue
# handles, labels = ax.get_legend_handles_labels()
# lh_ser = pd.Series(labels, index=handles).drop_duplicates()
# lh_ser = lh_ser.sort_values(ascending=False)
# hand = lh_ser.index.values
# labe = lh_ser.values
# ax.legend(handles=hand.tolist(), labels=labe.tolist(), loc="lower right",
# fontsize=fontsize-7)
# ax.grid('on')
# if j >= 1:
# ax.set_ylabel('')
# if fig_split == 1:
# ax.set_xlabel('')
# ax.tick_params(labelbottom=False)
# else:
# if i <= 1:
# ax.set_xlabel('')
# # title = '{} station: {} total events'.format(
# # station.upper(), events)
# # if max_flow > 0:
# # title = '{} station: {} total events (max flow = {} m^3/sec)'.format(
# # station.upper(), events, max_flow)
# # fg.fig.suptitle(title, fontsize=fontsize)
# fg.fig.tight_layout()
# fg.fig.subplots_adjust(top=0.937,
# bottom=0.054,
# left=0.039,
# right=0.993,
# hspace=0.173,
# wspace=0.051)
# if save:
# filename = 'ROC_curves_nested_{}_figsplit_{}.png'.format(
# dss['outer_split'].size, fig_split)
# plt.savefig(savefig_path / filename, bbox_inches='tight')
# return fg
def plot_hydro_ML_models_results_from_dss(dss, std_on='outer',
save=False, fontsize=16,
plot_type='ROC', split=1,
feat=['pwv', 'pressure+pwv', 'doy+pressure+pwv']):
import xarray as xr
import seaborn as sns
import matplotlib.pyplot as plt
import pandas as pd
cmap = sns.color_palette("colorblind", len(feat))
if split == 1:
dss = dss.sel(scoring=['f1', 'precision', 'recall'])
elif split == 2:
dss = dss.sel(scoring=['tss', 'hss', 'roc-auc', 'accuracy'])
fg = xr.plot.FacetGrid(
dss,
col='model',
row='scoring',
sharex=True,
sharey=True, figsize=(20, 20))
for i in range(fg.axes.shape[0]): # i is rows
for j in range(fg.axes.shape[1]): # j is cols
ax = fg.axes[i, j]
modelname = dss['model'].isel(model=j).item()
scoring = dss['scoring'].isel(scoring=i).item()
chance_plot = [False for x in feat]
chance_plot[-1] = True
for k, f in enumerate(feat):
# name = '{}-{}-{}'.format(modelname, scoring, feat)
# model = dss.isel({'model': j, 'scoring': i}).sel(
# {'features': feat})
model = dss.isel({'model': j, 'scoring': i}
).sel({'features': f})
title = '{} of {} model ({})'.format(
plot_type, modelname, scoring)
try:
plot_ROC_PR_curve_from_dss(model, outer_dim='outer_kfold',
inner_dim='inner_kfold',
plot_chance=[k],
main_label=f, plot_type=plot_type,
plot_std_legend=False, ax=ax,
color=cmap[k], title=title,
std_on=std_on, fontsize=fontsize)
except ValueError:
ax.grid('on')
continue
handles, labels = ax.get_legend_handles_labels()
hand = pd.Series(
labels, index=handles).drop_duplicates().index.values
labe = pd.Series(labels, index=handles).drop_duplicates().values
ax.legend(handles=hand.tolist(), labels=labe.tolist(), loc="lower right",
fontsize=14)
ax.grid('on')
# title = '{} station: {} total events'.format(
# station.upper(), events)
# if max_flow > 0:
# title = '{} station: {} total events (max flow = {} m^3/sec)'.format(
# station.upper(), events, max_flow)
# fg.fig.suptitle(title, fontsize=fontsize)
fg.fig.tight_layout()
fg.fig.subplots_adjust(top=0.937,
bottom=0.054,
left=0.039,
right=0.993,
hspace=0.173,
wspace=0.051)
if save:
filename = 'hydro_models_on_{}_{}_std_on_{}_{}.png'.format(
dss['inner_kfold'].size, dss['outer_kfold'].size,
std_on, plot_type)
plt.savefig(savefig_path / filename, bbox_inches='tight')
return fg
# def plot_hydro_ML_models_result(model_da, nsplits=2, station='drag',
# test_size=20, n_splits_plot=None, save=False):
# import xarray as xr
# import seaborn as sns
# import matplotlib.pyplot as plt
# from sklearn.model_selection import train_test_split
# # TODO: add plot_roc_curve(model, X_other_station, y_other_station)
# # TODO: add pw_station, hs_id
# cmap = sns.color_palette("colorblind", 3)
# X, y = produce_X_y(station, hydro_pw_dict[station], neg_pos_ratio=1)
# events = int(y[y == 1].sum().item())
# model_da = model_da.sel(
# splits=nsplits,
# test_size=test_size).reset_coords(
# drop=True)
## just_pw = [x for x in X.feature.values if 'pressure' not in x]
## X_pw = X.sel(feature=just_pw)
# fg = xr.plot.FacetGrid(
# model_da,
# col='model',
# row='scoring',
# sharex=True,
# sharey=True, figsize=(20, 20))
# for i in range(fg.axes.shape[0]): # i is rows
# for j in range(fg.axes.shape[1]): # j is cols
# ax = fg.axes[i, j]
# modelname = model_da['model'].isel(model=j).item()
# scoring = model_da['scoring'].isel(scoring=i).item()
# chance_plot = [False, False, True]
# for k, feat in enumerate(model_da['feature'].values):
# name = '{}-{}-{}'.format(modelname, scoring, feat)
# model = model_da.isel({'model': j, 'scoring': i}).sel({'feature': feat}).item()
# title = 'ROC of {} model ({})'.format(modelname, scoring)
# if not '+' in feat:
# f = [x for x in X.feature.values if feat in x]
# X_f = X.sel(feature=f)
# else:
# X_f = X
# X_train, X_test, y_train, y_test = train_test_split(
# X_f, y, test_size=test_size/100, shuffle=True, random_state=42)
#
# plot_many_ROC_curves(model, X_f, y, name=name,
# color=cmap[k], ax=ax,
# plot_chance=chance_plot[k],
# title=title, n_splits=n_splits_plot)
# fg.fig.suptitle('{} station: {} total_events, test_events = {}, n_splits = {}'.format(station.upper(), events, int(events* test_size/100), nsplits))
# fg.fig.tight_layout()
# fg.fig.subplots_adjust(top=0.937,
# bottom=0.054,
# left=0.039,
# right=0.993,
# hspace=0.173,
# wspace=0.051)
# if save:
# plt.savefig(savefig_path / 'try.png', bbox_inches='tight')
# return fg
def order_features_list(flist):
""" order the feature list in load_ML_run_results
so i don't get duplicates"""
import pandas as pd
import numpy as np
# first get all features:
li = [x.split('+') for x in flist]
flat_list = [item for sublist in li for item in sublist]
f = list(set(flat_list))
nums = np.arange(1, len(f)+1)
    # now assign a number to each entry:
inds = []
for x in flist:
for fe, num in zip(f, nums):
x = x.replace(fe, str(10**num))
inds.append(eval(x))
ser = pd.Series(inds)
ser.index = flist
ser1 = ser.drop_duplicates()
di = dict(zip(ser1.values, ser1.index))
new_flist = []
for ind, feat in zip(inds, flist):
new_flist.append(di.get(ind))
return new_flist
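# A small worked example of the de-duplication trick above: every base feature
# maps to a distinct power of ten, so any '+'-joined combination sums to the
# same number regardless of ordering, and reordered duplicates collapse onto
# one canonical string:
def _example_order_features_list():
    flist = ['pwv', 'doy+pwv', 'pwv+doy', 'pwv+pressure']
    # 'doy+pwv' and 'pwv+doy' evaluate to the same sum and are unified:
    return order_features_list(flist)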
def smart_add_dataarray_to_ds_list(dsl, da_name='feature_importances'):
"""add data array to ds_list even if it does not exist, use shape of
data array that exists in other part of ds list"""
import numpy as np
import xarray as xr
# print(da_name)
fi = [x for x in dsl if da_name in x][0]
print(da_name, fi[da_name].shape)
fi = fi[da_name].copy(data=np.zeros(shape=fi[da_name].shape))
new_dsl = []
for ds in dsl:
if da_name not in ds:
ds = xr.merge([ds, fi], combine_attrs='no_conflicts')
new_dsl.append(ds)
return new_dsl
def load_ML_run_results(path=hydro_ml_path, prefix='CVR',
change_DOY_to_doy=True):
from aux_gps import path_glob
import xarray as xr
# from aux_gps import save_ncfile
import pandas as pd
import numpy as np
print('loading hydro ML results for all models and features')
# print('loading hydro ML results for station {}'.format(pw_station))
model_files = path_glob(path, '{}_*.nc'.format(prefix))
model_files = sorted(model_files)
# model_files = [x for x in model_files if pw_station in x.as_posix()]
ds_list = [xr.load_dataset(x) for x in model_files]
if change_DOY_to_doy:
for ds in ds_list:
if 'DOY' in ds.features:
new_feats = [x.replace('DOY', 'doy') for x in ds['feature'].values]
ds['feature'] = new_feats
ds.attrs['features'] = [x.replace('DOY', 'doy') for x in ds.attrs['features']]
model_as_str = [x.as_posix().split('/')[-1].split('.')[0]
for x in model_files]
model_names = [x.split('_')[1] for x in model_as_str]
model_scores = [x.split('_')[3] for x in model_as_str]
model_features = [x.split('_')[2] for x in model_as_str]
if change_DOY_to_doy:
model_features = [x.replace('DOY', 'doy') for x in model_features]
new_model_features = order_features_list(model_features)
ind = pd.MultiIndex.from_arrays(
[model_names,
new_model_features,
model_scores],
names=(
'model',
'features',
'scoring'))
# ind1 = pd.MultiIndex.from_product([model_names, model_scores, model_features], names=[
# 'model', 'scoring', 'feature'])
# ds_list = [x[data_vars] for x in ds_list]
    # complete non-existent fields like best and fi for all ds:
data_vars = [x for x in ds_list[0] if x.startswith('test')]
# data_vars += ['AUC', 'TPR']
data_vars += [x for x in ds_list[0] if x.startswith('y_')]
bests = [[x for x in y if x.startswith('best')] for y in ds_list]
data_vars += list(set([y for x in bests for y in x]))
if 'RF' in model_names:
data_vars += ['feature_importances']
new_ds_list = []
for dvar in data_vars:
ds_list = smart_add_dataarray_to_ds_list(ds_list, dvar)
# # check if all data vars are in each ds and merge them:
new_ds_list = [xr.merge([y[x] for x in data_vars if x in y],
combine_attrs='no_conflicts') for y in ds_list]
# concat all
dss = xr.concat(new_ds_list, dim='dim_0')
dss['dim_0'] = ind
dss = dss.unstack('dim_0')
# dss.attrs['pwv_id'] = pw_station
# fix roc_auc to roc-auc in dss datavars
dss = dss.rename_vars({'test_roc_auc': 'test_roc-auc'})
# dss['test_roc_auc'].name = 'test_roc-auc'
print('calculating ROC, PR metrics.')
dss = calculate_metrics_from_ML_dss(dss)
print('Done!')
return dss
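# A small sketch of the MultiIndex/unstack pattern used above: results are
# concatenated along a stacked 'dim_0' dimension that carries the
# (model, features) labels, and unstacking turns those labels into separate
# xarray dimensions. The toy labels and values are illustrative only.
def _example_multiindex_unstack():
    import numpy as np
    import pandas as pd
    import xarray as xr
    models = ['SVC', 'SVC', 'RF', 'RF']
    feats = ['pwv', 'pwv+pressure', 'pwv', 'pwv+pressure']
    ind = pd.MultiIndex.from_arrays([models, feats],
                                    names=('model', 'features'))
    da = xr.DataArray(np.arange(4.0), dims=['dim_0'])
    da['dim_0'] = ind
    return da.unstack('dim_0')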
def plot_nested_CV_test_scores(dss, feats=None, fontsize=16,
save=True, wv_label='pwv'):
import seaborn as sns
import matplotlib.pyplot as plt
from aux_gps import convert_da_to_long_form_df
import numpy as np
import xarray as xr
def change_width(ax, new_value) :
for patch in ax.patches :
current_width = patch.get_width()
diff = current_width - new_value
# we change the bar width
patch.set_width(new_value)
# we recenter the bar
patch.set_x(patch.get_x() + diff * .5)
def show_values_on_bars(axs, fs=12, fw='bold', exclude_bar_num=None):
import numpy as np
def _show_on_single_plot(ax, exclude_bar_num=3):
for i, p in enumerate(ax.patches):
                if exclude_bar_num is None or i != exclude_bar_num:
_x = p.get_x() + p.get_width() / 2
_y = p.get_y() + p.get_height()
value = '{:.2f}'.format(p.get_height())
ax.text(_x, _y, value, ha="right",
fontsize=fs, fontweight=fw, zorder=20)
if isinstance(axs, np.ndarray):
for idx, ax in np.ndenumerate(axs):
_show_on_single_plot(ax, exclude_bar_num)
else:
_show_on_single_plot(axs, exclude_bar_num)
splits = dss['outer_split'].size
try:
assert 'best' in dss.attrs['comment']
best = True
except AssertionError:
best = False
except KeyError:
best = False
if 'neg_sample' in dss.dims:
neg = dss['neg_sample'].size
else:
neg = 1
if 'model' not in dss.dims:
dss = dss.expand_dims('model')
dss['model'] = [dss.attrs['model']]
dss = dss.sortby('model', ascending=False)
dss = dss.reindex(scorer=scorer_order)
if feats is None:
feats = ['pwv', 'pwv+pressure', 'pwv+pressure+doy']
dst = dss.sel(features=feats) # .reset_coords(drop=True)
# df = dst['test_score'].to_dataframe()
# df['scorer'] = df.index.get_level_values(3)
# df['model'] = df.index.get_level_values(0)
# df['features'] = df.index.get_level_values(1)
# df['outer_splits'] = df.index.get_level_values(2)
# df['model'] = df['model'].str.replace('SVC', 'SVM')
# df = df.melt(value_vars='test_score', id_vars=[
# 'features', 'model', 'scorer', 'outer_splits'], var_name='test_score',
# value_name='score')
da = dst['test_score']
if len(feats) == 5:
da_empty = da.isel(features=0).copy(
data=np.zeros(da.isel(features=0).shape))
da_empty['features'] = 'empty'
da = xr.concat([da, da_empty], 'features')
da = da.reindex(features=['doy', 'pressure', 'pwv',
'empty', 'pwv+pressure', 'pwv+pressure+doy'])
da.name = 'feature groups'
df = convert_da_to_long_form_df(da, value_name='score',
var_name='feature groups')
sns.set(font_scale=1.5)
sns.set_style('whitegrid')
sns.set_style('ticks')
cmap = sns.color_palette('tab10', n_colors=len(feats))
if len(feats) == 5:
cmap = ['tab:purple', 'tab:brown', 'tab:blue', 'tab:blue',
'tab:orange', 'tab:green']
fg = sns.FacetGrid(data=df, row='model', col='scorer', height=4, aspect=0.9)
# fg.map_dataframe(sns.stripplot, x="test_score", y="score", hue="features",
# data=df, dodge=True, alpha=1, zorder=1, palette=cmap)
# fg.map_dataframe(sns.pointplot, x="test_score", y="score", hue="features",
# data=df, dodge=True, join=False, palette=cmap,
# markers="o", scale=.75, ci=None)
fg.map_dataframe(sns.barplot, x='feature groups', y="score", hue='features',
ci='sd', capsize=None, errwidth=2, errcolor='k',
palette=cmap, dodge=True)
# g = sns.catplot(x='test_score', y="score", hue='features',
# col="scorer", row='model', ci='sd',
# data=df, kind="bar", capsize=0.25,
# height=4, aspect=1.5, errwidth=1.5)
#fg.set_xticklabels(rotation=45)
# fg.set_yticklabels([0, 0.2, 0.4, 0.6, 0.8, 1.0], fontsize=fontsize)
fg.set_ylabels('score')
[x.grid(True) for x in fg.axes.flatten()]
handles, labels = fg.axes[0, 0].get_legend_handles_labels()
if len(feats) == 5:
del handles[3]
del labels[3]
show_values_on_bars(fg.axes, fs=fontsize-4, exclude_bar_num=3)
for i in range(fg.axes.shape[0]): # i is rows
model = dss['model'].isel(model=i).item()
if model == 'SVC':
model = 'SVM'
for j in range(fg.axes.shape[1]): # j is cols
ax = fg.axes[i, j]
scorer = dss['scorer'].isel(scorer=j).item()
title = '{} | scorer={}'.format(model, scorer)
ax.set_title(title, fontsize=fontsize)
ax.set_xlabel('')
ax.set_ylim(0, 1)
change_width(ax, 0.110)
fg.set_xlabels(' ')
if wv_label is not None:
labels = [x.replace('pwv', wv_label) for x in labels]
fg.fig.legend(handles=handles, labels=labels, prop={'size': fontsize}, edgecolor='k',
framealpha=0.5, fancybox=True, facecolor='white',
ncol=len(feats), fontsize=fontsize, loc='upper center', bbox_to_anchor=(0.5, 1.005),
bbox_transform=plt.gcf().transFigure)
# true_scores = dst.sel(scorer=scorer, model=model)['true_score']
# dss['permutation_score'].plot.hist(ax=ax, bins=25, color=color)
# ymax = ax.get_ylim()[-1] - 0.2
# ax.vlines(x=true_scores.values, ymin=0, ymax=ymax, linestyle='--', color=cmap)
fg.fig.tight_layout()
fg.fig.subplots_adjust(top=0.92)
if save:
if best:
filename = 'ML_scores_models_nested_CV_best_hp_{}_{}_neg_{}.png'.format('_'.join(feats), splits, neg)
else:
filename = 'ML_scores_models_nested_CV_{}_{}_neg_{}.png'.format('_'.join(feats), splits, neg)
plt.savefig(savefig_path / filename, bbox_inches='tight')
return fg
def plot_holdout_test_scores(dss, feats='pwv+pressure+doy'):
import seaborn as sns
import matplotlib.pyplot as plt
def show_values_on_bars(axs, fs=12, fw='bold'):
import numpy as np
def _show_on_single_plot(ax):
for p in ax.patches:
_x = p.get_x() + p.get_width() / 2
_y = p.get_y() + p.get_height()
value = '{:.2f}'.format(p.get_height())
ax.text(_x, _y, value, ha="center", fontsize=fs, fontweight=fw)
if isinstance(axs, np.ndarray):
for idx, ax in np.ndenumerate(axs):
_show_on_single_plot(ax)
else:
_show_on_single_plot(axs)
if feats is None:
feats = ['pwv', 'pwv+pressure', 'pwv+pressure+doy']
dst = dss.sel(features=feats) # .reset_coords(drop=True)
df = dst['holdout_test_scores'].to_dataframe()
df['scorer'] = df.index.droplevel(1).droplevel(0)
df['model'] = df.index.droplevel(2).droplevel(1)
df['features'] = df.index.droplevel(2).droplevel(0)
df['model'] = df['model'].str.replace('SVC', 'SVM')
df = df.melt(value_vars='holdout_test_scores', id_vars=[
'features', 'model', 'scorer'], var_name='test_score')
sns.set(font_scale=1.5)
sns.set_style('whitegrid')
sns.set_style('ticks')
g = sns.catplot(x="model", y="value", hue='features',
col="scorer", ci='sd', row=None,
col_wrap=3,
data=df, kind="bar", capsize=0.15,
height=4, aspect=1.5, errwidth=0.8)
g.set_xticklabels(rotation=45)
[x.grid(True) for x in g.axes.flatten()]
show_values_on_bars(g.axes)
filename = 'ML_scores_models_holdout_{}.png'.format('_'.join(feats))
plt.savefig(savefig_path / filename, bbox_inches='tight')
return df
def prepare_test_df_to_barplot_from_dss(dss, feats='doy+pwv+pressure',
plot=True, splitfigs=True):
import seaborn as sns
import matplotlib.pyplot as plt
dvars = [x for x in dss if 'test_' in x]
scores = [x.split('_')[-1] for x in dvars]
dst = dss[dvars]
# dst['scoring'] = [x+'_inner' for x in dst['scoring'].values]
# for i, ds in enumerate(dst):
# dst[ds] = dst[ds].sel(scoring=scores[i]).reset_coords(drop=True)
if feats is None:
feats = ['pwv', 'pressure+pwv', 'doy+pressure+pwv']
dst = dst.sel(features=feats) # .reset_coords(drop=True)
dst = dst.rename_vars(dict(zip(dvars, scores)))
# dst = dst.drop('scoring')
df = dst.to_dataframe()
# dfu = df
df['inner score'] = df.index.droplevel(2).droplevel(1).droplevel(0)
df['features'] = df.index.droplevel(2).droplevel(2).droplevel(1)
df['model'] = df.index.droplevel(2).droplevel(0).droplevel(1)
df = df.melt(value_vars=scores, id_vars=[
'features', 'model', 'inner score'], var_name='outer score')
# return dfu
# dfu.columns = dfu.columns.droplevel(1)
# dfu = dfu.T
# dfu['score'] = dfu.index
# dfu = dfu.reset_index()
# df = dfu.melt(value_vars=['MLP', 'RF', 'SVC'], id_vars=['score'])
df1 = df[(df['inner score']=='f1') | (df['inner score']=='precision') | (df['inner score']=='recall')]
df2 = df[(df['inner score']=='hss') | (df['inner score']=='tss') | (df['inner score']=='roc-auc') | (df['inner score']=='accuracy')]
if plot:
sns.set(font_scale = 1.5)
sns.set_style('whitegrid')
sns.set_style('ticks')
if splitfigs:
g = sns.catplot(x="outer score", y="value", hue='features',
col="inner score", ci='sd',row='model',
data=df1, kind="bar", capsize=0.15,
height=4, aspect=1.5,errwidth=0.8)
g.set_xticklabels(rotation=45)
filename = 'ML_scores_models_{}_1.png'.format('_'.join(feats))
plt.savefig(savefig_path / filename, bbox_inches='tight')
g = sns.catplot(x="outer score", y="value", hue='features',
col="inner score", ci='sd',row='model',
data=df2, kind="bar", capsize=0.15,
height=4, aspect=1.5,errwidth=0.8)
g.set_xticklabels(rotation=45)
filename = 'ML_scores_models_{}_2.png'.format('_'.join(feats))
plt.savefig(savefig_path / filename, bbox_inches='tight')
else:
g = sns.catplot(x="outer score", y="value", hue='features',
col="inner score", ci='sd',row='model',
data=df, kind="bar", capsize=0.15,
height=4, aspect=1.5,errwidth=0.8)
g.set_xticklabels(rotation=45)
filename = 'ML_scores_models_{}.png'.format('_'.join(feats))
plt.savefig(savefig_path / filename, bbox_inches='tight')
return df
def calculate_metrics_from_ML_dss(dss):
from sklearn.metrics import roc_curve
from sklearn.metrics import roc_auc_score
from sklearn.metrics import auc
from sklearn.metrics import precision_recall_curve
import xarray as xr
import numpy as np
import pandas as pd
mean_fpr = np.linspace(0, 1, 100)
# fpr = dss['y_true'].copy(deep=False).values
# tpr = dss['y_true'].copy(deep=False).values
# y_true = dss['y_true'].values
# y_prob = dss['y_prob'].values
ok = [x for x in dss['outer_kfold'].values]
ik = [x for x in dss['inner_kfold'].values]
m = [x for x in dss['model'].values]
sc = [x for x in dss['scoring'].values]
f = [x for x in dss['features'].values]
# r = [x for x in dss['neg_pos_ratio'].values]
ind = pd.MultiIndex.from_product(
[ok, ik, m, sc, f],
names=[
'outer_kfold',
'inner_kfold',
'model',
'scoring',
'features']) # , 'station'])
okn = [x for x in range(dss['outer_kfold'].size)]
ikn = [x for x in range(dss['inner_kfold'].size)]
mn = [x for x in range(dss['model'].size)]
scn = [x for x in range(dss['scoring'].size)]
fn = [x for x in range(dss['features'].size)]
ds_list = []
for i in okn:
for j in ikn:
for k in mn:
for n in scn:
for m in fn:
ds = xr.Dataset()
y_true = dss['y_true'].isel(
outer_kfold=i, inner_kfold=j, model=k, scoring=n, features=m).reset_coords(drop=True).squeeze()
y_prob = dss['y_prob'].isel(
outer_kfold=i, inner_kfold=j, model=k, scoring=n, features=m).reset_coords(drop=True).squeeze()
y_true = y_true.dropna('sample')
y_prob = y_prob.dropna('sample')
if y_prob.size == 0:
# in case of NaNs in the results:
fpr_da = xr.DataArray(
np.nan*np.ones((1)), dims=['sample'])
fpr_da['sample'] = [
x for x in range(fpr_da.size)]
tpr_da = xr.DataArray(
np.nan*np.ones((1)), dims=['sample'])
tpr_da['sample'] = [
x for x in range(tpr_da.size)]
prn_da = xr.DataArray(
np.nan*np.ones((1)), dims=['sample'])
prn_da['sample'] = [
x for x in range(prn_da.size)]
rcll_da = xr.DataArray(
np.nan*np.ones((1)), dims=['sample'])
rcll_da['sample'] = [
x for x in range(rcll_da.size)]
tpr_fpr = xr.DataArray(
np.nan*np.ones((100)), dims=['FPR'])
tpr_fpr['FPR'] = mean_fpr
prn_rcll = xr.DataArray(
np.nan*np.ones((100)), dims=['RCLL'])
prn_rcll['RCLL'] = mean_fpr
pr_auc_da = xr.DataArray(np.nan)
roc_auc_da = xr.DataArray(np.nan)
no_skill_da = xr.DataArray(np.nan)
else:
no_skill = len(
y_true[y_true == 1]) / len(y_true)
no_skill_da = xr.DataArray(no_skill)
fpr, tpr, _ = roc_curve(y_true, y_prob)
interp_tpr = np.interp(mean_fpr, fpr, tpr)
interp_tpr[0] = 0.0
roc_auc = roc_auc_score(y_true, y_prob)
prn, rcll, _ = precision_recall_curve(
y_true, y_prob)
interp_prn = np.interp(
mean_fpr, rcll[::-1], prn[::-1])
interp_prn[0] = 1.0
pr_auc_score = auc(rcll, prn)
roc_auc_da = xr.DataArray(roc_auc)
pr_auc_da = xr.DataArray(pr_auc_score)
prn_da = xr.DataArray(prn, dims=['sample'])
prn_da['sample'] = [x for x in range(len(prn))]
rcll_da = xr.DataArray(rcll, dims=['sample'])
rcll_da['sample'] = [
x for x in range(len(rcll))]
fpr_da = xr.DataArray(fpr, dims=['sample'])
fpr_da['sample'] = [x for x in range(len(fpr))]
tpr_da = xr.DataArray(tpr, dims=['sample'])
tpr_da['sample'] = [x for x in range(len(tpr))]
tpr_fpr = xr.DataArray(
interp_tpr, dims=['FPR'])
tpr_fpr['FPR'] = mean_fpr
prn_rcll = xr.DataArray(
interp_prn, dims=['RCLL'])
prn_rcll['RCLL'] = mean_fpr
ds['fpr'] = fpr_da
ds['tpr'] = tpr_da
ds['roc-auc'] = roc_auc_da
ds['pr-auc'] = pr_auc_da
ds['prn'] = prn_da
ds['rcll'] = rcll_da
ds['TPR'] = tpr_fpr
ds['PRN'] = prn_rcll
ds['no_skill'] = no_skill_da
ds_list.append(ds)
ds = xr.concat(ds_list, 'dim_0')
ds['dim_0'] = ind
ds = ds.unstack()
ds.attrs = dss.attrs
ds['fpr'].attrs['long_name'] = 'False positive rate'
ds['tpr'].attrs['long_name'] = 'True positive rate'
ds['prn'].attrs['long_name'] = 'Precision'
ds['rcll'].attrs['long_name'] = 'Recall'
ds['roc-auc'].attrs['long_name'] = 'ROC or FPR-TPR Area under curve'
ds['pr-auc'].attrs['long_name'] = 'Precision-Recall Area under curve'
ds['PRN'].attrs['long_name'] = 'Precision-Recall'
ds['TPR'].attrs['long_name'] = 'TPR-FPR (ROC)'
dss = xr.merge([dss, ds], combine_attrs='no_conflicts')
return dss
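# --- Hedged example (not part of the original pipeline) ---
# calculate_metrics_from_ML_dss above resamples every fold's ROC curve onto a
# common 100-point FPR grid with np.interp so that curves can be averaged and
# their std computed across folds. The helper below is a minimal, generic
# sketch of that single step; pass any binary labels and probability scores.
def example_interp_roc_on_common_grid(y_true, y_prob, n_points=100):
    import numpy as np
    from sklearn.metrics import roc_curve
    mean_fpr = np.linspace(0, 1, n_points)
    fpr, tpr, _ = roc_curve(y_true, y_prob)
    # roc_curve returns fpr in increasing order, as np.interp requires:
    interp_tpr = np.interp(mean_fpr, fpr, tpr)
    interp_tpr[0] = 0.0
    return mean_fpr, interp_tpr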
#
# def load_ML_models(path=hydro_ml_path, station='drag', prefix='CVM', suffix='.pkl'):
# from aux_gps import path_glob
# import joblib
# import matplotlib.pyplot as plt
# import seaborn as sns
# import xarray as xr
# import pandas as pd
# model_files = path_glob(path, '{}_*{}'.format(prefix, suffix))
# model_files = sorted(model_files)
# model_files = [x for x in model_files if station in x.as_posix()]
# m_list = [joblib.load(x) for x in model_files]
# model_files = [x.as_posix().split('/')[-1].split('.')[0] for x in model_files]
# # fix roc-auc:
# model_files = [x.replace('roc_auc', 'roc-auc') for x in model_files]
# print('loading {} station only.'.format(station))
# model_names = [x.split('_')[3] for x in model_files]
## model_pw_stations = [x.split('_')[1] for x in model_files]
## model_hydro_stations = [x.split('_')[2] for x in model_files]
# model_nsplits = [x.split('_')[6] for x in model_files]
# model_scores = [x.split('_')[5] for x in model_files]
# model_features = [x.split('_')[4] for x in model_files]
# model_test_sizes = []
# for file in model_files:
# try:
# model_test_sizes.append(int(file.split('_')[7]))
# except IndexError:
# model_test_sizes.append(20)
## model_pwv_hs_id = list(zip(model_pw_stations, model_hydro_stations))
## model_pwv_hs_id = ['_'.join(x) for x in model_pwv_hs_id]
# # transform model_dict to dataarray:
# tups = [tuple(x) for x in zip(model_names, model_scores, model_nsplits, model_features, model_test_sizes)] #, model_pwv_hs_id)]
# ind = pd.MultiIndex.from_tuples((tups), names=['model', 'scoring', 'splits', 'feature', 'test_size']) #, 'station'])
# da = xr.DataArray(m_list, dims='dim_0')
# da['dim_0'] = ind
# da = da.unstack('dim_0')
# da['splits'] = da['splits'].astype(int)
# da['test_size'].attrs['units'] = '%'
# return da
def plot_heatmaps_for_all_models_and_scorings(dss, var='roc-auc'): # , save=True):
import xarray as xr
import seaborn as sns
import matplotlib.pyplot as plt
# assert station == dss.attrs['pwv_id']
cmaps = {'roc-auc': sns.color_palette("Blues", as_cmap=True),
'pr-auc': sns.color_palette("Greens", as_cmap=True)}
fg = xr.plot.FacetGrid(
dss,
col='model',
row='scoring',
sharex=True,
sharey=True, figsize=(10, 20))
dss = dss.mean('inner_kfold', keep_attrs=True)
vmin, vmax = dss[var].min(), 1
norm = plt.Normalize(vmin=vmin, vmax=vmax)
for i in range(fg.axes.shape[0]): # i is rows
for j in range(fg.axes.shape[1]): # j is cols
ax = fg.axes[i, j]
modelname = dss['model'].isel(model=j).item()
scoring = dss['scoring'].isel(scoring=i).item()
model = dss[var].isel(
{'model': j, 'scoring': i}).reset_coords(drop=True)
df = model.to_dataframe()
title = '{} model ({})'.format(modelname, scoring)
df = df.unstack()
mean = df.mean()
mean.name = 'mean'
df = df.append(mean).T.droplevel(0)
ax = sns.heatmap(df, annot=True, cmap=cmaps[var], cbar=False,
ax=ax, norm=norm)
ax.set_title(title)
ax.vlines([4], 0, 10, color='r', linewidth=2)
if j > 0:
ax.set_ylabel('')
if i < 2:
ax.set_xlabel('')
cax = fg.fig.add_axes([0.1, 0.025, .8, .015])
fg.fig.colorbar(ax.get_children()[0], cax=cax, orientation="horizontal")
fg.fig.suptitle('{}'.format(
dss.attrs[var].upper()), fontweight='bold')
fg.fig.tight_layout()
fg.fig.subplots_adjust(top=0.937,
bottom=0.099,
left=0.169,
right=0.993,
hspace=0.173,
wspace=0.051)
# if save:
# filename = 'hydro_models_heatmaps_on_{}_{}_{}.png'.format(
# station, dss['outer_kfold'].size, var)
# plt.savefig(savefig_path / filename, bbox_inches='tight')
return fg
def plot_ROC_from_dss(dss, feats=None, fontsize=16, save=True, wv_label='pwv',
best=False):
import seaborn as sns
import matplotlib.pyplot as plt
import pandas as pd
from aux_gps import convert_da_to_long_form_df
sns.set_style('whitegrid')
sns.set_style('ticks')
sns.set(font_scale=1.0)
cmap = sns.color_palette('tab10', n_colors=3)
splits = dss['outer_split'].size
if 'neg_sample' in dss.dims:
neg = dss['neg_sample'].size
else:
neg = 1
dss = dss.reindex(scorer=scorer_order)
if feats is None:
feats = ['pwv', 'pwv+pressure', 'pwv+pressure+doy']
if 'model' not in dss.dims:
dss = dss.expand_dims('model')
dss['model'] = [dss.attrs['model']]
dss = dss.sortby('model', ascending=False)
dst = dss.sel(features=feats) # .reset_coords(drop=True)
# df = dst['TPR'].to_dataframe()
# if 'neg_sample' in dss.dims:
# fpr_lnum = 5
# model_lnum = 0
# scorer_lnum = 4
# features_lnum = 1
# else:
# fpr_lnum = 4
# model_lnum = 0
# scorer_lnum = 3
# features_lnum = 1
# df['FPR'] = df.index.get_level_values(fpr_lnum)
# df['model'] = df.index.get_level_values(model_lnum)
# df['scorer'] = df.index.get_level_values(scorer_lnum)
# df['features'] = df.index.get_level_values(features_lnum)
df = convert_da_to_long_form_df(dst['TPR'], var_name='score')
# df = df.melt(value_vars='TPR', id_vars=[
# 'features', 'model', 'scorer', 'FPR'], var_name='score')
if best is not None:
if best == 'compare_negs':
df1 = df.copy()[df['neg_sample'] == 1]
df2 = df.copy()
df2.drop('neg_sample', axis=1, inplace=True)
df1.drop('neg_sample', axis=1, inplace=True)
df1['neg_group'] = 1
df2['neg_group'] = 25
df = pd.concat([df1, df2])
col = 'neg_group'
titles = ['Neg=1', 'Neg=25']
else:
col=None
else:
col = 'scorer'
df['model'] = df['model'].str.replace('SVC', 'SVM')
fg = sns.FacetGrid(df, col=col, row='model', aspect=1)
fg.map_dataframe(sns.lineplot, x='FPR', y='value',
hue='features', ci='sd', palette=cmap, n_boot=None,
estimator='mean')
for i in range(fg.axes.shape[0]): # i is rows
model = dss['model'].isel(model=i).item()
auc_model = dst.sel(model=model)
if model == 'SVC':
model = 'SVM'
for j in range(fg.axes.shape[1]): # j is cols
scorer = dss['scorer'].isel(scorer=j).item()
auc_scorer_df = auc_model['roc_auc_score'].sel(scorer=scorer).reset_coords(drop=True).to_dataframe()
auc_scorer_mean = [auc_scorer_df.loc[x].mean() for x in feats]
auc_scorer_std = [auc_scorer_df.loc[x].std() for x in feats]
auc_mean = [x.item() for x in auc_scorer_mean]
auc_std = [x.item() for x in auc_scorer_std]
if j == 0 and best is not None:
scorer = dss['scorer'].isel(scorer=j).item()
auc_scorer_df = auc_model['roc_auc_score'].sel(scorer=scorer).isel(neg_sample=0).reset_coords(drop=True).to_dataframe()
auc_scorer_mean = [auc_scorer_df.loc[x].mean() for x in feats]
auc_scorer_std = [auc_scorer_df.loc[x].std() for x in feats]
auc_mean = [x.item() for x in auc_scorer_mean]
auc_std = [x.item() for x in auc_scorer_std]
ax = fg.axes[i, j]
ax.plot([0, 1], [0, 1], color='tab:red', linestyle='--', lw=2,
label='chance')
if best is not None:
if best == 'compare_negs':
title = '{} | {}'.format(model, titles[j])
else:
title = '{}'.format(model)
else:
title = '{} | scorer={}'.format(model, scorer)
ax.set_title(title, fontsize=fontsize)
handles, labels = ax.get_legend_handles_labels()
hands = handles[0:3]
# labes = labels[0:3]
new_labes = []
for auc, auc_sd in zip(auc_mean, auc_std):
l = r'{:.2}$\pm${:.1}'.format(auc, auc_sd)
new_labes.append(l)
ax.legend(handles=hands, labels=new_labes, loc='lower right',
title='AUCs', prop={'size': fontsize-4})
ax.set_xticks([0, 0.2, 0.4, 0.6, 0.8, 1])
ax.grid(True)
# return handles, labels
fg.set_ylabels('True Positive Rate', fontsize=fontsize)
fg.set_xlabels('False Positive Rate', fontsize=fontsize)
if wv_label is not None:
labels = [x.replace('pwv', wv_label) for x in labels]
if best is not None:
if best == 'compare_negs':
fg.fig.legend(handles=handles, labels=labels, prop={'size': fontsize},
edgecolor='k',
framealpha=0.5, fancybox=True, facecolor='white',
ncol=2, fontsize=fontsize, loc='upper center', bbox_to_anchor=(0.5, 1.005),
bbox_transform=plt.gcf().transFigure)
fg.fig.tight_layout()
fg.fig.subplots_adjust(top=0.865,
bottom=0.079,
left=0.144,
right=0.933,
hspace=0.176,
wspace=0.2)
else:
fg.fig.legend(handles=handles, labels=labels, prop={'size': fontsize},
edgecolor='k',
framealpha=0.5, fancybox=True, facecolor='white',
ncol=1, fontsize=fontsize, loc='upper center', bbox_to_anchor=(0.5, 1.005),
bbox_transform=plt.gcf().transFigure)
fg.fig.tight_layout()
fg.fig.subplots_adjust(top=0.825,
bottom=0.079,
left=0.184,
right=0.933,
hspace=0.176,
wspace=0.2)
else:
fg.fig.legend(handles=handles, labels=labels, prop={'size': fontsize}, edgecolor='k',
framealpha=0.5, fancybox=True, facecolor='white',
ncol=5, fontsize=fontsize, loc='upper center', bbox_to_anchor=(0.5, 1.005),
bbox_transform=plt.gcf().transFigure)
# true_scores = dst.sel(scorer=scorer, model=model)['true_score']
# dss['permutation_score'].plot.hist(ax=ax, bins=25, color=color)
# ymax = ax.get_ylim()[-1] - 0.2
# ax.vlines(x=true_scores.values, ymin=0, ymax=ymax, linestyle='--', color=cmap)
fg.fig.tight_layout()
fg.fig.subplots_adjust(top=0.915)
if save:
if best is not None:
filename = 'ROC_plots_models_nested_CV_best_hp_{}_{}_neg_{}.png'.format('_'.join(feats), splits, neg)
else:
filename = 'ROC_plots_models_nested_CV_{}_{}_neg_{}.png'.format('_'.join(feats), splits, neg)
plt.savefig(savefig_path / filename, bbox_inches='tight')
return fg
def plot_permutation_importances_from_dss(dss, feat_dim='features',
outer_dim='outer_split',
features='pwv+pressure+doy',
fix_xticklabels=True,split=1,
axes=None, save=True):
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
from natsort import natsorted
sns.set_palette('Dark2', 6)
sns.set_style('whitegrid')
sns.set_style('ticks')
model = dss.attrs['model']
# use dss.sel(model='RF') first as input
dss['feature'] = dss['feature'].str.replace('DOY', 'doy')
dss = dss.sel({feat_dim: features})
# tests_ds = dss['test_score']
# tests_ds = tests_ds.sel(scorer=scorer)
# max_score_split = int(tests_ds.idxmax(outer_dim).item())
# use mean outer split:
# dss = dss.mean(outer_dim)
dss = dss.sel({outer_dim: split})
feats = features.split('+')
fn = len(feats)
if fn == 1:
gr_spec = None
fix_xticklabels = False
elif fn == 2:
gr_spec = [1, 1]
elif fn == 3:
gr_spec = [2, 5, 5]
if axes is None:
fig, axes = plt.subplots(1, fn, sharey=True, figsize=(17, 5), gridspec_kw={'width_ratios': gr_spec})
try:
axes.flatten()
except AttributeError:
axes = [axes]
for i, f in enumerate(sorted(feats)):
fe = [x for x in dss['feature'].values if f in x]
dsf = dss['PI_mean'].sel(
feature=fe).reset_coords(
drop=True)
sorted_feat = natsorted([x for x in dsf.feature.values])
dsf = dsf.reindex(feature=sorted_feat)
print([x for x in dsf.feature.values])
# dsf = dss['PI_mean'].sel(
# feature=fe).reset_coords(
# drop=True)
dsf = dsf.to_dataset('scorer').to_dataframe(
).reset_index(drop=True)
title = '{}'.format(f.upper())
dsf.plot.bar(ax=axes[i], title=title, rot=0, legend=False, zorder=20,
width=.8)
dsf_sum = dsf.sum().tolist()
handles, labels = axes[i].get_legend_handles_labels()
labels = [
'{} ({:.1f})'.format(
x, y) for x, y in zip(
labels, dsf_sum)]
axes[i].legend(handles=handles, labels=labels, prop={'size': 10}, loc='upper left')
axes[i].set_ylabel('Scores')
axes[i].grid(axis='y', zorder=1)
if fix_xticklabels:
n = sum(['pwv' in x for x in dss.feature.values])
axes[0].xaxis.set_ticklabels('')
hrs = np.arange(-24, -24+n)
axes[1].set_xticklabels(hrs, rotation=30, ha="center", fontsize=12)
axes[2].set_xticklabels(hrs, rotation=30, ha="center", fontsize=12)
axes[1].set_xlabel('Hours prior to flood')
axes[2].set_xlabel('Hours prior to flood')
fig.tight_layout()
fig.suptitle('permutation importance scores for {} model split #{}'.format(model, split))
fig.subplots_adjust(top=0.904)
if save:
filename = 'permutation_importances_{}_split_{}_all_scorers_{}.png'.format(model, split, features)
plt.savefig(savefig_path / filename, bbox_inches='tight')
return
def plot_feature_importances_from_dss(
dss,
feat_dim='features', outer_dim='outer_split',
features='pwv+pressure+doy', fix_xticklabels=True,
axes=None, save=True, ylim=[0, 12], fontsize=16):
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
from natsort import natsorted
sns.set_palette('Dark2', 6)
# sns.set_style('whitegrid')
# sns.set_style('ticks')
sns.set_theme(style='ticks', font_scale=1.5)
# use dss.sel(model='RF') first as input
dss['feature'] = dss['feature'].str.replace('DOY', 'doy')
dss = dss.sel({feat_dim: features})
# tests_ds = dss['test_score']
# tests_ds = tests_ds.sel(scorer=scorer)
# max_score_split = int(tests_ds.idxmax(outer_dim).item())
# use mean outer split:
dss = dss.mean(outer_dim)
feats = features.split('+')
fn = len(feats)
if fn == 1:
gr_spec = None
fix_xticklabels = False
elif fn == 2:
gr_spec = [1, 1]
elif fn == 3:
gr_spec = [5, 5, 2]
if axes is None:
fig, axes = plt.subplots(1, fn, sharey=True, figsize=(17, 5), gridspec_kw={'width_ratios': gr_spec})
try:
axes.flatten()
except AttributeError:
axes = [axes]
for i, f in enumerate(feats):
fe = [x for x in dss['feature'].values if f in x]
dsf = dss['feature_importances'].sel(
feature=fe).reset_coords(
drop=True)
# dsf = dss['PI_mean'].sel(
# feature=fe).reset_coords(
# drop=True)
sorted_feat = natsorted([x for x in dsf.feature.values])
# sorted_feat = [x for x in dsf.feature.values]
print(sorted_feat)
dsf = dsf.reindex(feature=sorted_feat)
dsf = dsf.to_dataset('scorer').to_dataframe(
).reset_index(drop=True) * 100
title = '{}'.format(f.upper())
dsf.plot.bar(ax=axes[i], title=title, rot=0, legend=False, zorder=20,
width=.8)
axes[i].set_title(title, fontsize=fontsize)
dsf_sum = dsf.sum().tolist()
handles, labels = axes[i].get_legend_handles_labels()
labels = [
'{} ({:.1f} %)'.format(
x, y) for x, y in zip(
labels, dsf_sum)]
axes[i].legend(handles=handles, labels=labels, prop={'size': 12}, loc='upper center')
axes[i].set_ylabel('Feature importances [%]')
axes[i].grid(axis='y', zorder=1)
if ylim is not None:
[ax.set_ylim(*ylim) for ax in axes]
if fix_xticklabels:
n = sum(['pwv' in x for x in dss.feature.values])
axes[2].xaxis.set_ticklabels('')
hrs = np.arange(-1, -25, -1)
axes[0].set_xticklabels(hrs, rotation=30, ha="center", fontsize=14)
axes[1].set_xticklabels(hrs, rotation=30, ha="center", fontsize=14)
axes[2].tick_params(labelsize=fontsize)
axes[0].set_xlabel('Hours prior to flood')
axes[1].set_xlabel('Hours prior to flood')
fig.tight_layout()
if save:
filename = 'RF_feature_importances_all_scorers_{}.png'.format(features)
plt.savefig(savefig_path / filename, bbox_inches='tight')
return
def plot_feature_importances(
dss,
feat_dim='features',
features='pwv+pressure+doy',
scoring='f1', fix_xticklabels=True,
axes=None, save=True):
# use dss.sel(model='RF') first as input
import matplotlib.pyplot as plt
import numpy as np
dss = dss.sel({feat_dim: features})
tests_ds = dss[[x for x in dss if 'test' in x]]
tests_ds = tests_ds.sel(scoring=scoring)
score_ds = tests_ds['test_{}'.format(scoring)]
max_score = score_ds.idxmax('outer_kfold').values
feats = features.split('+')
fn = len(feats)
if axes is None:
fig, axes = plt.subplots(1, fn, sharey=True, figsize=(17, 5), gridspec_kw={'width_ratios': [1, 4, 4]})
try:
axes.flatten()
except AttributeError:
axes = [axes]
for i, f in enumerate(feats):
fe = [x for x in dss['feature'].values if f in x]
dsf = dss['feature_importances'].sel(
feature=fe,
outer_kfold=max_score).reset_coords(
drop=True)
dsf = dsf.to_dataset('scoring').to_dataframe(
).reset_index(drop=True) * 100
title = '{} ({})'.format(f.upper(), scoring)
dsf.plot.bar(ax=axes[i], title=title, rot=0, legend=False, zorder=20,
width=.8)
dsf_sum = dsf.sum().tolist()
handles, labels = axes[i].get_legend_handles_labels()
labels = [
'{} ({:.1f} %)'.format(
x, y) for x, y in zip(
labels, dsf_sum)]
axes[i].legend(handles=handles, labels=labels, prop={'size': 8})
axes[i].set_ylabel('Feature importance [%]')
axes[i].grid(axis='y', zorder=1)
if fix_xticklabels:
axes[0].xaxis.set_ticklabels('')
hrs = np.arange(-24,0)
axes[1].set_xticklabels(hrs, rotation = 30, ha="center", fontsize=12)
axes[2].set_xticklabels(hrs, rotation = 30, ha="center", fontsize=12)
axes[1].set_xlabel('Hours prior to flood')
axes[2].set_xlabel('Hours prior to flood')
if save:
fig.tight_layout()
filename = 'RF_feature_importances_{}.png'.format(scoring)
plt.savefig(savefig_path / filename, bbox_inches='tight')
return
def plot_feature_importances_for_all_scorings(dss,
features='doy+pwv+pressure',
model='RF', splitfigs=True):
import matplotlib.pyplot as plt
# station = dss.attrs['pwv_id'].upper()
dss = dss.sel(model=model).reset_coords(drop=True)
fns = len(features.split('+'))
scores = dss['scoring'].values
scores1 = ['f1', 'precision', 'recall']
scores2 = ['hss', 'tss', 'accuracy','roc-auc']
if splitfigs:
fig, axes = plt.subplots(len(scores1), fns, sharey=True, figsize=(15, 20))
for i, score in enumerate(scores1):
plot_feature_importances(
dss, features=features, scoring=score, axes=axes[i, :])
fig.suptitle(
'feature importances of {} model'.format(model))
fig.tight_layout()
fig.subplots_adjust(top=0.935,
bottom=0.034,
left=0.039,
right=0.989,
hspace=0.19,
wspace=0.027)
filename = 'RF_feature_importances_1.png'
plt.savefig(savefig_path / filename, bbox_inches='tight')
fig, axes = plt.subplots(len(scores2), fns, sharey=True, figsize=(15, 20))
for i, score in enumerate(scores2):
plot_feature_importances(
dss, features=features, scoring=score, axes=axes[i, :])
fig.suptitle(
'feature importances of {} model'.format(model))
fig.tight_layout()
fig.subplots_adjust(top=0.935,
bottom=0.034,
left=0.039,
right=0.989,
hspace=0.19,
wspace=0.027)
filename = 'RF_feature_importances_2.png'
plt.savefig(savefig_path / filename, bbox_inches='tight')
else:
fig, axes = plt.subplots(len(scores), fns, sharey=True, figsize=(15, 20))
for i, score in enumerate(scores):
plot_feature_importances(
dss, features=features, scoring=score, axes=axes[i, :])
fig.suptitle(
'feature importances of {} model'.format(model))
fig.tight_layout()
fig.subplots_adjust(top=0.935,
bottom=0.034,
left=0.039,
right=0.989,
hspace=0.19,
wspace=0.027)
filename = 'RF_feature_importances.png'
plt.savefig(savefig_path / filename, bbox_inches='tight')
return dss
def plot_ROC_curve_from_dss_nested_CV(dss, outer_dim='outer_split',
plot_chance=True, color='tab:blue',
fontsize=14, plot_legend=True,
title=None,
ax=None, main_label=None):
import matplotlib.pyplot as plt
import numpy as np
if ax is None:
fig, ax = plt.subplots()
if title is None:
title = "Receiver operating characteristic"
mean_fpr = dss['FPR'].values
mean_tpr = dss['TPR'].mean(outer_dim).values
mean_auc = dss['roc_auc_score'].mean().item()
if np.isnan(mean_auc):
raise ValueError('roc_auc_score is NaN')
std_auc = dss['roc_auc_score'].std().item()
field = 'TPR'
xlabel = 'False Positive Rate'
ylabel = 'True Positive Rate'
if main_label is None:
    main_label = r'Mean ROC (AUC={:.2f}$\pm${:.2f})'.format(mean_auc, std_auc)
else:
    main_label = '\n'.join([main_label,
                            r'(AUC={:.2f}$\pm${:.2f})'.format(mean_auc, std_auc)])
ax.plot(mean_fpr, mean_tpr, color=color,
lw=3, alpha=.8, label=main_label)
std_tpr = dss[field].std(outer_dim).values
n = dss[outer_dim].size
tprs_upper = np.minimum(mean_tpr + std_tpr, 1)
tprs_lower = np.maximum(mean_tpr - std_tpr, 0)
# plot Chance line:
if plot_chance:
ax.plot([0, 1], [0, 1], linestyle='--', lw=2, color='r',
label='Chance', alpha=.8, zorder=206)
stdlabel = r'$\pm$ 1 Std. dev.'
stdstr = '\n'.join(['{}'.format(stdlabel), r'({} outer splits)'.format(n)])
ax.fill_between(
mean_fpr,
tprs_lower,
tprs_upper,
color='grey',
alpha=.2, label=stdstr)
ax.grid()
ax.set(xlim=[-0.05, 1.05], ylim=[-0.05, 1.05])
# ax.set_title(title, fontsize=fontsize)
ax.tick_params(axis='y', labelsize=fontsize)
ax.tick_params(axis='x', labelsize=fontsize)
ax.set_xlabel(xlabel, fontsize=fontsize)
ax.set_ylabel(ylabel, fontsize=fontsize)
ax.set_title(title, fontsize=fontsize)
return ax
def plot_ROC_PR_curve_from_dss(
dss,
outer_dim='outer_kfold',
inner_dim='inner_kfold',
plot_chance=True,
ax=None,
color='b',
title=None,
std_on='inner',
main_label=None,
fontsize=14,
plot_type='ROC',
plot_std_legend=True):
"""plot classifier metrics, plot_type=ROC or PR"""
import matplotlib.pyplot as plt
import numpy as np
if ax is None:
fig, ax = plt.subplots()
if title is None:
title = "Receiver operating characteristic"
if plot_type == 'ROC':
mean_fpr = dss['FPR'].values
mean_tpr = dss['TPR'].mean(outer_dim).mean(inner_dim).values
mean_auc = dss['roc-auc'].mean().item()
if np.isnan(mean_auc):
raise ValueError('roc-auc is NaN')
std_auc = dss['roc-auc'].std().item()
field = 'TPR'
xlabel = 'False Positive Rate'
ylabel = 'True Positive Rate'
elif plot_type == 'PR':
mean_fpr = dss['RCLL'].values
mean_tpr = dss['PRN'].mean(outer_dim).mean(inner_dim).values
mean_auc = dss['pr-auc'].mean().item()
if np.isnan(mean_auc):
raise ValueError('pr-auc is NaN')
std_auc = dss['pr-auc'].std().item()
no_skill = dss['no_skill'].mean(outer_dim).mean(inner_dim).item()
field = 'PRN'
xlabel = 'Recall'
ylabel = 'Precision'
# plot mean ROC:
if main_label is None:
main_label = r'Mean {} (AUC={:.2f}$\pm${:.2f})'.format(
plot_type, mean_auc, std_auc)
else:
textstr = '\n'.join(['Mean {} {}'.format(
    plot_type, main_label), r'(AUC={:.2f}$\pm${:.2f})'.format(mean_auc, std_auc)])
main_label = textstr
ax.plot(mean_fpr, mean_tpr, color=color,
lw=2, alpha=.8, label=main_label)
if std_on == 'inner':
std_tpr = dss[field].mean(outer_dim).std(inner_dim).values
n = dss[inner_dim].size
elif std_on == 'outer':
std_tpr = dss[field].mean(inner_dim).std(outer_dim).values
n = dss[outer_dim].size
elif std_on == 'all':
std_tpr = dss[field].stack(
dumm=[inner_dim, outer_dim]).std('dumm').values
n = dss[outer_dim].size * dss[inner_dim].size
tprs_upper = np.minimum(mean_tpr + std_tpr, 1)
tprs_lower = np.maximum(mean_tpr - std_tpr, 0)
# plot Chance line:
if plot_chance:
if plot_type == 'ROC':
ax.plot([0, 1], [0, 1], linestyle='--', lw=2, color='r',
label='Chance', alpha=.8)
elif plot_type == 'PR':
ax.plot([0, 1], [no_skill, no_skill], linestyle='--', color='r',
lw=2, label='No Skill', alpha=.8)
# plot ROC STD range:
ax.fill_between(
mean_fpr,
tprs_lower,
tprs_upper,
color='grey',
alpha=.2, label=r'$\pm$ 1 std. dev. ({} {} splits)'.format(n, std_on))
ax.grid()
ax.set(xlim=[-0.05, 1.05], ylim=[-0.05, 1.05])
ax.set_title(title, fontsize=fontsize)
ax.tick_params(axis='y', labelsize=fontsize)
ax.tick_params(axis='x', labelsize=fontsize)
ax.set_xlabel(xlabel, fontsize=fontsize)
ax.set_ylabel(ylabel, fontsize=fontsize)
# handles, labels = ax.get_legend_handles_labels()
# if not plot_std_legend:
# if len(handles) == 7:
# handles = handles[:-2]
# labels = labels[:-2]
# else:
# handles = handles[:-1]
# labels = labels[:-1]
# ax.legend(handles=handles, labels=labels, loc="lower right",
# fontsize=fontsize)
return ax
def load_cv_splits_from_pkl(savepath):
import joblib
from aux_gps import path_glob
file = path_glob(savepath, 'CV_inds_*.pkl')[0]
n_splits = int(file.as_posix().split('/')[-1].split('_')[2])
shuffle = file.as_posix().split('/')[-1].split('.')[0].split('=')[-1]
cv_dict = joblib.load(file)
spl = len([x for x in cv_dict.keys()])
assert spl == n_splits
print('loaded {} with {} splits.'.format(file, n_splits))
return cv_dict
def save_cv_splits_to_dict(X, y, cv, train_key='train', test_key='test',
savepath=None):
import joblib
cv_dict = {}
for i, (train, test) in enumerate(cv.split(X, y)):
cv_dict[i+1] = {train_key: train, test_key: test}
# check for completness:
all_train = [x['train'] for x in cv_dict.values()]
flat_train = set([item for sublist in all_train for item in sublist])
all_test = [x['test'] for x in cv_dict.values()]
flat_test = set([item for sublist in all_test for item in sublist])
assert flat_test == flat_train
if savepath is not None:
filename = 'CV_inds_{}_splits_shuffle={}.pkl'.format(cv.n_splits, cv.shuffle)
joblib.dump(cv_dict, savepath / filename)
print('saved {} to {}.'.format(filename, savepath))
return cv_dict
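# --- Hedged usage sketch (not part of the original pipeline) ---
# Shows how save_cv_splits_to_dict and load_cv_splits_from_pkl are expected to
# round-trip. The dummy X, y arrays and the temporary directory are
# illustrative assumptions; aux_gps.path_glob is assumed to glob like Path.glob.
def example_cv_splits_roundtrip():
    import tempfile
    from pathlib import Path
    import numpy as np
    from sklearn.model_selection import StratifiedKFold
    X = np.random.rand(40, 3)
    y = np.array([0, 1] * 20)
    cv = StratifiedKFold(n_splits=4, shuffle=True, random_state=42)
    savepath = Path(tempfile.mkdtemp())
    cv_dict = save_cv_splits_to_dict(X, y, cv, savepath=savepath)
    loaded = load_cv_splits_from_pkl(savepath)
    assert sorted(loaded.keys()) == sorted(cv_dict.keys())
    return loaded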
def plot_many_ROC_curves(model, X, y, name='', color='b', ax=None,
plot_chance=True, title=None, n_splits=None):
from sklearn.metrics import plot_roc_curve
import matplotlib.pyplot as plt
from sklearn.metrics import roc_auc_score
from sklearn.metrics import f1_score
from sklearn.metrics import roc_curve
from sklearn.metrics import auc
import numpy as np
from sklearn.model_selection import StratifiedKFold
if ax is None:
fig, ax = plt.subplots()
if title is None:
title = "Receiver operating characteristic"
# just plot the ROC curve for X, y, no nsplits and stats:
if n_splits is None:
viz = plot_roc_curve(model, X, y, color=color, ax=ax, name=name)
else:
cv = StratifiedKFold(n_splits=n_splits, shuffle=True, random_state=42)
tprs = []
aucs = []
mean_fpr = np.linspace(0, 1, 100)
for i, (train, val) in enumerate(cv.split(X, y)):
model.fit(X[train], y[train])
# y_score = model.fit(X[train], y[train]).predict_proba(X[val])[:, 1]
y_pred = model.predict(X[val])
fpr, tpr, _ = roc_curve(y[val], y_pred)
# viz = plot_roc_curve(model, X[val], y[val],
# name='ROC fold {}'.format(i),
# alpha=0.3, lw=1, ax=ax)
# fpr = viz.fpr
# tpr = viz.tpr
interp_tpr = np.interp(mean_fpr, fpr, tpr)
interp_tpr[0] = 0.0
tprs.append(interp_tpr)
aucs.append(roc_auc_score(y[val], y_pred))
# scores.append(f1_score(y[val], y_pred))
# scores = np.array(scores)
mean_tpr = np.mean(tprs, axis=0)
mean_tpr[-1] = 1.0
mean_auc = auc(mean_fpr, mean_tpr)
std_auc = np.std(aucs)
ax.plot(mean_fpr, mean_tpr, color=color,
label=r'Mean ROC (AUC = %0.2f $\pm$ %0.2f)' % (
mean_auc, std_auc),
lw=2, alpha=.8)
std_tpr = np.std(tprs, axis=0)
tprs_upper = np.minimum(mean_tpr + std_tpr, 1)
tprs_lower = np.maximum(mean_tpr - std_tpr, 0)
ax.fill_between(mean_fpr, tprs_lower, tprs_upper, color='grey', alpha=.2,
label=r'$\pm$ 1 std. dev.')
if plot_chance:
ax.plot([0, 1], [0, 1], linestyle='--', lw=2, color='r',
label='Chance', alpha=.8)
ax.set(xlim=[-0.05, 1.05], ylim=[-0.05, 1.05],
title=title)
ax.legend(loc="lower right")
return ax
def HP_tuning(X, y, model_name='SVC', val_size=0.18, n_splits=None,
test_size=None,
best_score='f1', seed=42, savepath=None):
"""do HP tuning with the ML_Classifier_Switcher object and return a Dataset of
results. Note that X, y are already split into val/test."""
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import StratifiedKFold
# first get the features from X:
features = list(set(['_'.join(x.split('_')[0:2])
for x in X['feature'].values]))
ml = ML_Classifier_Switcher()
sk_model = ml.pick_model(model_name)
param_grid = ml.param_grid
if n_splits is None and val_size is not None:
n_splits = int((1 // val_size) - 1)
elif val_size is not None and n_splits is not None:
raise ValueError('Both val_size and n_splits are defined, choose only one.')
print('Using StratifiedKFold with {} splits.'.format(n_splits))
cv = StratifiedKFold(n_splits=n_splits, shuffle=True)
gr = GridSearchCV(estimator=sk_model, param_grid=param_grid, cv=cv,
n_jobs=-1, scoring=['f1', 'roc_auc', 'accuracy'], verbose=1,
refit=best_score, return_train_score=True)
gr.fit(X, y)
if best_score is not None:
ds, best_model = process_gridsearch_results(gr, model_name,
features=features, pwv_id=X.attrs['pwv_id'], hs_id=y.attrs['hydro_station_id'], test_size=test_size)
else:
ds = process_gridsearch_results(gr, model_name, features=features,
pwv_id=X.attrs['pwv_id'], hs_id=y.attrs['hydro_station_id'], test_size=test_size)
best_model = None
if savepath is not None:
save_cv_results(ds, best_model=best_model, savepath=savepath)
return ds, best_model
def save_gridsearchcv_object(GridSearchCV, savepath, filename):
import joblib
joblib.dump(GridSearchCV, savepath / filename)
print('{} was saved to {}'.format(filename, savepath))
return
def run_RF_feature_importance_on_all_features(path=hydro_path, gr_path=hydro_ml_path/'holdout'):
import xarray as xr
from aux_gps import get_all_possible_combinations_from_list
feats = get_all_possible_combinations_from_list(
['pwv', 'pressure', 'doy'], reduce_single_list=True, combine_by_sep='+')
feat_list = []
for feat in feats:
da = holdout_test(model_name='RF', return_RF_FI=True, features=feat)
feat_list.append(da)
daa = xr.concat(feat_list, 'features')
daa['features'] = feats
return daa
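# --- Hedged sketch (assumption about an external helper) ---
# The feature combinations used above come from
# aux_gps.get_all_possible_combinations_from_list; the function below sketches
# the behaviour assumed here (all non-empty combinations joined by '+'),
# but the real helper may differ in ordering or naming.
def example_feature_combinations(base=('pwv', 'pressure', 'doy'), sep='+'):
    from itertools import combinations
    combos = []
    for r in range(1, len(base) + 1):
        combos.extend(sep.join(c) for c in combinations(base, r))
    # e.g. ['pwv', 'pressure', 'doy', 'pwv+pressure', ..., 'pwv+pressure+doy']
    return combos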
def load_nested_CV_test_results_from_all_models(path=hydro_ml_path, best=False,
neg=1, splits=4,
permutation=False):
from aux_gps import path_glob
import xarray as xr
if best:
if splits is not None:
file_str = 'nested_CV_test_results_*_all_features_with_hyper_params_best_hp_neg_{}_{}a.nc'.format(neg, splits)
if permutation:
file_str = 'nested_CV_test_results_*_all_features_permutation_tests_best_hp_neg_{}_{}a.nc'.format(neg, splits)
else:
if splits is not None:
file_str = 'nested_CV_test_results_*_all_features_with_hyper_params_neg_{}_{}a.nc'.format(neg, splits)
if permutation:
file_str = 'nested_CV_test_results_*_all_features_permutation_tests_neg_{}_{}a.nc'.format(neg, splits)
files = path_glob(path, file_str)
print(files)
models = [x.as_posix().split('/')[-1].split('_')[4] for x in files]
print('loading CV test results only for {} models'.format(', '.join(models)))
dsl = [xr.load_dataset(x) for x in files]
if not permutation:
dsl = [x[['mean_score', 'std_score', 'test_score', 'roc_auc_score', 'TPR']] for x in dsl]
dss = xr.concat(dsl, 'model')
dss['model'] = models
return dss
# def plot_all_permutation_test_results(dss, feats=None):
# import xarray as xr
# fg = xr.plot.FacetGrid(
# dss,
# col='scorer',
# row='model',
# sharex=True,
# sharey=True, figsize=(20, 20))
# for i in range(fg.axes.shape[0]): # i is rows
# model = dss['model'].isel(model=i).item()
# for j in range(fg.axes.shape[1]): # j is cols
# ax = fg.axes[i, j]
# scorer = dss['scorer'].isel(scorer=j).item()
# ax = plot_single_permutation_test_result(dss, feats=feats,
# scorer=scorer,
# model=model,
# ax=ax)
# fg.fig.tight_layout()
# return fg
def plot_permutation_test_results_from_dss(dss, feats=None, fontsize=14,
save=True, wv_label='pwv'):
# ax=None, scorer='f1', model='MLP'):
import matplotlib.pyplot as plt
import seaborn as sns
from PW_from_gps_figures import get_legend_labels_handles_title_seaborn_histplot
from aux_gps import convert_da_to_long_form_df
sns.set_style('whitegrid')
sns.set_style('ticks')
try:
splits = dss['outer_split'].size
except KeyError:
splits = 5
try:
assert 'best' in dss.attrs['comment']
best = True
except (AssertionError, KeyError):
best = False
if 'neg_sample' in dss.dims:
neg = dss['neg_sample'].size
else:
neg = 1
if 'model' not in dss.dims:
dss = dss.expand_dims('model')
dss['model'] = [dss.attrs['model']]
dss = dss.reindex(scorer=scorer_order)
# dss = dss.mean('outer_split')
cmap = sns.color_palette('tab10', n_colors=3)
if feats is None:
feats = ['pwv', 'pwv+pressure', 'pwv+pressure+doy']
dss = dss.sortby('model', ascending=False)
dst = dss.sel(features=feats) # .reset_coords(drop=True)
# df = dst[['permutation_score', 'true_score', 'pvalue']].to_dataframe()
# df['permutations'] = df.index.get_level_values(2)
# df['scorer'] = df.index.get_level_values(3)
# df['features'] = df.index.get_level_values(0)
# df['model'] = df.index.get_level_values(1)
# df['model'] = df['model'].str.replace('SVC', 'SVM')
# df = df.melt(value_vars=['permutation_score', 'true_score', 'pvalue'], id_vars=[
# 'features', 'model', 'scorer'], var_name='scores')
df = convert_da_to_long_form_df(dst[['permutation_score', 'true_score', 'pvalue']], var_name='scores')
df_p = df[df['scores'] == 'permutation_score']
df_pval = df[df['scores'] == 'pvalue']
# if ax is None:
# fig, ax = plt.subplots(figsize=(6, 8))
fg = sns.FacetGrid(df_p, col='scorer', row='model', legend_out=True,
sharex=False)
fg.map_dataframe(sns.histplot, x="value", hue="features",
legend=True, palette=cmap,
stat='density', kde=True,
element='bars', bins=10)
# pvals = dst.sel(scorer=scorer, model=model)[
# 'pvalue'].reset_coords(drop=True)
# pvals = pvals.values
# handles, labels, title = get_legend_labels_handles_title_seaborn_histplot(ax)
# new_labels = []
# for pval, label in zip(pvals, labels):
# label += ' (p={:.1})'.format(pval)
# new_labels.append(label)
# ax.legend(handles, new_labels, title=title)
df_t = df[df['scores'] == 'true_score']
for i in range(fg.axes.shape[0]): # i is rows
model = dss['model'].isel(model=i).item()
df_model = df_t[df_t['model'] == model]
df_pval_model = df_pval[df_pval['model'] == model]
for j in range(fg.axes.shape[1]): # j is cols
scorer = dss['scorer'].isel(scorer=j).item()
df1 = df_model[df_model['scorer'] == scorer]
df2 = df_pval_model[df_pval_model['scorer'] == scorer]
ax = fg.axes[i, j]
ymax = ax.get_ylim()[-1] - 0.2
plabels = []
for k, feat in enumerate(feats):
val = df1[df1['features']==feat]['value'].unique().item()
pval = df2[df2['features']==feat]['value'].unique().item()
plabels.append('pvalue: {:.2g}'.format(pval))
# print(i, val, feat, scorer, model)
ax.axvline(x=val, ymin=0, ymax=ymax, linestyle='--', color=cmap[k],
label=feat)
handles, labels = ax.get_legend_handles_labels()
ax.legend(handles=handles, labels=plabels,
prop={'size': fontsize-4}, loc='upper left')
if 'hss' in scorer or 'tss' in scorer:
ax.set_xlim(-0.35, 1)
else:
ax.set_xlim(0.15, 1)
# ax.set_xticks([0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 0.1])
# handles, labels, title = get_legend_labels_handles_title_seaborn_histplot(ax)
if model == 'SVC':
model = 'SVM'
title = '{} | scorer={}'.format(model, scorer)
ax.set_title(title, fontsize=fontsize)
# ax.set_xlim(-0.3, 1)
fg.set_ylabels('Density', fontsize=fontsize)
fg.set_xlabels('Score', fontsize=fontsize)
if wv_label is not None:
labels = [x.replace('pwv', wv_label) for x in labels]
fg.fig.legend(handles=handles, labels=labels, prop={'size': fontsize}, edgecolor='k',
framealpha=0.5, fancybox=True, facecolor='white',
ncol=5, fontsize=fontsize, loc='upper center', bbox_to_anchor=(0.5, 1.005),
bbox_transform=plt.gcf().transFigure)
# true_scores = dst.sel(scorer=scorer, model=model)['true_score']
# dss['permutation_score'].plot.hist(ax=ax, bins=25, color=color)
# ymax = ax.get_ylim()[-1] - 0.2
# ax.vlines(x=true_scores.values, ymin=0, ymax=ymax, linestyle='--', color=cmap)
fg.fig.tight_layout()
fg.fig.subplots_adjust(top=0.92)
if save:
if best:
filename = 'permutation_test_models_nested_CV_best_hp_{}_{}_neg_{}.png'.format('_'.join(feats), splits, neg)
else:
filename = 'permutation_test_models_nested_CV_{}_{}_neg_{}.png'.format('_'.join(feats), splits, neg)
plt.savefig(savefig_path / filename, bbox_inches='tight')
return fg
def run_CV_nested_tests_on_all_features(path=hydro_path, gr_path=hydro_ml_path/'nested4',
verbose=False, model_name='SVC', params=None,
savepath=None, drop_hours=None, PI=30, Ptest=None,
suffix=None, sample_from_negatives=1):
"""returns the nested CV test results for all scorers, features and models,
if model is chosen, i.e., model='MLP', returns just this model results
and its hyper-parameters per each outer split"""
import xarray as xr
from aux_gps import get_all_possible_combinations_from_list
from aux_gps import save_ncfile
feats = get_all_possible_combinations_from_list(
['pwv', 'pressure', 'doy'], reduce_single_list=True, combine_by_sep='+')
feat_list = []
for feat in feats:
print('Running CV on feature {}'.format(feat))
ds = CV_test_after_GridSearchCV(path=path, gr_path=gr_path,
model_name=model_name, params=params,
features=feat, PI=PI, Ptest=Ptest,
verbose=verbose, drop_hours=drop_hours,
sample_from_negatives=sample_from_negatives)
feat_list.append(ds)
dsf = xr.concat(feat_list, 'features')
dsf['features'] = feats
dss = dsf
dss.attrs['model'] = model_name
if Ptest is not None:
filename = 'nested_CV_test_results_{}_all_features_permutation_tests'.format(model_name)
else:
filename = 'nested_CV_test_results_{}_all_features_with_hyper_params'.format(model_name)
if params is not None:
dss.attrs['comment'] = 'using best hyper parameters for all features and outer splits'
filename += '_best_hp'
filename += '_neg_{}'.format(sample_from_negatives)
if suffix is not None:
filename += '_{}'.format(suffix)
filename += '.nc'
if savepath is not None:
save_ncfile(dss, savepath, filename)
return dss
def run_holdout_test_on_all_models_and_features(path=hydro_path, gr_path=hydro_ml_path/'holdout'):
import xarray as xr
from aux_gps import get_all_possible_combinations_from_list
feats = get_all_possible_combinations_from_list(
['pwv', 'pressure', 'doy'], reduce_single_list=True, combine_by_sep='+')
models = ['MLP', 'SVC', 'RF']
model_list = []
model_list2 = []
for model in models:
feat_list = []
feat_list2 = []
for feat in feats:
best, roc = holdout_test(path=path, gr_path=gr_path,
model_name=model, features=feat)
best.index.name = 'scorer'
ds = best[['mean_score', 'std_score', 'holdout_test_scores']].to_xarray()
roc.index.name = 'FPR'
roc_da = roc.to_xarray().to_array('scorer')
feat_list.append(ds)
feat_list2.append(roc_da)
dsf = xr.concat(feat_list, 'features')
dsf2 = xr.concat(feat_list2, 'features')
dsf['features'] = feats
dsf2['features'] = feats
model_list.append(dsf)
model_list2.append(dsf2)
dss = xr.concat(model_list, 'model')
rocs = xr.concat(model_list2, 'model')
dss['model'] = models
rocs['model'] = models
dss['roc'] = rocs
return dss
def prepare_X_y_for_holdout_test(features='pwv+doy', model_name='SVC',
path=hydro_path, drop_hours=None,
negative_samples=1):
# combine X,y and split them according to test ratio and seed:
X, y = combine_pos_neg_from_nc_file(path, negative_sample_num=negative_samples)
# re arange X features according to model:
feats = features.split('+')
if model_name == 'RF' and 'doy' in feats:
if isinstance(feats, list):
feats.remove('doy')
feats.append('DOY')
elif isinstance(feats, str):
feats = 'DOY'
elif model_name != 'RF' and 'doy' in feats:
if isinstance(feats, list):
feats.remove('doy')
feats.append('doy_sin')
feats.append('doy_cos')
elif isinstance(feats, str):
feats = ['doy_sin']
feats.append('doy_cos')
if isinstance(X, list):
Xs = []
for X1 in X:
Xs.append(select_features_from_X(X1, feats))
X = Xs
else:
X = select_features_from_X(X, feats)
if drop_hours is not None:
if isinstance(X, list):
Xs = []
for X1 in X:
Xs.append(drop_hours_in_pwv_pressure_features(X1, drop_hours,
verbose=True))
X = Xs
else:
X = drop_hours_in_pwv_pressure_features(X, drop_hours, verbose=True)
return X, y
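# --- Hedged sketch (not taken from this module) ---
# prepare_X_y_for_holdout_test above renames 'doy' to 'doy_sin'/'doy_cos' for
# non-tree models, which implies a cyclic encoding of day-of-year. The helper
# below shows one standard way to build such features; the 365.25-day period
# is an assumption, not a value read from this codebase.
def example_encode_doy_cyclic(doy):
    import numpy as np
    angle = 2 * np.pi * np.asarray(doy, dtype=float) / 365.25
    return np.sin(angle), np.cos(angle)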
def CV_test_after_GridSearchCV(path=hydro_path, gr_path=hydro_ml_path/'nested4',
model_name='SVC', features='pwv', params=None,
verbose=False, drop_hours=None, PI=None,
Ptest=None, sample_from_negatives=1):
"""do cross_validate with all scorers on all gridsearchcv folds,
reads the nested outer splits CV file in gr_path"""
import xarray as xr
import numpy as np
# cv = read_cv_params_and_instantiate(gr_path/'CV_outer.csv')
cv_dict = load_cv_splits_from_pkl(gr_path)
if verbose:
print(cv_dict)
param_df_dict = load_one_gridsearchcv_object(path=gr_path,
cv_type='nested',
features=features,
model_name=model_name,
verbose=verbose)
Xs, ys = prepare_X_y_for_holdout_test(features, model_name, path,
drop_hours=drop_hours,
negative_samples=sample_from_negatives)
bests = []
for i, negative_sample in enumerate(np.arange(1, sample_from_negatives + 1)):
print('running with negative sample #{} out of {}'.format(
negative_sample, sample_from_negatives))
if isinstance(Xs, list):
X = Xs[i]
y = ys[i]
else:
X = Xs
y = ys
if Ptest is not None:
print('Permutation Test is in progress!')
ds = run_permutation_classifier_test(X, y, 5, param_df_dict, Ptest=Ptest,
params=params,
model_name=model_name, verbose=verbose)
return ds
if params is not None:
if verbose:
print('running with custom hyper parameters: ', params)
outer_bests = []
outer_rocs = []
fis = []
pi_means = []
pi_stds = []
n_splits = len([x for x in cv_dict.keys()])
for split, tt in cv_dict.items():
X_train = X[tt['train']]
y_train = y[tt['train']]
X_test = X[tt['test']]
y_test = y[tt['test']]
outer_split = '{}-{}'.format(split, n_splits)
# for i, (train_index, test_index) in enumerate(cv.split(X, y)):
# X_train = X[train_index]
# y_train = y[train_index]
# X_test = X[test_index]
# y_test = y[test_index]
# outer_split = '{}-{}'.format(i+1, cv.n_splits)
best_params_df = param_df_dict.get(outer_split)
if params is not None:
for key, value in params.items():
if isinstance(value, tuple):
for ind in best_params_df.index:
best_params_df.at[ind, key] = value
else:
best_params_df[key] = value
if model_name == 'RF':
if PI is not None:
bdf, roc, fi, pi_mean, pi_std = run_test_on_CV_split(X_train, y_train, X_test, y_test,
best_params_df, PI=PI, Ptest=Ptest,
model_name=model_name, verbose=verbose)
else:
bdf, roc, fi = run_test_on_CV_split(X_train, y_train, X_test, y_test,
best_params_df, PI=PI, Ptest=Ptest,
model_name=model_name, verbose=verbose)
fis.append(fi)
else:
if PI is not None:
bdf, roc, pi_mean, pi_std = run_test_on_CV_split(X_train, y_train, X_test, y_test,
best_params_df, PI=PI,
model_name=model_name, verbose=verbose)
else:
bdf, roc = run_test_on_CV_split(X_train, y_train, X_test, y_test,
best_params_df, PI=PI,
model_name=model_name, verbose=verbose)
if PI is not None:
pi_means.append(pi_mean)
pi_stds.append(pi_std)
bdf.index.name = 'scorer'
roc.index.name = 'FPR'
if 'hidden_layer_sizes' in bdf.columns:
bdf['hidden_layer_sizes'] = bdf['hidden_layer_sizes'].astype(str)
bdf_da = bdf.to_xarray()
roc_da = roc.to_xarray().to_array('scorer')
roc_da.name = 'TPR'
outer_bests.append(bdf_da)
outer_rocs.append(roc_da)
best_da = xr.concat(outer_bests, 'outer_split')
roc_da = xr.concat(outer_rocs, 'outer_split')
best = xr.merge([best_da, roc_da])
best['outer_split'] = np.arange(1, n_splits + 1)
if model_name == 'RF':
fi_da = xr.concat(fis, 'outer_split')
best['feature_importances'] = fi_da
if PI is not None:
pi_mean_da = xr.concat(pi_means, 'outer_split')
pi_std_da = xr.concat(pi_stds, 'outer_split')
best['PI_mean'] = pi_mean_da
best['PI_std'] = pi_std_da
bests.append(best)
if len(bests) == 1:
return bests[0]
else:
best_ds = xr.concat(bests, 'neg_sample')
best_ds['neg_sample'] = np.arange(1, sample_from_negatives + 1)
return best_ds
def run_permutation_classifier_test(X, y, cv, best_params_df, Ptest=100,
model_name='SVC', verbose=False, params=None):
from sklearn.model_selection import permutation_test_score
import xarray as xr
import numpy as np
def run_one_permutation_test(X=X, y=y, cv=cv, bp_df=best_params_df,
model_name=model_name, n_perm=Ptest,
verbose=verbose):
true_scores = []
pvals = []
perm_scores = []
for scorer in bp_df.index:
sk_model = ml.pick_model(model_name)
# get best params (drop two last cols since they are not params):
b_params = bp_df.T[scorer][:-2].to_dict()
if verbose:
print('{} scorer, params:{}'.format(scorer, b_params))
true, perm_scrs, pval = permutation_test_score(sk_model, X, y,
cv=cv,
n_permutations=Ptest,
scoring=scorers(scorer),
random_state=0,
n_jobs=-1)
true_scores.append(true)
pvals.append(pval)
perm_scores.append(perm_scrs)
true_da = xr.DataArray(true_scores, dims=['scorer'])
true_da['scorer'] = [x for x in bp_df.index.values]
true_da.name = 'true_score'
pval_da = xr.DataArray(pvals, dims=['scorer'])
pval_da['scorer'] = [x for x in bp_df.index.values]
pval_da.name = 'pvalue'
perm_da = xr.DataArray(perm_scores, dims=['scorer', 'permutations'])
perm_da['scorer'] = [x for x in bp_df.index.values]
perm_da['permutations'] = np.arange(1, Ptest+1)
perm_da.name = 'permutation_score'
ds = xr.merge([true_da, pval_da, perm_da])
return ds
ml = ML_Classifier_Switcher()
if params is not None:
best_p_df = best_params_df['1-{}'.format(len(best_params_df))]
for key, value in params.items():
if isinstance(value, tuple):
for ind in best_p_df.index:
best_p_df.at[ind, key] = value
else:
best_p_df[key] = value
dss = run_one_permutation_test(bp_df=best_p_df)
else:
if verbose:
print('Picking {} model with best params'.format(model_name))
splits = []
for i, df in enumerate(best_params_df.values()):
if verbose:
print('running on split #{}'.format(i+1))
ds = run_one_permutation_test(bp_df=df)
splits.append(ds)
dss = xr.concat(splits, dim='outer_split')
dss['outer_split'] = np.arange(1, len(best_params_df)+ 1)
return dss
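# --- Hedged minimal sketch (not part of the original pipeline) ---
# Illustrates the sklearn permutation_test_score call pattern used in
# run_permutation_classifier_test above, on toy data with a simple estimator;
# the data, estimator and scorer below are assumptions kept small on purpose.
def example_permutation_test():
    from sklearn.datasets import make_classification
    from sklearn.linear_model import LogisticRegression
    from sklearn.model_selection import permutation_test_score
    X, y = make_classification(n_samples=100, random_state=0)
    true_score, perm_scores, pvalue = permutation_test_score(
        LogisticRegression(max_iter=200), X, y, cv=5,
        n_permutations=30, scoring='f1', random_state=0)
    # a pvalue near 1/(n_permutations+1) indicates a score unlikely by chance
    return true_score, pvalue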
def run_test_on_CV_split(X_train, y_train, X_test, y_test, param_df,
model_name='SVC', verbose=False, PI=None,
Ptest=None):
import numpy as np
import xarray as xr
import pandas as pd
from sklearn.metrics import roc_curve
from sklearn.metrics import roc_auc_score
from sklearn.inspection import permutation_importance
best_df = param_df.copy()
ml = ML_Classifier_Switcher()
if verbose:
print('Picking {} model with best params'.format(model_name))
# print('Features are: {}'.format(features))
test_scores = []
fi_list = []
mean_fpr = np.linspace(0, 1, 100)
tprs = []
roc_aucs = []
pi_mean_list = []
pi_std_list = []
for scorer in best_df.index:
sk_model = ml.pick_model(model_name)
# get best params (drop two last cols since they are not params):
params = best_df.T[scorer][:-2].to_dict()
if verbose:
print('{} scorer, params:{}'.format(scorer, params))
sk_model.set_params(**params)
sk_model.fit(X_train, y_train)
if hasattr(sk_model, 'feature_importances_'):
# print(X_train['feature'])
# input('press any key')
FI = xr.DataArray(sk_model.feature_importances_, dims=['feature'])
FI['feature'] = X_train['feature']
fi_list.append(FI)
y_pred = sk_model.predict(X_test)
fpr, tpr, _ = roc_curve(y_test, y_pred)
interp_tpr = np.interp(mean_fpr, fpr, tpr)
interp_tpr[0] = 0.0
roc_auc = roc_auc_score(y_test, y_pred)
roc_aucs.append(roc_auc)
tprs.append(interp_tpr)
score = scorer_function(scorer, y_test, y_pred)
test_scores.append(score)
if PI is not None:
pi = permutation_importance(sk_model, X_test, y_test,
n_repeats=PI,
scoring=scorers(scorer),
random_state=0, n_jobs=-1)
pi_mean = xr.DataArray(pi['importances_mean'], dims='feature')
pi_std = xr.DataArray(pi['importances_std'], dims='feature')
pi_mean.name = 'PI_mean'
pi_std.name = 'PI_std'
pi_mean['feature'] = X_train['feature']
pi_std['feature'] = X_train['feature']
pi_mean_list.append(pi_mean)
pi_std_list.append(pi_std)
if PI is not None:
pi_mean_da = xr.concat(pi_mean_list, 'scorer')
pi_std_da = xr.concat(pi_std_list, 'scorer')
pi_mean_da['scorer'] = [x for x in best_df.index.values]
pi_std_da['scorer'] = [x for x in best_df.index.values]
roc_df = pd.DataFrame(tprs).T
roc_df.columns = [x for x in best_df.index]
roc_df.index = mean_fpr
best_df['test_score'] = test_scores
best_df['roc_auc_score'] = roc_aucs
if hasattr(sk_model, 'feature_importances_'):
fi = xr.concat(fi_list, 'scorer')
fi['scorer'] = [x for x in best_df.index.values]
if PI is not None:
return best_df, roc_df, fi, pi_mean_da, pi_std_da
else:
return best_df, roc_df, fi
elif PI is not None:
return best_df, roc_df, pi_mean_da, pi_std_da
else:
return best_df, roc_df
def holdout_test(path=hydro_path, gr_path=hydro_ml_path/'holdout',
model_name='SVC', features='pwv', return_RF_FI=False,
verbose=False):
"""do a holdout test with best model from gridsearchcv
with all scorers"""
from sklearn.model_selection import train_test_split
from sklearn.metrics import roc_curve
from sklearn.metrics import roc_auc_score
import xarray as xr
import pandas as pd
import numpy as np
# process gridsearchcv results:
best_df, test_ratio, seed = load_one_gridsearchcv_object(path=gr_path,
cv_type='holdout',
features=features,
model_name=model_name,
verbose=False)
print('Using random seed of {} and {}% test ratio'.format(seed, test_ratio))
ts = int(test_ratio) / 100
X, y = prepare_X_y_for_holdout_test(features, model_name, path)
# split using test_size and seed:
X_train, X_test, y_train, y_test = train_test_split(X, y,
test_size=ts,
random_state=int(seed),
stratify=y)
if verbose:
print('y train pos/neg:{}, {}'.format((y_train==1).sum().item(),(y_train==0).sum().item()))
print('y test pos/neg:{}, {}'.format((y_test==1).sum().item(),(y_test==0).sum().item()))
# pick model and set the params to best from gridsearchcv:
ml = ML_Classifier_Switcher()
print('Picking {} model with best params'.format(model_name))
print('Features are: {}'.format(features))
test_scores = []
fi_list = []
mean_fpr = np.linspace(0, 1, 100)
tprs = []
roc_aucs = []
for scorer in best_df.index:
sk_model = ml.pick_model(model_name)
# get best params (drop two last cols since they are not params):
params = best_df.T[scorer][:-2].to_dict()
if verbose:
print('{} scorer, params:{}'.format(scorer, params))
sk_model.set_params(**params)
sk_model.fit(X_train, y_train)
if hasattr(sk_model, 'feature_importances_'):
FI = xr.DataArray(sk_model.feature_importances_, dims=['feature'])
FI['feature'] = X_train['feature']
fi_list.append(FI)
y_pred = sk_model.predict(X_test)
fpr, tpr, _ = roc_curve(y_test, y_pred)
interp_tpr = np.interp(mean_fpr, fpr, tpr)
interp_tpr[0] = 0.0
roc_auc = roc_auc_score(y_test, y_pred)
roc_aucs.append(roc_auc)
tprs.append(interp_tpr)
score = scorer_function(scorer, y_test, y_pred)
test_scores.append(score)
roc_df = pd.DataFrame(tprs).T
roc_df.columns = [x for x in best_df.index]
roc_df.index = mean_fpr
best_df['holdout_test_scores'] = test_scores
best_df['roc_auc_score'] = roc_aucs
if fi_list and return_RF_FI:
da = xr.concat(fi_list, 'scorer')
da['scorer'] = best_df.index.values
da.name = 'RF_feature_importances'
return da
return best_df, roc_df
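# --- Hedged sketch (standard definitions, may differ from scorer_function) ---
# The 'hss' and 'tss' scorers used throughout this module are the Heidke Skill
# Score and True Skill Statistic. The helper below computes both from a binary
# confusion matrix using their textbook definitions; the scorer_function
# defined elsewhere in this module remains the authoritative implementation.
def example_hss_tss(y_true, y_pred):
    from sklearn.metrics import confusion_matrix
    tn, fp, fn, tp = confusion_matrix(y_true, y_pred).ravel()
    # TSS = hit rate - false alarm rate:
    tss = tp / (tp + fn) - fp / (fp + tn)
    # HSS = 2*(tp*tn - fp*fn) / ((tp+fn)*(fn+tn) + (tp+fp)*(fp+tn)):
    hss = 2 * (tp * tn - fp * fn) / ((tp + fn) * (fn + tn) + (tp + fp) * (fp + tn))
    return hss, tss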
def load_one_gridsearchcv_object(path=hydro_ml_path, cv_type='holdout', features='pwv',
model_name='SVC', verbose=True):
"""load one gridsearchcv obj with model_name and features and run read_one_gridsearchcv_object"""
from aux_gps import path_glob
import joblib
# first filter for model name:
if verbose:
print('loading GridsearchCVs results for {} model with {} cv type'.format(model_name, cv_type))
model_files = path_glob(path, 'GRSRCHCV_{}_*.pkl'.format(cv_type))
model_files = [x for x in model_files if model_name in x.as_posix()]
# now select features:
if verbose:
print('loading GridsearchCVs results with {} features'.format(features))
model_features = [x.as_posix().split('/')[-1].split('_')[3] for x in model_files]
feat_ind = get_feature_set_from_list(model_features, features)
# also get the test ratio and seed number:
if len(feat_ind) > 1:
if verbose:
print('found {} GR objects.'.format(len(feat_ind)))
files = sorted([model_files[x] for x in feat_ind])
outer_splits = [x.as_posix().split('/')[-1].split('.')[0].split('_')[-3] for x in files]
grs = [joblib.load(x) for x in files]
best_dfs = [read_one_gridsearchcv_object(x) for x in grs]
di = dict(zip(outer_splits, best_dfs))
return di
else:
file = model_files[feat_ind]
seed = file.as_posix().split('/')[-1].split('.')[0].split('_')[-1]
outer_splits = file.as_posix().split('/')[-1].split('.')[0].split('_')[-3]
# load and produce best_df:
gr = joblib.load(file)
best_df = read_one_gridsearchcv_object(gr)
return best_df, outer_splits, seed
def get_feature_set_from_list(model_features_list, features, sep='+'):
"""select features from model_features_list,
return the index in the model_features_list and the entry itself"""
# first find if features is a single or multiple features:
if isinstance(features, str) and sep not in features:
try:
ind = [i for i, e in enumerate(model_features_list) if e == features]
# ind = model_features_list.index(features)
except ValueError:
raise ValueError('{} is not in {}'.format(features, ', '.join(model_features_list)))
elif isinstance(features, str) and sep in features:
features_split = features.split(sep)
mf = [x.split(sep) for x in model_features_list]
bool_list = [set(features_split) == (set(x)) for x in mf]
ind = [i for i, x in enumerate(bool_list) if x]
# print(len(ind))
# ind = ind[0]
# feat = model_features_list[ind]
# feat = model_features_list[ind]
return ind
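# --- Hedged usage sketch ---
# get_feature_set_from_list matches '+'-joined feature strings as sets, so the
# order of the joined features does not matter. The feature lists below are
# illustrative only.
def example_get_feature_set_from_list():
    model_feats = ['pwv', 'pwv+pressure', 'doy+pwv+pressure']
    # 'pressure+pwv' matches 'pwv+pressure' because the comparison is set-based:
    return get_feature_set_from_list(model_feats, 'pressure+pwv')  # -> [1]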
def read_one_gridsearchcv_object(gr):
"""read one gridsearchcv multimetric object and
get the best params, best mean/std scores"""
import pandas as pd
# first get all the scorers used:
scorers = [x for x in gr.scorer_.keys()]
# now loop over the scorers:
best_params = []
best_mean_scores = []
best_std_scores = []
for scorer in scorers:
df_mean = pd.concat([pd.DataFrame(gr.cv_results_["params"]), pd.DataFrame(
gr.cv_results_["mean_test_{}".format(scorer)], columns=[scorer])], axis=1)
df_std = pd.concat([pd.DataFrame(gr.cv_results_["params"]), pd.DataFrame(
gr.cv_results_["std_test_{}".format(scorer)], columns=[scorer])], axis=1)
# best index = highest score:
best_ind = df_mean[scorer].idxmax()
best_mean_scores.append(df_mean.iloc[best_ind][scorer])
best_std_scores.append(df_std.iloc[best_ind][scorer])
best_params.append(df_mean.iloc[best_ind].to_frame().T.iloc[:, :-1])
best_df = pd.concat(best_params)
best_df['mean_score'] = best_mean_scores
best_df['std_score'] = best_std_scores
best_df.index = scorers
return best_df
# # param grid dict:
# params = gr.param_grid
# # scorer names:
# scoring = [x for x in gr.scoring.keys()]
# # df:
# df = pd.DataFrame().from_dict(gr.cv_results_)
# # produce multiindex from param_grid dict:
# param_names = [x for x in params.keys()]
# # unpack param_grid vals to list of lists:
# pro = [[y for y in x] for x in params.values()]
# ind = pd.MultiIndex.from_product((pro), names=param_names)
# df.index = ind
# best_params = []
# best_mean_scores = []
# best_std_scores = []
# for scorer in scoring:
# best_params.append(df[df['rank_test_{}'.format(scorer)]==1]['mean_test_{}'.format(scorer)].index[0])
# best_mean_scores.append(df[df['rank_test_{}'.format(scorer)]==1]['mean_test_{}'.format(scorer)].iloc[0])
# best_std_scores.append(df[df['rank_test_{}'.format(scorer)]==1]['std_test_{}'.format(scorer)].iloc[0])
# best_df = pd.DataFrame(best_params, index=scoring, columns=param_names)
# best_df['mean_score'] = best_mean_scores
# best_df['std_score'] = best_std_scores
# return best_df, best_df_1
def process_gridsearch_results(GridSearchCV, model_name,
split_dim='inner_kfold', features=None,
pwv_id=None, hs_id=None, test_size=None):
    """Take a fitted GridSearchCV object (with cv_results_) and convert it into an xarray Dataset."""
    # finish getting best results from all scorers together
    import xarray as xr
    import pandas as pd
    import numpy as np
params = GridSearchCV.param_grid
scoring = GridSearchCV.scoring
results = GridSearchCV.cv_results_
# for scorer in scoring:
# for sample in ['train', 'test']:
# sample_score_mean = results['mean_{}_{}'.format(sample, scorer)]
# sample_score_std = results['std_{}_{}'.format(sample, scorer)]
# best_index = np.nonzero(results['rank_test_{}'.format(scorer)] == 1)[0][0]
# best_score = results['mean_test_{}'.format(scorer)][best_index]
names = [x for x in params.keys()]
# unpack param_grid vals to list of lists:
pro = [[y for y in x] for x in params.values()]
ind = pd.MultiIndex.from_product((pro), names=names)
# result_names = [x for x in GridSearchCV.cv_results_.keys() if 'split'
# not in x and 'time' not in x and 'param' not in x and
# 'rank' not in x]
result_names = [
x for x in results.keys() if 'param' not in x]
ds = xr.Dataset()
for da_name in result_names:
da = xr.DataArray(results[da_name])
ds[da_name] = da
ds = ds.assign(dim_0=ind).unstack('dim_0')
for dim in ds.dims:
if ds[dim].dtype == 'O':
try:
ds[dim] = ds[dim].astype(str)
except ValueError:
ds = ds.assign_coords({dim: [str(x) for x in ds[dim].values]})
if ('True' in ds[dim]) and ('False' in ds[dim]):
ds[dim] = ds[dim] == 'True'
# get all splits data and concat them along number of splits:
all_splits = [x for x in ds.data_vars if 'split' in x]
train_splits = [x for x in all_splits if 'train' in x]
test_splits = [x for x in all_splits if 'test' in x]
# loop over scorers:
trains = []
tests = []
for scorer in scoring:
train_splits_scorer = [x for x in train_splits if scorer in x]
trains.append(xr.concat([ds[x]
for x in train_splits_scorer], split_dim))
test_splits_scorer = [x for x in test_splits if scorer in x]
tests.append(xr.concat([ds[x] for x in test_splits_scorer], split_dim))
splits_scorer = np.arange(1, len(train_splits_scorer) + 1)
train_splits = xr.concat(trains, 'scoring')
test_splits = xr.concat(tests, 'scoring')
# splits = [x for x in range(len(train_splits))]
# train_splits = xr.concat([ds[x] for x in train_splits], 'split')
# test_splits = xr.concat([ds[x] for x in test_splits], 'split')
# replace splits data vars with newly dataarrays:
ds = ds[[x for x in ds.data_vars if x not in all_splits]]
ds['split_train_score'] = train_splits
ds['split_test_score'] = test_splits
ds[split_dim] = splits_scorer
if isinstance(scoring, list):
ds['scoring'] = scoring
elif isinstance(scoring, dict):
ds['scoring'] = [x for x in scoring.keys()]
ds.attrs['name'] = 'CV_results'
ds.attrs['param_names'] = names
ds.attrs['model_name'] = model_name
ds.attrs['{}_splits'.format(split_dim)] = ds[split_dim].size
if GridSearchCV.refit:
if hasattr(GridSearchCV.best_estimator_, 'feature_importances_'):
f_import = xr.DataArray(
GridSearchCV.best_estimator_.feature_importances_,
dims=['feature'])
f_import['feature'] = features
ds['feature_importances'] = f_import
ds['best_score'] = GridSearchCV.best_score_
# ds['best_model'] = GridSearchCV.best_estimator_
ds.attrs['refitted_scorer'] = GridSearchCV.refit
for name in names:
if isinstance(GridSearchCV.best_params_[name], tuple):
GridSearchCV.best_params_[name] = ','.join(
map(str, GridSearchCV.best_params_[name]))
ds['best_{}'.format(name)] = GridSearchCV.best_params_[name]
return ds, GridSearchCV.best_estimator_
else:
return ds, None
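# Minimal sketch of the reshaping idea used in process_gridsearch_results, with
# a made-up 2x2 parameter grid: cv_results_ arrays follow the cartesian-product
# order of the parameter grid, so indexing them with pd.MultiIndex.from_product
# and unstacking yields one dimension per hyperparameter. Here the unstacking is
# shown via pandas' to_xarray(); the function above achieves the same with
# Dataset.assign(dim_0=...).unstack('dim_0').
def _example_unstack_cv_results():
    import pandas as pd
    params = {'C': [0.1, 1.0], 'gamma': [0.01, 0.1]}
    ind = pd.MultiIndex.from_product(list(params.values()),
                                     names=list(params.keys()))
    mean_test_score = pd.Series([0.7, 0.8, 0.6, 0.9], index=ind)
    da = mean_test_score.to_xarray()
    # da is a DataArray with dims ('C', 'gamma') and shape (2, 2)
    return da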
def save_cv_results(cvr, savepath=hydro_path):
from aux_gps import save_ncfile
features = '+'.join(cvr.attrs['features'])
# pwv_id = cvr.attrs['pwv_id']
# hs_id = cvr.attrs['hs_id']
# neg_pos_ratio = cvr.attrs['neg_pos_ratio']
ikfolds = cvr.attrs['inner_kfold_splits']
okfolds = cvr.attrs['outer_kfold_splits']
name = cvr.attrs['model_name']
refitted_scorer = cvr.attrs['refitted_scorer'].replace('_', '-')
# filename = 'CVR_{}_{}_{}_{}_{}_{}_{}_{}.nc'.format(pwv_id, hs_id,
# name, features, refitted_scorer, ikfolds, okfolds, neg_pos_ratio)
filename = 'CVR_{}_{}_{}_{}_{}.nc'.format(
name, features, refitted_scorer, ikfolds, okfolds)
save_ncfile(cvr, savepath, filename)
return
def scikit_fit_predict(X, y, seed=42, with_pressure=True, n_splits=7,
plot=True):
# step1: CV for train/val (80% from 80-20 test). display results with
# model and scores(AUC, f1), use StratifiedKFold
# step 2: use validated model with test (20%) and build ROC curve
# step 3: add features (pressure) but check for correlation
# check permutations with scikit learn
from sklearn.model_selection import train_test_split
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis, QuadraticDiscriminantAnalysis
from sklearn.metrics import f1_score
from sklearn.metrics import plot_roc_curve
from sklearn.svm import SVC
from numpy import interp
from sklearn.metrics import auc
import numpy as np
import matplotlib.pyplot as plt
from sklearn.model_selection import KFold
from sklearn.model_selection import LeaveOneOut
from sklearn.metrics import confusion_matrix
from sklearn.metrics import roc_auc_score
from sklearn.model_selection import StratifiedKFold
if not with_pressure:
just_pw = [x for x in X.feature.values if 'pressure' not in x]
X = X.sel(feature=just_pw)
X_tt, X_test, y_tt, y_test = train_test_split(
X, y, test_size=0.2, shuffle=True, random_state=seed)
cv = StratifiedKFold(n_splits=n_splits, shuffle=True, random_state=seed)
# cv = LeaveOneOut()
classifier = SVC(kernel='rbf', probability=False,
random_state=seed)
# classifier = LinearDiscriminantAnalysis()
# clf = QuadraticDiscriminantAnalysis()
scores = []
fig, ax = plt.subplots()
tprs = []
aucs = []
mean_fpr = np.linspace(0, 1, 100)
for i, (train, val) in enumerate(cv.split(X_tt, y_tt)):
# for i in range(100):
# X_train, X_val, y_train, y_val = train_test_split(
# X_tt, y_tt, shuffle=True, test_size=0.5, random_state=i)
# clf.fit(X_train, y_train)
classifier.fit(X_tt[train], y_tt[train])
# viz = plot_roc_curve(clf, X_val, y_val,
# name='ROC run {}'.format(i),
# alpha=0.3, lw=1, ax=ax)
viz = plot_roc_curve(classifier, X_tt[val], y_tt[val],
name='ROC fold {}'.format(i),
alpha=0.3, lw=1, ax=ax)
interp_tpr = interp(mean_fpr, viz.fpr, viz.tpr)
interp_tpr[0] = 0.0
tprs.append(interp_tpr)
# aucs.append(viz.roc_auc)
# y_pred = clf.predict(X_val)
y_pred = classifier.predict(X_tt[val])
aucs.append(roc_auc_score(y_tt[val], y_pred))
# scores.append(clf.score(X_val, y_val))
scores.append(f1_score(y_tt[val], y_pred))
scores = np.array(scores)
ax.plot([0, 1], [0, 1], linestyle='--', lw=2, color='r',
label='Chance', alpha=.8)
mean_tpr = np.mean(tprs, axis=0)
mean_tpr[-1] = 1.0
mean_auc = auc(mean_fpr, mean_tpr)
std_auc = np.std(aucs)
ax.plot(mean_fpr, mean_tpr, color='b',
label=r'Mean ROC (AUC = %0.2f $\pm$ %0.2f)' % (mean_auc, std_auc),
lw=2, alpha=.8)
std_tpr = np.std(tprs, axis=0)
tprs_upper = np.minimum(mean_tpr + std_tpr, 1)
tprs_lower = np.maximum(mean_tpr - std_tpr, 0)
ax.fill_between(mean_fpr, tprs_lower, tprs_upper, color='grey', alpha=.2,
label=r'$\pm$ 1 std. dev.')
    ax.set(xlim=[-0.05, 1.05], ylim=[-0.05, 1.05])
ax.legend(loc="lower right")
ax.set_title(
'ROC curve for KFold={}, with pressure anomalies.'.format(n_splits))
if not with_pressure:
ax.set_title(
'ROC curve for KFold={}, without pressure anomalies.'.format(n_splits))
y_test_predict = classifier.predict(X_test)
print('final test predict score:')
print(f1_score(y_test, y_test_predict))
if plot:
plt.figure()
plt.hist(scores, bins=15, edgecolor='k')
return scores
# clf.fit(X,y)
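# The mean-ROC band in scikit_fit_predict follows the usual recipe of
# interpolating every fold's TPR onto a shared FPR grid, so that point-wise
# mean and standard deviation are well defined. Self-contained sketch with
# synthetic fold curves (not the PWV/flood data used above):
def _example_mean_roc_band():
    import numpy as np
    mean_fpr = np.linspace(0, 1, 100)
    fold_curves = [(np.array([0.0, 0.5, 1.0]), np.array([0.0, 0.7, 1.0])),
                   (np.array([0.0, 0.4, 1.0]), np.array([0.0, 0.6, 1.0]))]
    tprs = []
    for fpr, tpr in fold_curves:
        interp_tpr = np.interp(mean_fpr, fpr, tpr)
        interp_tpr[0] = 0.0
        tprs.append(interp_tpr)
    mean_tpr = np.mean(tprs, axis=0)
    mean_tpr[-1] = 1.0
    std_tpr = np.std(tprs, axis=0)
    return mean_tpr, std_tpr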
def produce_X_y_from_list(pw_stations=['drag', 'dsea', 'elat'],
hs_ids=[48125, 48199, 60170],
pressure_station='bet-dagan', max_flow=0,
window=25, neg_pos_ratio=1, path=work_yuval,
ims_path=ims_path, hydro_path=hydro_path,
concat_Xy=False):
if isinstance(hs_ids, int):
hs_ids = [hs_ids for x in range(len(pw_stations))]
kwargs = locals()
[kwargs.pop(x) for x in ['pw_stations', 'hs_ids', 'concat_Xy']]
Xs = []
ys = []
for pw_station, hs_id in list(zip(pw_stations, hs_ids)):
X, y = produce_X_y(pw_station, hs_id, **kwargs)
Xs.append(X)
ys.append(y)
if concat_Xy:
print('concatenating pwv stations {}, with hydro_ids {}.'.format(
pw_stations, hs_ids))
X, y = concat_X_y(Xs, ys)
return X, y
else:
return Xs, ys
def concat_X_y(Xs, ys):
import xarray as xr
import pandas as pd
X_attrs = [x.attrs for x in Xs]
X_com_attrs = dict(zip( | pd.DataFrame(X_attrs) | pandas.DataFrame |
# <NAME>
# 2020-08-16
import numpy as np
import pandas as pd
import geopandas as gpd
import rtree
import itertools
from shapely.geometry import MultiPoint, LineString
from shapely.ops import snap, split
pd.options.mode.chained_assignment = None
def connect_poi(pois, nodes, edges, key_col=None, path=None, threshold=200, knn=5, meter_epsg=3857):
"""Connect and integrate a set of POIs into an existing road network.
Given a road network in the form of two GeoDataFrames: nodes and edges,
link each POI to the nearest edge (road segment) based on its projection
point (PP) and generate a new integrated road network including the POIs,
the projected points, and the connection edge.
Args:
pois (GeoDataFrame): a gdf of POI (geom: Point)
nodes (GeoDataFrame): a gdf of road network nodes (geom: Point)
edges (GeoDataFrame): a gdf of road network edges (geom: LineString)
key_col (str): a unique key column of pois should be provided,
e.g., 'index', 'osmid', 'poi_number', etc.
Currently, this will be renamed into 'osmid' in the output.
[NOTE] For use in pandana, you may want to ensure this
column is numeric-only to avoid processing errors.
Preferably use unique integers (int or str) only,
and be aware not to intersect with the node key,
'osmid' if you use OSM data, in the nodes gdf.
path (str): directory path to use for saving files (nodes and edges).
Outputs will NOT be saved if this arg is not specified.
threshold (int): the max length of a POI connection edge, POIs with
connection edge beyond this length will be removed.
The unit is in meters as crs epsg is set to 3857 by
default during processing.
knn (int): k nearest neighbors to query for the nearest edge.
Consider increasing this number up to 10 if the connection
output is slightly unreasonable. But higher knn number will
slow down the process.
meter_epsg (int): preferred EPSG in meter units. Suggested 3857 or 3395.
Returns:
nodes (GeoDataFrame): the original gdf with POIs and PPs appended
edges (GeoDataFrame): the original gdf with connection edges appended
and existing edges updated (if PPs are present)
Note:
1. Make sure all three input GeoDataFrames have defined crs attribute.
Try something like `gdf.crs` or `gdf.crs = 'epsg:4326'`.
They will then be converted into epsg:3857 or specified meter_epsg for processing.
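    Example:
        An illustrative sketch; the file names and key column are hypothetical::

            pois = gpd.read_file('pois.geojson')    # Point geometries
            nodes = gpd.read_file('nodes.geojson')  # road network nodes (Point)
            edges = gpd.read_file('edges.geojson')  # road network edges (LineString)
            new_nodes, new_edges = connect_poi(pois, nodes, edges, key_col='poi_id')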
"""
## STAGE 0: initialization
# 0-1: helper functions
def find_kne(point, lines):
dists = np.array(list(map(lambda l: l.distance(point), lines)))
kne_pos = dists.argsort()[0]
kne = lines.iloc[[kne_pos]]
kne_idx = kne.index[0]
return kne_idx, kne.values[0]
def get_pp(point, line):
"""Get the projected point (pp) of 'point' on 'line'."""
# project new Point to be interpolated
pp = line.interpolate(line.project(point)) # PP as a Point
return pp
def split_line(line, pps):
"""Split 'line' by all intersecting 'pps' (as multipoint).
Returns:
new_lines (list): a list of all line segments after the split
"""
# IMPORTANT FIX for ensuring intersection between splitters and the line
# but no need for updating edges_meter manually because the old lines will be
# replaced anyway
line = snap(line, pps, 1e-8) # slow?
try:
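            # NOTE (assumed environment): under Shapely >= 2.0, `split` returns a
            # GeometryCollection whose parts are exposed via `.geoms`
            # (i.e. list(split(line, pps).geoms)); the direct iteration below assumes
            # Shapely 1.x behaviour and otherwise falls into the TypeError branch.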
new_lines = list(split(line, pps)) # split into segments
return new_lines
except TypeError as e:
print('Error when splitting line: {}\n{}\n{}\n'.format(e, line, pps))
return []
def update_nodes(nodes, new_points, ptype, meter_epsg=3857):
"""Update nodes with a list (pp) or a GeoDataFrame (poi) of new_points.
Args:
ptype: type of Point list to append, 'pp' or 'poi'
"""
# create gdf of new nodes (projected PAPs)
if ptype == 'pp':
new_nodes = gpd.GeoDataFrame(new_points, columns=['geometry'], crs=f'epsg:{meter_epsg}')
n = len(new_nodes)
new_nodes['highway'] = node_highway_pp
new_nodes['osmid'] = [int(osmid_prefix + i) for i in range(n)]
# create gdf of new nodes (original POIs)
elif ptype == 'poi':
new_nodes = new_points[['geometry', key_col]]
new_nodes.columns = ['geometry', 'osmid']
new_nodes['highway'] = node_highway_poi
new_nodes['osmid'] = new_nodes['osmid'].astype(int)
        else:
            raise ValueError("Unknown ptype '{}' when updating nodes, expected 'pp' or 'poi'.".format(ptype))
# merge new nodes (it is safe to ignore the index for nodes)
gdfs = [nodes, new_nodes]
nodes = gpd.GeoDataFrame( | pd.concat(gdfs, ignore_index=True, sort=False) | pandas.concat |
import os
import argparse
import pandas as pd
from tools.utils import process_gt_metadata
def combine_inferences(
ground_truth_csv: str,
our_csv: str,
bsnet_csv: str,
covid_net_csv: str,
save_dir: str,
save_name: str,
) -> None:
df_ground_truth = pd.read_csv(ground_truth_csv)
df_ground_truth = process_gt_metadata(df_ground_truth)
df_ground_truth = df_ground_truth.rename(columns={'Score C': 'GT'})
df_our = pd.read_csv(our_csv)
df_our = df_our.rename(columns={'score': 'Our'})
df_bsnet = | pd.read_csv(bsnet_csv) | pandas.read_csv |
"""Daily avg wind speeds"""
import datetime
import psycopg2.extras
import numpy as np
import pandas as pd
import matplotlib.patheffects as PathEffects
from pyiem.util import drct2text
from pyiem.datatypes import speed
from pyiem.util import get_autoplot_context, get_dbconn
from pyiem.plot.use_agg import plt
from pyiem.exceptions import NoDataFound
PDICT = {'KT': 'knots',
'MPH': 'miles per hour',
'MPS': 'meters per second',
'KMH': 'kilometers per hour'}
def get_description():
""" Return a dict describing how to call this plotter """
desc = dict()
desc['data'] = True
desc['cache'] = 86400
desc['description'] = """This plot displays daily average wind speeds for
a given year and month of your choice. These values are computed by the
IEM using available observations. Some observation sites explicitly
produce an average wind speed, but that is not considered for this plot.
You can download daily summary data
<a href="/request/daily.phtml" class="alert-link">here</a>.
The average wind direction
is computed by vector averaging of the wind speed and direction reports.
"""
desc['arguments'] = [
dict(type='sid', name='zstation', default='DSM',
network='IA_ASOS', label='Select Station:'),
dict(type='year', name='year', default=datetime.datetime.now().year,
label='Select Year:'),
dict(type='month', name='month', default=datetime.datetime.now().month,
label='Select Month:'),
dict(type='select', name='units', default='MPH',
label='Wind Speed Units:', options=PDICT),
]
return desc
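# Vector averaging of wind direction (referenced in the plot description above)
# averages the u/v wind components and converts the mean vector back into a
# bearing. Illustrative helper only; it is not used by the database query below:
def _example_vector_avg_drct(speed_kt, drct_deg):
    """Speed-weighted mean wind direction in degrees from arrays of observations."""
    speed_kt = np.asarray(speed_kt, dtype=float)
    rad = np.radians(np.asarray(drct_deg, dtype=float))
    u = -speed_kt * np.sin(rad)  # eastward component of the wind vector
    v = -speed_kt * np.cos(rad)  # northward component of the wind vector
    return float(np.degrees(np.arctan2(-u.mean(), -v.mean())) % 360)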
def draw_line(x, y, angle):
"""Draw a line"""
r = 0.25
plt.arrow(x, y, r * np.cos(angle), r * np.sin(angle),
head_width=0.35, head_length=0.5, fc='k', ec='k')
def plotter(fdict):
""" Go """
pgconn = get_dbconn('iem')
cursor = pgconn.cursor(cursor_factory=psycopg2.extras.DictCursor)
ctx = get_autoplot_context(fdict, get_description())
station = ctx['zstation']
units = ctx['units']
year = ctx['year']
month = ctx['month']
sts = datetime.date(year, month, 1)
ets = (sts + datetime.timedelta(days=35)).replace(day=1)
cursor.execute("""
SELECT day, avg_sknt, vector_avg_drct from summary s JOIN stations t
ON (t.iemid = s.iemid) WHERE t.id = %s and t.network = %s and
s.day >= %s and s.day < %s ORDER by day ASC
""", (station, ctx['network'], sts, ets))
days = []
drct = []
sknt = []
for row in cursor:
if row[1] is None:
continue
days.append(row[0].day)
drct.append(row[2])
sknt.append(row[1])
if not sknt:
raise NoDataFound("ERROR: No Data Found")
df = pd.DataFrame(dict(day=pd.Series(days),
drct=pd.Series(drct),
sknt= | pd.Series(sknt) | pandas.Series |