| prompt | completion | api |
|---|---|---|
| stringlengths 19-1.03M | stringlengths 4-2.12k | stringlengths 8-90 |
# Project: GBS Tool
# Author: Dr. <NAME>, <EMAIL>, denamics GmbH
# Date: February 27, 2018
# License: MIT License (see LICENSE file of this package for more information)
import warnings
import numpy as np
import pandas as pd
def getDataSubsetsRELoadOneWeek(dataframe, otherInputs):
"""
Selects datasubsets based on RE penetration levels and load levels. It searches for the weeks with min, mean, and
max absolute RE levels, and for min, mean, and max load levels.
:param dataframe: [Dataframe] contains full time-series of necessary model input channels
:param otherInputs: [list(str)] bin for additional parameters for implemented method. Currently not used.
:return datasubsets: [Dataframe of dataframes] the subsets of timeseries for all input channels required to run the
model organized depending on method of extraction.
:return databins: [DataFrame] provides weights each data frame in datasubsets should be given when extrapolating results.
This dataframe has two columns of the same length as the original time series. Column 'loadBins' contains the
binning based on average load (min = 1, mean = 2, max = 3), column 'varGenBins' does the same for the variable
generation dimension.
"""
# Window size: one week in seconds
# NOTE: to run on shorter data snippets (for faster iterations) you can reduce the window size here.
wdwSize = 60 * 60 * 24 * 7
# Get rolling one week load averages. We assume that time units are seconds here.
# just for convenience and readability we'll pull the dataframe apart
time = pd.Series(dataframe['time'])
firmLoadP = pd.Series(dataframe['firmLoadP'])
varGenP = pd.Series(dataframe['varGenP'])
# Inspect sampling rate to determine number of samples per week
srates = time.diff()
srate = srates.mean()
srateStd = srates.std()
if srateStd > 0:
warnings.warn(
'Non-uniform time vector detected. User is advised that gaps in the time vector can cause unreliable and unwanted behavior.')
# Inspect length of time vector to determine if there is at least one week of record
timeLength = time[-1:] - time[0]
# print(timeLength[timeLength.index.max()])
if timeLength[timeLength.index.max()] < 60 * 60 * 24 * 7:
warnings.warn(
'Input samples are less than one week long. Data reduction with RE-load-one-week method not possible. Full data set will be used.')
# Run through the load channel to find the weeks with the lowest, mean, and highest average load,
# AND run through the combined RE channels to find the weeks with the lowest, mean, and highest average RE P
# If the sampling rate is not one sample per second we need to adjust the window size
wind = int(np.round(wdwSize / srate))
firmLoadPWklyAvg = firmLoadP.copy()
firmLoadPWklyMax = firmLoadP.copy()
firmLoadPWklyMin = firmLoadP.copy()
varGenPWklyAvg = varGenP.copy()
varGenPWklyMax = varGenP.copy()
varGenPWklyMin = varGenP.copy()
for indx in range(0, timeLength.index.max(), wind):
# Load calculations
firmLoadPWklyAvg[indx:indx + wind] = firmLoadP[indx:indx + wind].mean()
firmLoadPWklyMax[indx:indx + wind] = firmLoadP[indx:indx + wind].max()
firmLoadPWklyMin[indx:indx + wind] = firmLoadP[indx:indx + wind].min()
# Variable RE calculations
varGenPWklyAvg[indx:indx + wind] = varGenP[indx:indx + wind].mean()
varGenPWklyMax[indx:indx + wind] = varGenP[indx:indx + wind].max()
varGenPWklyMin[indx:indx + wind] = varGenP[indx:indx + wind].min()
# Get the mean load week
meanDiff = (firmLoadPWklyAvg - firmLoadPWklyAvg.mean()).abs()
minDiffIdx = meanDiff.idxmin()
meanLoadTime = time[minDiffIdx:minDiffIdx + wind]
firmLoadPMean = firmLoadP[minDiffIdx:minDiffIdx + wind]
varGenPLoadMean = varGenP[minDiffIdx:minDiffIdx + wind]
# Get the max load week
maxIdx = firmLoadPWklyMax.idxmax()
maxLoadTime = time[maxIdx:maxIdx + wind]
firmLoadPMax = firmLoadP[maxIdx:maxIdx + wind]
varGenPLoadMax = varGenP[maxIdx:maxIdx + wind]
# Get the min load week
minIdx = firmLoadPWklyMin.idxmin()
minLoadTime = time[minIdx:minIdx + wind]
firmLoadPMin = firmLoadP[minIdx:minIdx + wind]
varGenPLoadMin = varGenP[minIdx:minIdx + wind]
# Get the mean var gen week
meanDiffVG = (varGenPWklyAvg - varGenPWklyAvg.mean()).abs()
minDiffIdxVG = meanDiffVG.idxmin()
meanVGTime = time[minDiffIdxVG:minDiffIdxVG + wind]
firmLoadPVGMean = firmLoadP[minDiffIdxVG:minDiffIdxVG + wind]
varGenPMean = varGenP[minDiffIdxVG:minDiffIdxVG + wind]
# Get the max var gen week
maxIdxVG = varGenPWklyMax.idxmax()
maxVGTime = time[maxIdxVG:maxIdxVG + wind]
firmLoadPVGMax = firmLoadP[maxIdxVG:maxIdxVG + wind]
varGenPMax = varGenP[maxIdxVG:maxIdxVG + wind]
# Get the min var gen week
minIdxVG = varGenPWklyMin.idxmin()
minVGTime = time[minIdxVG:minIdxVG + wind]
firmLoadPVGMin = firmLoadP[minIdxVG:minIdxVG + wind]
varGenPMin = varGenP[minIdxVG:minIdxVG + wind]
# Create data binning
# We will bin by similarity. That is, we check if the weekly mean is closest to min/mean/max and bin accordingly
# Set up binning dimensions first:
firmLoadPBin = firmLoadP.copy()
varGenPBin = varGenP.copy()
for indx in range(0, timeLength.index.max(), wind):
# Load binning
minDiff = (firmLoadP[indx:indx + wind] - firmLoadPMin.mean()).abs()
minDiff = minDiff.mean()
meanDiff = (firmLoadP[indx:indx + wind] - firmLoadPMean.mean()).abs()
meanDiff = meanDiff.mean()
maxDiff = (firmLoadP[indx:indx + wind] - firmLoadPMax.mean()).abs()
maxDiff = maxDiff.mean()
if (minDiff < meanDiff) & (minDiff < maxDiff):
firmLoadPBin[indx:indx + wind] = 1
elif (meanDiff <= minDiff) & (meanDiff <= maxDiff):
firmLoadPBin[indx:indx + wind] = 2
else:
firmLoadPBin[indx:indx + wind] = 3
# Var Gen binning
minDiff = (varGenP[indx:indx + wind] - varGenPMin.mean()).abs()
minDiff = minDiff.mean()
meanDiff = (varGenP[indx:indx + wind] - varGenPMean.mean()).abs()
meanDiff = meanDiff.mean()
maxDiff = (varGenP[indx:indx + wind] - varGenPMax.mean()).abs()
maxDiff = maxDiff.mean()
if (minDiff < meanDiff) & (minDiff < maxDiff):
varGenPBin[indx:indx + wind] = 1
elif (meanDiff <= minDiff) & (meanDiff <= maxDiff):
varGenPBin[indx:indx + wind] = 2
else:
varGenPBin[indx:indx + wind] = 3
# assemble data sets
datasubsetMinLoad = pd.DataFrame({'time': minLoadTime, 'firmLoadP': firmLoadPMin, 'varGenP': varGenPLoadMin})
datasubsetMeanLoad = pd.DataFrame({'time': meanLoadTime, 'firmLoadP': firmLoadPMean, 'varGenP': varGenPLoadMean})
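# --- Hedged usage sketch (added for illustration; not part of the original GBS Tool source) ---
# A minimal way to exercise getDataSubsetsRELoadOneWeek with a synthetic one-second time
# series. The two-week random load/RE profile below is an assumption made purely for
# demonstration, and the function as excerpted here is truncated before the final subset
# assembly and return, so only the week selection and binning logic above is exercised.
if __name__ == '__main__':
    nSeconds = 60 * 60 * 24 * 14  # two weeks at one sample per second
    rng = np.random.default_rng(0)
    demoDf = pd.DataFrame({
        'time': np.arange(nSeconds),  # seconds since start of record
        'firmLoadP': 500 + 50 * rng.standard_normal(nSeconds),  # synthetic firm load
        'varGenP': np.clip(200 + 100 * rng.standard_normal(nSeconds), 0, None),  # synthetic RE output
    })
    getDataSubsetsRELoadOneWeek(demoDf, otherInputs=[])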
# -*- coding: utf-8 -*-
"""
Created on Sat Oct 13 15:41:00 2018
@author: <NAME>
"""
import pandas as pd
import numpy as np
from scipy.stats import binned_statistic
from sklearn.feature_extraction.text import CountVectorizer
import matplotlib.pyplot as plt
import matplotlib
from .colors import numeric_cmap, categoric_cmap, colors
def categorical_stats(data, x, y=None, color=None, stats=None):
"""
Get groupby statistics from a pandas dataframe
Arguments
---------
data: dataFrame
raw data
x: str
column to group stats by
y: str
column to get stats of
color: str
subgroups column, as in plotting
stats: None, 'quartiles', or other
the statistics to get.
If None, get counts.
If 'quartiles', get quartiles.
Other stats should be pandas groupby.agg functions, e.g. 'mean' or
['mean', 'std']
Returns
-------
statistics: dataFrame
stats with columns, x, color, and statistics columns
"""
df = pd.DataFrame()
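# --- Hedged sketch (added; the original implementation of categorical_stats is truncated above) ---
# One plausible way to realize the docstring's contract with plain pandas groupby:
# counts when stats is None, quartiles via quantile(), and arbitrary .agg() arguments
# otherwise. The helper name and the toy data below are assumptions for illustration only.
def _categorical_stats_sketch(data, x, y=None, color=None, stats=None):
    group_cols = [x] if color is None else [x, color]
    if stats is None:
        # counts per group
        return data.groupby(group_cols).size().rename('count').reset_index()
    if stats == 'quartiles':
        # 25th/50th/75th percentiles of y per group
        return (data.groupby(group_cols)[y]
                .quantile([0.25, 0.5, 0.75])
                .unstack()
                .reset_index())
    # any pandas groupby.agg specification, e.g. 'mean' or ['mean', 'std']
    return data.groupby(group_cols)[y].agg(stats).reset_index()

# Example (illustrative):
#   demo = pd.DataFrame({'cat': ['a', 'a', 'b', 'b'], 'val': [1.0, 2.0, 3.0, 5.0]})
#   _categorical_stats_sketch(demo, x='cat')                                  # counts per category
#   _categorical_stats_sketch(demo, x='cat', y='val', stats=['mean', 'std'])  # mean/std per category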
"""Code to determine branches of strong connections from activity"""
import logging
import numpy as np
import pandas as pd
from tqdm.auto import tqdm as pbar
import matplotlib.ticker
import matplotlib.cm
import matplotlib.colors
from tctx.util import spike_trains as spt, plot, parallel, plot_graph
from tctx.analysis import simbatch as sb
from tctx.analysis import amat as am
########################################################################################################################
# Plotting
def _get_label_colors(count=100):
label_colors = {
np.nan: 'xkcd:grey',
-3: 'xkcd:charcoal',
-2: 'xkcd:grey',
-1: 'xkcd:purple',
0: plot.styles_df.loc['cluster_a', 'main'], # green
1: plot.styles_df.loc['cluster_b', 'main'], # orange
2: plot.styles_df.loc['cluster_c', 'main'], # yellow
3: plot.styles_df.loc['cluster_d', 'main'], # brown-ish
4: 'xkcd:royal blue',
5: 'xkcd:red',
6: 'xkcd:pink',
7: 'xkcd:cyan',
8: 'xkcd:olive',
9: 'xkcd:coral',
10: 'xkcd:black',
11: 'xkcd:sage',
12: 'xkcd:sienna',
13: 'xkcd:sick green',
14: 'xkcd:cloudy blue',
15: 'xkcd:strong pink',
16: 'xkcd:windows blue',
17: 'xkcd:purpley grey',
18: 'xkcd:old rose',
19: 'xkcd:seafoam',
20: 'xkcd:baby blue',
}
for i in range(int(np.nanmax(list(label_colors.keys()))), count):
label_colors[i] = matplotlib.cm.get_cmap('jet')(np.random.rand())
label_colors['a'] = label_colors[0]
label_colors['b'] = label_colors[1]
return label_colors
LABEL_COLORS = _get_label_colors()
LABEL_COLORS_DARK = {
k: plot.lighten_color(v, 1.5)
for k, v in LABEL_COLORS.items()
}
def _colored_matrix_set_item_labels(row_labels, col_labels, by='both') -> np.ndarray:
"""
produce a single matrix with integer values representing the
merged label of row & columns
"""
if by == 'none':
label_mat = np.zeros((len(row_labels), len(col_labels)))
elif by == 'col':
label_mat = np.tile(col_labels, (len(row_labels), 1))
elif by == 'row':
label_mat = np.tile(row_labels, (len(col_labels), 1)).T
else:
label_mesh = np.array(np.meshgrid(col_labels, row_labels))
_, combined_label = np.unique(label_mesh.reshape(2, -1).T, axis=0, return_inverse=True)
label_mat = combined_label.reshape((len(row_labels), len(col_labels)))
return label_mat
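# --- Hedged illustration (added; not part of the original module) ---
# A tiny 3x2 example of the merged-label matrix. The particular integer codes depend on
# sort order and are arbitrary; the invariant is that two cells share a value exactly when
# both their row and column labels match. With by='row' or by='col' the matrix simply
# repeats the row or column labels instead.
# >>> _colored_matrix_set_item_labels([0, 0, 1], [10, 20], by='both')
# array([[0, 2],
#        [0, 2],
#        [1, 3]])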
def _colored_matrix_get_mapping(
label_colors, background_color='#EFEFEF', outside_color='xkcd:charcoal',
vmin=-10000, vmax=1000
):
"""
Generates matplotlib-friendly cmap and norm objects
"""
label_colors = label_colors.sort_index()
outside_color = np.array(matplotlib.colors.to_rgb(outside_color))
colors = [outside_color] + list(label_colors.values) + [outside_color]
label_cmap = matplotlib.colors.LinearSegmentedColormap.from_list(
'label_cmap', colors, len(colors))
label_cmap.set_bad(color=background_color)
if len(label_colors) > 1:
boundaries = label_colors.index.values
boundaries = (boundaries[1:] + boundaries[:-1]) * .5
boundaries = np.concatenate([[vmin, np.min(boundaries) - 1], boundaries, [np.max(boundaries) + 1, vmax]])
else:
assert len(label_colors) == 1
v = label_colors.index.values[0]
boundaries = [vmin, v - 1, v + 1, vmax]
norm = matplotlib.colors.BoundaryNorm(boundaries, len(colors))
return label_cmap, norm
def _colored_matrix_mark_trials(ax, amat_sorted, example_trial_idcs, facecolor=None, s=30):
"""Draw a little arrow below the given trials"""
if facecolor is None:
facecolor = 'k'
ax.scatter(
[amat_sorted.columns.get_loc(trial_idx) for trial_idx in example_trial_idcs],
y=[0] * len(example_trial_idcs),
marker='^',
facecolor=facecolor,
transform=ax.get_xaxis_transform(),
s=s,
clip_on=False,
)
def plot_colored_matrix(
ax, amat: pd.DataFrame,
row_label=None, col_label=None,
label_colors=None, color_by=None, background_color='#EFEFEF',
mark_trials=None, mark_trials_colors=None,
):
"""labels must come sorted and sharing index with the index/column of the matrix"""
if label_colors is None:
label_colors = LABEL_COLORS
if color_by is None:
if row_label is not None and col_label is None:
color_by = 'row'
elif row_label is None and col_label is not None:
color_by = 'col'
elif row_label is None and col_label is None:
color_by = 'none'
else:
color_by = 'both'
if row_label is None:
row_label = np.ones(amat.shape[0])
if not isinstance(row_label, pd.Series):
row_label = pd.Series(np.asarray(row_label), index=amat.index)
row_label = row_label.reindex(amat.index)
if col_label is None:
col_label = np.ones(amat.shape[1])
if not isinstance(col_label, pd.Series):
col_label = pd.Series(np.asarray(col_label), index=amat.columns)
col_label = col_label.reindex(amat.columns)
labels = _colored_matrix_set_item_labels(row_label, col_label, color_by)
unique_labels, labels_rebased = np.unique(labels, return_inverse=True)
labels_rebased = labels_rebased.reshape(labels.shape)
# we're going to use nan to represent False
labels_rebased = labels_rebased.astype(float)
labels_rebased[~amat.values] = np.nan
mapping = dict(zip(unique_labels, np.arange(len(unique_labels))))
label_colors_rebased = pd.Series({ni: label_colors[i] for i, ni in mapping.items()})
cmap, norm = _colored_matrix_get_mapping(label_colors_rebased, background_color)
ax.imshow(labels_rebased, cmap=cmap, norm=norm, origin='lower')
plot.remove_spines(ax)
ax.tick_params(left=False, labelleft=False, bottom=False, labelbottom=False)
if mark_trials is not None:
_colored_matrix_mark_trials(ax, amat, mark_trials, facecolor=mark_trials_colors)
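# --- Hedged usage sketch (added; not part of the original module) ---
# A minimal driver for plot_colored_matrix on a random boolean "activity" matrix with a
# per-row cluster label. The figure size, names and random data are assumptions for
# illustration only; the function itself is used as defined above.
def _demo_plot_colored_matrix():
    import matplotlib.pyplot as plt  # not imported at module level above
    rng = np.random.default_rng(0)
    amat = pd.DataFrame(
        rng.random((8, 12)) > .5,
        index=[f'cell_{i}' for i in range(8)],
        columns=[f'trial_{j}' for j in range(12)],
    )
    # one cluster label per row; values 0-3 map to entries in LABEL_COLORS
    row_label = pd.Series([0, 0, 0, 1, 1, 2, 2, 3], index=amat.index)
    fig, ax = plt.subplots(figsize=(4, 3))
    plot_colored_matrix(ax, amat, row_label=row_label)
    return fig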
class RoutingGraphPlot:
"""A single figure of a graph showing the path activity took"""
def __init__(self, graph: plot_graph.Graph, source_gid):
self.graph = graph
self.source_gid = source_gid
def copy(self):
return self.__class__(self.graph.copy(), self.source_gid)
def get_scaled_node_size(self, node_count_range=(15, 50), node_size_range=(5, 30)):
"""linear interpolation of node size based on graph size"""
cell_count = len(self.graph.nodes)
size_norm = (cell_count - node_count_range[0]) / (node_count_range[1] - node_count_range[0])
node_size = node_size_range[1] - (node_size_range[1] - node_size_range[0]) * size_norm
# noinspection PyTypeChecker
node_size = int(min(max(node_size_range[0], node_size), node_size_range[1]))
return node_size
@classmethod
def prepare_graph(
cls,
interesting_cells: pd.DataFrame,
sel_conns: pd.DataFrame,
color_bkg='xkcd:light grey',
orientation='vertical',
):
source = interesting_cells[interesting_cells['is_targeted']]
assert len(source) == 1, source
source_gid = source.index
nodes = interesting_cells.copy()
sb.CAT.add_cats_cells(nodes)
nodes['style'] = interesting_cells['ei_type'].astype(str)
edges = sel_conns.copy()
valid = edges['source'].isin(interesting_cells.index) & edges['target'].isin(interesting_cells.index)
if not np.all(valid):
logging.warning(f'Dropping {np.count_nonzero(~valid)}/{len(valid)} conns with missing nodes')
edges = edges[valid]
sb.CAT.add_cats_conns(edges)
edges['style'] = edges['con_type'].map(lambda x: f'{x[:1]}')
graph = plot_graph.Graph(nodes, edges)
graph.styles.loc['e-bkg'] = plot.style_mix('grey', marker_space='e', main=color_bkg)
graph.styles.loc['i-bkg'] = plot.style_mix('grey', marker_space='i', main=color_bkg)
graph.layout_best_fit(around=source_gid[0], orientation=orientation)
return cls(graph, source_gid)
@staticmethod
def _get_active(graph, active_nodes, active_edges) -> (pd.Series, pd.Series):
if active_nodes is None:
active_nodes = pd.Series(True, index=graph.nodes.index)
if active_nodes.dtype != 'bool':
active_nodes = pd.Series(True, index=active_nodes)
active_nodes = active_nodes.reindex(graph.nodes.index, fill_value=False)
if isinstance(active_edges, str):
active_target = active_nodes.reindex(graph.edges['target'], fill_value=False).values
active_source = active_nodes.reindex(graph.edges['source'], fill_value=False).values
active_edges = active_edges.lower()
if active_edges == 'target':
active_edges = pd.Series(active_target, index=graph.edges.index)
elif active_edges == 'source':
active_edges = pd.Series(active_source, index=graph.edges.index)
# -*- coding:utf-8 -*-
# /usr/bin/env python
"""
Author: <NAME>
date: 2020/1/9 22:52
contact: <EMAIL>
desc: Jin10 Data Center - Economic Indicators - Central Bank Rates - Major Central Bank Rates
https://datacenter.jin10.com/economic
Federal Reserve interest rate decision report
European Central Bank decision report
Reserve Bank of New Zealand decision report
People's Bank of China decision report
Swiss National Bank decision report
Bank of England decision report
Reserve Bank of Australia decision report
Bank of Japan decision report
Bank of Russia decision report
Reserve Bank of India decision report
Central Bank of Brazil decision report
"""
import json
import time
import pandas as pd
import requests
# Jin10 Data Center - Economic Indicators - Central Bank Rates - Major Central Bank Rates - Federal Reserve interest rate decision report
def macro_bank_usa_interest_rate():
"""
Federal Reserve interest rate decision report, data available from 1982-09-27 to the present
https://datacenter.jin10.com/reportType/dc_usa_interest_rate_decision
https://cdn.jin10.com/dc/reports/dc_usa_interest_rate_decision_all.js?v=1578581921
:return: Federal Reserve interest rate decision report - latest value (%)
:rtype: pandas.Series
"""
t = time.time()
res = requests.get(
f"https://cdn.jin10.com/dc/reports/dc_usa_interest_rate_decision_all.js?v={str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)}"
)
json_data = json.loads(res.text[res.text.find("{") : res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["美国利率决议"] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
temp_df = value_df["今值(%)"]
temp_df.name = "usa_interest_rate"
return temp_df
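# --- Hedged usage note (added; not part of the original source) ---
# Calling the function performs a live HTTP request against the Jin10 CDN, so the snippet
# below is indicative only and depends on the endpoint remaining available:
# >>> usa_rate = macro_bank_usa_interest_rate()
# >>> usa_rate.tail()  # most recent Federal Reserve decisions, today's value in %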
# Jin10 Data Center - Economic Indicators - Central Bank Rates - Major Central Bank Rates - European Central Bank decision report
def macro_bank_euro_interest_rate():
"""
European Central Bank decision report, data available from 1999-01-01 to the present
https://datacenter.jin10.com/reportType/dc_interest_rate_decision
https://cdn.jin10.com/dc/reports/dc_interest_rate_decision_all.js?v=1578581663
:return: European Central Bank decision report - latest value (%)
:rtype: pandas.Series
"""
t = time.time()
res = requests.get(
f"https://cdn.jin10.com/dc/reports/dc_interest_rate_decision_all.js?v={str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)}"
)
json_data = json.loads(res.text[res.text.find("{") : res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["欧元区利率决议"] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
temp_df = value_df["今值(%)"]
temp_df.name = "euro_interest_rate"
return temp_df
# Jin10 Data Center - Economic Indicators - Central Bank Rates - Major Central Bank Rates - Reserve Bank of New Zealand decision report
def macro_bank_newzealand_interest_rate():
"""
Reserve Bank of New Zealand decision report, data available from 1999-04-01 to the present
https://datacenter.jin10.com/reportType/dc_newzealand_interest_rate_decision
https://cdn.jin10.com/dc/reports/dc_newzealand_interest_rate_decision_all.js?v=1578582075
:return: Reserve Bank of New Zealand decision report - latest value (%)
:rtype: pandas.Series
"""
t = time.time()
res = requests.get(
f"https://cdn.jin10.com/dc/reports/dc_newzealand_interest_rate_decision_all.js?v={str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)}"
)
json_data = json.loads(res.text[res.text.find("{") : res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["新西兰利率决议报告"] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
temp_df = value_df["今值(%)"]
temp_df.name = "newzealand_interest_rate"
return temp_df
# Jin10 Data Center - Economic Indicators - Central Bank Rates - Major Central Bank Rates - People's Bank of China decision report
def macro_bank_china_interest_rate():
"""
People's Bank of China interest rate report, data available from 1991-05-01 to the present
https://datacenter.jin10.com/reportType/dc_china_interest_rate_decision
https://cdn.jin10.com/dc/reports/dc_china_interest_rate_decision_all.js?v=1578582163
:return: People's Bank of China interest rate report - latest value (%)
:rtype: pandas.Series
"""
t = time.time()
res = requests.get(
f"https://cdn.jin10.com/dc/reports/dc_china_interest_rate_decision_all.js?v={str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)}"
)
json_data = json.loads(res.text[res.text.find("{") : res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["中国人民银行利率报告"] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
temp_df = value_df["今值(%)"]
temp_df.name = "china_interest_rate"
return temp_df
# Jin10 Data Center - Economic Indicators - Central Bank Rates - Major Central Bank Rates - Swiss National Bank decision report
def macro_bank_switzerland_interest_rate():
"""
Swiss National Bank interest rate decision report, data available from 2008-03-13 to the present
https://datacenter.jin10.com/reportType/dc_switzerland_interest_rate_decision
https://cdn.jin10.com/dc/reports/dc_switzerland_interest_rate_decision_all.js?v=1578582240
:return: Swiss National Bank interest rate decision report - latest value (%)
:rtype: pandas.Series
"""
t = time.time()
res = requests.get(
f"https://cdn.jin10.com/dc/reports/dc_switzerland_interest_rate_decision_all.js?v={str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)}"
)
json_data = json.loads(res.text[res.text.find("{") : res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["瑞士央行利率决议报告"] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
temp_df = value_df["今值(%)"]
temp_df.name = "switzerland_interest_rate"
return temp_df
# Jin10 Data Center - Economic Indicators - Central Bank Rates - Major Central Bank Rates - Bank of England decision report
def macro_bank_english_interest_rate():
"""
Bank of England decision report, data available from 1970-01-01 to the present
https://datacenter.jin10.com/reportType/dc_english_interest_rate_decision
https://cdn.jin10.com/dc/reports/dc_english_interest_rate_decision_all.js?v=1578582331
:return: Bank of England decision report - latest value (%)
:rtype: pandas.Series
"""
t = time.time()
res = requests.get(
f"https://cdn.jin10.com/dc/reports/dc_english_interest_rate_decision_all.js?v={str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)}"
)
json_data = json.loads(res.text[res.text.find("{") : res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["英国利率决议报告"] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
temp_df = value_df["今值(%)"]
temp_df.name = "english_interest_rate"
return temp_df
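# --- Hedged refactoring sketch (added; not part of the original source) ---
# The report functions above differ only in the CDN slug, the key inside item["datas"],
# and the name given to the returned Series, so the shared fetch-and-parse steps could be
# factored into a helper like the one below. The helper name and signature are assumptions,
# not part of the original API; the request/parsing lines mirror the original code exactly.
def _macro_bank_interest_rate(slug, data_key, series_name):
    t = time.time()
    res = requests.get(
        f"https://cdn.jin10.com/dc/reports/{slug}_all.js?v={str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)}"
    )
    json_data = json.loads(res.text[res.text.find("{"): res.text.rfind("}") + 1])
    date_list = [item["date"] for item in json_data["list"]]
    value_list = [item["datas"][data_key] for item in json_data["list"]]
    value_df = pd.DataFrame(value_list)
    value_df.columns = json_data["kinds"]
    value_df.index = pd.to_datetime(date_list)
    temp_df = value_df["今值(%)"]
    temp_df.name = series_name
    return temp_df

# e.g. macro_bank_usa_interest_rate() would then be equivalent to:
# _macro_bank_interest_rate("dc_usa_interest_rate_decision", "美国利率决议", "usa_interest_rate")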
# -*- coding: utf-8 -*-
# pylint: disable-msg=W0612,E1101
import itertools
import warnings
from warnings import catch_warnings
from datetime import datetime
from pandas.types.common import (is_integer_dtype,
is_float_dtype,
is_scalar)
from pandas.compat import range, lrange, lzip, StringIO, lmap
from pandas.tslib import NaT
from numpy import nan
from numpy.random import randn
import numpy as np
import pandas as pd
from pandas import option_context
from pandas.core.indexing import _non_reducing_slice, _maybe_numeric_slice
from pandas.core.api import (DataFrame, Index, Series, Panel, isnull,
MultiIndex, Timestamp, Timedelta, UInt64Index)
from pandas.formats.printing import pprint_thing
from pandas import concat
from pandas.core.common import PerformanceWarning
from pandas.tests.indexing.common import _mklbl
import pandas.util.testing as tm
from pandas import date_range
_verbose = False
# ------------------------------------------------------------------------
# Indexing test cases
def _generate_indices(f, values=False):
""" generate the indicies
if values is True , use the axis values
is False, use the range
"""
axes = f.axes
if values:
axes = [lrange(len(a)) for a in axes]
return itertools.product(*axes)
def _get_value(f, i, values=False):
""" return the value for the location i """
# check against values
if values:
return f.values[i]
# this is equiv of f[col][row].....
# v = f
# for a in reversed(i):
# v = v.__getitem__(a)
# return v
with catch_warnings(record=True):
return f.ix[i]
def _get_result(obj, method, key, axis):
""" return the result for this obj with this key and this axis """
if isinstance(key, dict):
key = key[axis]
# use an artificial conversion to map the key as integers to the labels
# so ix can work for comparisons
if method == 'indexer':
method = 'ix'
key = obj._get_axis(axis)[key]
# in case we actually want 0 index slicing
try:
xp = getattr(obj, method).__getitem__(_axify(obj, key, axis))
except:
xp = getattr(obj, method).__getitem__(key)
return xp
def _axify(obj, key, axis):
# create a tuple accessor
axes = [slice(None)] * obj.ndim
axes[axis] = key
return tuple(axes)
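# --- Hedged illustration (added) ---
# For a DataFrame (ndim == 2), _axify pads the key with full slices on the other axes:
# >>> _axify(DataFrame(np.ones((3, 3))), [0, 1], 1)
# (slice(None, None, None), [0, 1])
# so obj.iloc[_axify(obj, key, axis)] selects `key` along `axis` only.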
class TestIndexing(tm.TestCase):
_objs = set(['series', 'frame', 'panel'])
_typs = set(['ints', 'uints', 'labels', 'mixed',
'ts', 'floats', 'empty', 'ts_rev'])
def setUp(self):
self.series_ints = Series(np.random.rand(4), index=lrange(0, 8, 2))
self.frame_ints = DataFrame(np.random.randn(4, 4),
index=lrange(0, 8, 2),
columns=lrange(0, 12, 3))
self.panel_ints = Panel(np.random.rand(4, 4, 4),
items=lrange(0, 8, 2),
major_axis=lrange(0, 12, 3),
minor_axis=lrange(0, 16, 4))
self.series_uints = Series(np.random.rand(4),
index=UInt64Index(lrange(0, 8, 2)))
self.frame_uints = DataFrame(np.random.randn(4, 4),
index=UInt64Index(lrange(0, 8, 2)),
columns=UInt64Index(lrange(0, 12, 3)))
self.panel_uints = Panel(np.random.rand(4, 4, 4),
items=UInt64Index(lrange(0, 8, 2)),
major_axis=UInt64Index(lrange(0, 12, 3)),
minor_axis=UInt64Index(lrange(0, 16, 4)))
self.series_labels = Series(np.random.randn(4), index=list('abcd'))
self.frame_labels = DataFrame(np.random.randn(4, 4),
index=list('abcd'), columns=list('ABCD'))
self.panel_labels = Panel(np.random.randn(4, 4, 4),
items=list('abcd'),
major_axis=list('ABCD'),
minor_axis=list('ZYXW'))
self.series_mixed = Series(np.random.randn(4), index=[2, 4, 'null', 8])
self.frame_mixed = DataFrame(np.random.randn(4, 4),
index=[2, 4, 'null', 8])
self.panel_mixed = Panel(np.random.randn(4, 4, 4),
items=[2, 4, 'null', 8])
self.series_ts = Series(np.random.randn(4),
index=date_range('20130101', periods=4))
self.frame_ts = DataFrame(np.random.randn(4, 4),
index=date_range('20130101', periods=4))
self.panel_ts = Panel(np.random.randn(4, 4, 4),
items=date_range('20130101', periods=4))
dates_rev = (date_range('20130101', periods=4)
.sort_values(ascending=False))
self.series_ts_rev = Series(np.random.randn(4),
index=dates_rev)
self.frame_ts_rev = DataFrame(np.random.randn(4, 4),
index=dates_rev)
self.panel_ts_rev = Panel(np.random.randn(4, 4, 4),
items=dates_rev)
self.frame_empty = DataFrame({})
self.series_empty = Series({})
self.panel_empty = Panel({})
# form agglomerates
for o in self._objs:
d = dict()
for t in self._typs:
d[t] = getattr(self, '%s_%s' % (o, t), None)
setattr(self, o, d)
def check_values(self, f, func, values=False):
if f is None:
return
axes = f.axes
indicies = itertools.product(*axes)
for i in indicies:
result = getattr(f, func)[i]
# check against values
if values:
expected = f.values[i]
else:
expected = f
for a in reversed(i):
expected = expected.__getitem__(a)
tm.assert_almost_equal(result, expected)
def check_result(self, name, method1, key1, method2, key2, typs=None,
objs=None, axes=None, fails=None):
def _eq(t, o, a, obj, k1, k2):
""" compare equal for these 2 keys """
if a is not None and a > obj.ndim - 1:
return
def _print(result, error=None):
if error is not None:
error = str(error)
v = ("%-16.16s [%-16.16s]: [typ->%-8.8s,obj->%-8.8s,"
"key1->(%-4.4s),key2->(%-4.4s),axis->%s] %s" %
(name, result, t, o, method1, method2, a, error or ''))
if _verbose:
pprint_thing(v)
try:
rs = getattr(obj, method1).__getitem__(_axify(obj, k1, a))
try:
xp = _get_result(obj, method2, k2, a)
except:
result = 'no comp'
_print(result)
return
detail = None
try:
if is_scalar(rs) and is_scalar(xp):
self.assertEqual(rs, xp)
elif xp.ndim == 1:
tm.assert_series_equal(rs, xp)
elif xp.ndim == 2:
tm.assert_frame_equal(rs, xp)
elif xp.ndim == 3:
tm.assert_panel_equal(rs, xp)
result = 'ok'
except AssertionError as e:
detail = str(e)
result = 'fail'
# reverse the checks
if fails is True:
if result == 'fail':
result = 'ok (fail)'
_print(result)
if not result.startswith('ok'):
raise AssertionError(detail)
except AssertionError:
raise
except Exception as detail:
# if we are in fails, the ok, otherwise raise it
if fails is not None:
if isinstance(detail, fails):
result = 'ok (%s)' % type(detail).__name__
_print(result)
return
result = type(detail).__name__
raise AssertionError(_print(result, error=detail))
if typs is None:
typs = self._typs
if objs is None:
objs = self._objs
if axes is not None:
if not isinstance(axes, (tuple, list)):
axes = [axes]
else:
axes = list(axes)
else:
axes = [0, 1, 2]
# check
for o in objs:
if o not in self._objs:
continue
d = getattr(self, o)
for a in axes:
for t in typs:
if t not in self._typs:
continue
obj = d[t]
if obj is not None:
obj = obj.copy()
k2 = key2
_eq(t, o, a, obj, key1, k2)
def test_ix_deprecation(self):
# GH 15114
df = DataFrame({'A': [1, 2, 3]})
with tm.assert_produces_warning(DeprecationWarning,
check_stacklevel=False):
df.ix[1, 'A']
def test_indexer_caching(self):
# GH5727
# make sure that indexers are in the _internal_names_set
n = 1000001
arrays = [lrange(n), lrange(n)]
index = MultiIndex.from_tuples(lzip(*arrays))
s = Series(np.zeros(n), index=index)
str(s)
# setitem
expected = Series(np.ones(n), index=index)
s = Series(np.zeros(n), index=index)
s[s == 0] = 1
tm.assert_series_equal(s, expected)
def test_at_and_iat_get(self):
def _check(f, func, values=False):
if f is not None:
indicies = _generate_indices(f, values)
for i in indicies:
result = getattr(f, func)[i]
expected = _get_value(f, i, values)
tm.assert_almost_equal(result, expected)
for o in self._objs:
d = getattr(self, o)
# iat
for f in [d['ints'], d['uints']]:
_check(f, 'iat', values=True)
for f in [d['labels'], d['ts'], d['floats']]:
if f is not None:
self.assertRaises(ValueError, self.check_values, f, 'iat')
# at
for f in [d['ints'], d['uints'], d['labels'],
d['ts'], d['floats']]:
_check(f, 'at')
def test_at_and_iat_set(self):
def _check(f, func, values=False):
if f is not None:
indicies = _generate_indices(f, values)
for i in indicies:
getattr(f, func)[i] = 1
expected = _get_value(f, i, values)
tm.assert_almost_equal(expected, 1)
for t in self._objs:
d = getattr(self, t)
# iat
for f in [d['ints'], d['uints']]:
_check(f, 'iat', values=True)
for f in [d['labels'], d['ts'], d['floats']]:
if f is not None:
self.assertRaises(ValueError, _check, f, 'iat')
# at
for f in [d['ints'], d['uints'], d['labels'],
d['ts'], d['floats']]:
_check(f, 'at')
def test_at_iat_coercion(self):
# as timestamp is not a tuple!
dates = date_range('1/1/2000', periods=8)
df = DataFrame(randn(8, 4), index=dates, columns=['A', 'B', 'C', 'D'])
s = df['A']
result = s.at[dates[5]]
xp = s.values[5]
self.assertEqual(result, xp)
# GH 7729
# make sure we are boxing the returns
s = Series(['2014-01-01', '2014-02-02'], dtype='datetime64[ns]')
expected = Timestamp('2014-02-02')
for r in [lambda: s.iat[1], lambda: s.iloc[1]]:
result = r()
self.assertEqual(result, expected)
s = Series(['1 days', '2 days'], dtype='timedelta64[ns]')
expected = Timedelta('2 days')
for r in [lambda: s.iat[1], lambda: s.iloc[1]]:
result = r()
self.assertEqual(result, expected)
def test_iat_invalid_args(self):
pass
def test_imethods_with_dups(self):
# GH6493
# iat/iloc with dups
s = Series(range(5), index=[1, 1, 2, 2, 3], dtype='int64')
result = s.iloc[2]
self.assertEqual(result, 2)
result = s.iat[2]
self.assertEqual(result, 2)
self.assertRaises(IndexError, lambda: s.iat[10])
self.assertRaises(IndexError, lambda: s.iat[-10])
result = s.iloc[[2, 3]]
expected = Series([2, 3], [2, 2], dtype='int64')
tm.assert_series_equal(result, expected)
df = s.to_frame()
result = df.iloc[2]
expected = Series(2, index=[0], name=2)
tm.assert_series_equal(result, expected)
result = df.iat[2, 0]
expected = 2
self.assertEqual(result, 2)
def test_repeated_getitem_dups(self):
# GH 5678
# repeated getitems on a dup index returning an ndarray
df = DataFrame(
np.random.random_sample((20, 5)),
index=['ABCDE' [x % 5] for x in range(20)])
expected = df.loc['A', 0]
result = df.loc[:, 0].loc['A']
tm.assert_series_equal(result, expected)
def test_iloc_exceeds_bounds(self):
# GH6296
# iloc should allow indexers that exceed the bounds
df = DataFrame(np.random.random_sample((20, 5)), columns=list('ABCDE'))
expected = df
# lists of positions should raise IndexError!
with tm.assertRaisesRegexp(IndexError,
'positional indexers are out-of-bounds'):
df.iloc[:, [0, 1, 2, 3, 4, 5]]
self.assertRaises(IndexError, lambda: df.iloc[[1, 30]])
self.assertRaises(IndexError, lambda: df.iloc[[1, -30]])
self.assertRaises(IndexError, lambda: df.iloc[[100]])
s = df['A']
self.assertRaises(IndexError, lambda: s.iloc[[100]])
self.assertRaises(IndexError, lambda: s.iloc[[-100]])
# still raise on a single indexer
msg = 'single positional indexer is out-of-bounds'
with tm.assertRaisesRegexp(IndexError, msg):
df.iloc[30]
self.assertRaises(IndexError, lambda: df.iloc[-30])
# GH10779
# single positive/negative indexer exceeding Series bounds should raise
# an IndexError
with tm.assertRaisesRegexp(IndexError, msg):
s.iloc[30]
self.assertRaises(IndexError, lambda: s.iloc[-30])
# slices are ok
result = df.iloc[:, 4:10] # 0 < start < len < stop
expected = df.iloc[:, 4:]
tm.assert_frame_equal(result, expected)
result = df.iloc[:, -4:-10] # stop < 0 < start < len
expected = df.iloc[:, :0]
tm.assert_frame_equal(result, expected)
result = df.iloc[:, 10:4:-1] # 0 < stop < len < start (down)
expected = df.iloc[:, :4:-1]
tm.assert_frame_equal(result, expected)
result = df.iloc[:, 4:-10:-1] # stop < 0 < start < len (down)
expected = df.iloc[:, 4::-1]
tm.assert_frame_equal(result, expected)
result = df.iloc[:, -10:4] # start < 0 < stop < len
expected = df.iloc[:, :4]
tm.assert_frame_equal(result, expected)
result = df.iloc[:, 10:4] # 0 < stop < len < start
expected = df.iloc[:, :0]
tm.assert_frame_equal(result, expected)
result = df.iloc[:, -10:-11:-1] # stop < start < 0 < len (down)
expected = df.iloc[:, :0]
tm.assert_frame_equal(result, expected)
result = df.iloc[:, 10:11] # 0 < len < start < stop
expected = df.iloc[:, :0]
tm.assert_frame_equal(result, expected)
# slice bounds exceeding is ok
result = s.iloc[18:30]
expected = s.iloc[18:]
tm.assert_series_equal(result, expected)
result = s.iloc[30:]
expected = s.iloc[:0]
tm.assert_series_equal(result, expected)
result = s.iloc[30::-1]
expected = s.iloc[::-1]
tm.assert_series_equal(result, expected)
# doc example
def check(result, expected):
str(result)
result.dtypes
tm.assert_frame_equal(result, expected)
dfl = DataFrame(np.random.randn(5, 2), columns=list('AB'))
check(dfl.iloc[:, 2:3], DataFrame(index=dfl.index))
check(dfl.iloc[:, 1:3], dfl.iloc[:, [1]])
check(dfl.iloc[4:6], dfl.iloc[[4]])
self.assertRaises(IndexError, lambda: dfl.iloc[[4, 5, 6]])
self.assertRaises(IndexError, lambda: dfl.iloc[:, 4])
def test_iloc_getitem_int(self):
# integer
self.check_result('integer', 'iloc', 2, 'ix',
{0: 4, 1: 6, 2: 8}, typs=['ints', 'uints'])
self.check_result('integer', 'iloc', 2, 'indexer', 2,
typs=['labels', 'mixed', 'ts', 'floats', 'empty'],
fails=IndexError)
def test_iloc_getitem_neg_int(self):
# neg integer
self.check_result('neg int', 'iloc', -1, 'ix',
{0: 6, 1: 9, 2: 12}, typs=['ints', 'uints'])
self.check_result('neg int', 'iloc', -1, 'indexer', -1,
typs=['labels', 'mixed', 'ts', 'floats', 'empty'],
fails=IndexError)
def test_iloc_getitem_list_int(self):
# list of ints
self.check_result('list int', 'iloc', [0, 1, 2], 'ix',
{0: [0, 2, 4], 1: [0, 3, 6], 2: [0, 4, 8]},
typs=['ints', 'uints'])
self.check_result('list int', 'iloc', [2], 'ix',
{0: [4], 1: [6], 2: [8]}, typs=['ints', 'uints'])
self.check_result('list int', 'iloc', [0, 1, 2], 'indexer', [0, 1, 2],
typs=['labels', 'mixed', 'ts', 'floats', 'empty'],
fails=IndexError)
# array of ints (GH5006), make sure that a single indexer is returning
# the correct type
self.check_result('array int', 'iloc', np.array([0, 1, 2]), 'ix',
{0: [0, 2, 4],
1: [0, 3, 6],
2: [0, 4, 8]}, typs=['ints', 'uints'])
self.check_result('array int', 'iloc', np.array([2]), 'ix',
{0: [4], 1: [6], 2: [8]}, typs=['ints', 'uints'])
self.check_result('array int', 'iloc', np.array([0, 1, 2]), 'indexer',
[0, 1, 2],
typs=['labels', 'mixed', 'ts', 'floats', 'empty'],
fails=IndexError)
def test_iloc_getitem_neg_int_can_reach_first_index(self):
# GH10547 and GH10779
# negative integers should be able to reach index 0
df = DataFrame({'A': [2, 3, 5], 'B': [7, 11, 13]})
s = df['A']
expected = df.iloc[0]
result = df.iloc[-3]
tm.assert_series_equal(result, expected)
expected = df.iloc[[0]]
result = df.iloc[[-3]]
tm.assert_frame_equal(result, expected)
expected = s.iloc[0]
result = s.iloc[-3]
self.assertEqual(result, expected)
expected = s.iloc[[0]]
result = s.iloc[[-3]]
tm.assert_series_equal(result, expected)
# check the length 1 Series case highlighted in GH10547
expected = pd.Series(['a'], index=['A'])
result = expected.iloc[[-1]]
tm.assert_series_equal(result, expected)
def test_iloc_getitem_dups(self):
# no dups in panel (bug?)
self.check_result('list int (dups)', 'iloc', [0, 1, 1, 3], 'ix',
{0: [0, 2, 2, 6], 1: [0, 3, 3, 9]},
objs=['series', 'frame'], typs=['ints', 'uints'])
# GH 6766
df1 = DataFrame([{'A': None, 'B': 1}, {'A': 2, 'B': 2}])
df2 = DataFrame([{'A': 3, 'B': 3}, {'A': 4, 'B': 4}])
df = concat([df1, df2], axis=1)
# cross-sectional indexing
result = df.iloc[0, 0]
self.assertTrue(isnull(result))
result = df.iloc[0, :]
expected = Series([np.nan, 1, 3, 3], index=['A', 'B', 'A', 'B'],
name=0)
tm.assert_series_equal(result, expected)
def test_iloc_getitem_array(self):
# array like
s = Series(index=lrange(1, 4))
self.check_result('array like', 'iloc', s.index, 'ix',
{0: [2, 4, 6], 1: [3, 6, 9], 2: [4, 8, 12]},
typs=['ints', 'uints'])
def test_iloc_getitem_bool(self):
# boolean indexers
b = [True, False, True, False, ]
self.check_result('bool', 'iloc', b, 'ix', b, typs=['ints', 'uints'])
self.check_result('bool', 'iloc', b, 'ix', b,
typs=['labels', 'mixed', 'ts', 'floats', 'empty'],
fails=IndexError)
def test_iloc_getitem_slice(self):
# slices
self.check_result('slice', 'iloc', slice(1, 3), 'ix',
{0: [2, 4], 1: [3, 6], 2: [4, 8]},
typs=['ints', 'uints'])
self.check_result('slice', 'iloc', slice(1, 3), 'indexer',
slice(1, 3),
typs=['labels', 'mixed', 'ts', 'floats', 'empty'],
fails=IndexError)
def test_iloc_getitem_slice_dups(self):
df1 = DataFrame(np.random.randn(10, 4), columns=['A', 'A', 'B', 'B'])
df2 = DataFrame(np.random.randint(0, 10, size=20).reshape(10, 2),
columns=['A', 'C'])
# axis=1
df = concat([df1, df2], axis=1)
tm.assert_frame_equal(df.iloc[:, :4], df1)
tm.assert_frame_equal(df.iloc[:, 4:], df2)
df = concat([df2, df1], axis=1)
tm.assert_frame_equal(df.iloc[:, :2], df2)
tm.assert_frame_equal(df.iloc[:, 2:], df1)
exp = concat([df2, df1.iloc[:, [0]]], axis=1)
tm.assert_frame_equal(df.iloc[:, 0:3], exp)
# axis=0
df = concat([df, df], axis=0)
tm.assert_frame_equal(df.iloc[0:10, :2], df2)
tm.assert_frame_equal(df.iloc[0:10, 2:], df1)
tm.assert_frame_equal(df.iloc[10:, :2], df2)
tm.assert_frame_equal(df.iloc[10:, 2:], df1)
def test_iloc_setitem(self):
df = self.frame_ints
df.iloc[1, 1] = 1
result = df.iloc[1, 1]
self.assertEqual(result, 1)
df.iloc[:, 2:3] = 0
expected = df.iloc[:, 2:3]
result = df.iloc[:, 2:3]
tm.assert_frame_equal(result, expected)
# GH5771
s = Series(0, index=[4, 5, 6])
s.iloc[1:2] += 1
expected = Series([0, 1, 0], index=[4, 5, 6])
tm.assert_series_equal(s, expected)
def test_loc_setitem_slice(self):
# GH10503
# assigning the same type should not change the type
df1 = DataFrame({'a': [0, 1, 1],
'b': Series([100, 200, 300], dtype='uint32')})
ix = df1['a'] == 1
newb1 = df1.loc[ix, 'b'] + 1
df1.loc[ix, 'b'] = newb1
expected = DataFrame({'a': [0, 1, 1],
'b': Series([100, 201, 301], dtype='uint32')})
tm.assert_frame_equal(df1, expected)
# assigning a new type should get the inferred type
df2 = DataFrame({'a': [0, 1, 1], 'b': [100, 200, 300]},
dtype='uint64')
ix = df1['a'] == 1
newb2 = df2.loc[ix, 'b']
df1.loc[ix, 'b'] = newb2
expected = DataFrame({'a': [0, 1, 1], 'b': [100, 200, 300]},
dtype='uint64')
tm.assert_frame_equal(df2, expected)
def test_ix_loc_setitem_consistency(self):
# GH 5771
# loc with slice and series
s = Series(0, index=[4, 5, 6])
s.loc[4:5] += 1
expected = Series([1, 1, 0], index=[4, 5, 6])
tm.assert_series_equal(s, expected)
# GH 5928
# chained indexing assignment
df = DataFrame({'a': [0, 1, 2]})
expected = df.copy()
with catch_warnings(record=True):
expected.ix[[0, 1, 2], 'a'] = -expected.ix[[0, 1, 2], 'a']
with catch_warnings(record=True):
df['a'].ix[[0, 1, 2]] = -df['a'].ix[[0, 1, 2]]
tm.assert_frame_equal(df, expected)
df = DataFrame({'a': [0, 1, 2], 'b': [0, 1, 2]})
with catch_warnings(record=True):
df['a'].ix[[0, 1, 2]] = -df['a'].ix[[0, 1, 2]].astype(
'float64') + 0.5
expected = DataFrame({'a': [0.5, -0.5, -1.5], 'b': [0, 1, 2]})
tm.assert_frame_equal(df, expected)
# GH 8607
# ix setitem consistency
df = DataFrame({'timestamp': [1413840976, 1413842580, 1413760580],
'delta': [1174, 904, 161],
'elapsed': [7673, 9277, 1470]})
expected = DataFrame({'timestamp': pd.to_datetime(
[1413840976, 1413842580, 1413760580], unit='s'),
'delta': [1174, 904, 161],
'elapsed': [7673, 9277, 1470]})
df2 = df.copy()
df2['timestamp'] = pd.to_datetime(df['timestamp'], unit='s')
tm.assert_frame_equal(df2, expected)
df2 = df.copy()
df2.loc[:, 'timestamp'] = pd.to_datetime(df['timestamp'], unit='s')
tm.assert_frame_equal(df2, expected)
df2 = df.copy()
with catch_warnings(record=True):
df2.ix[:, 2] = pd.to_datetime(df['timestamp'], unit='s')
tm.assert_frame_equal(df2, expected)
def test_ix_loc_consistency(self):
# GH 8613
# some edge cases where ix/loc should return the same
# this is not an exhaustive case
def compare(result, expected):
if is_scalar(expected):
self.assertEqual(result, expected)
else:
self.assertTrue(expected.equals(result))
# failure cases for .loc, but these work for .ix
df = pd.DataFrame(np.random.randn(5, 4), columns=list('ABCD'))
for key in [slice(1, 3), tuple([slice(0, 2), slice(0, 2)]),
tuple([slice(0, 2), df.columns[0:2]])]:
for index in [tm.makeStringIndex, tm.makeUnicodeIndex,
tm.makeDateIndex, tm.makePeriodIndex,
tm.makeTimedeltaIndex]:
df.index = index(len(df.index))
with catch_warnings(record=True):
df.ix[key]
self.assertRaises(TypeError, lambda: df.loc[key])
df = pd.DataFrame(np.random.randn(5, 4), columns=list('ABCD'),
index=pd.date_range('2012-01-01', periods=5))
for key in ['2012-01-03',
'2012-01-31',
slice('2012-01-03', '2012-01-03'),
slice('2012-01-03', '2012-01-04'),
slice('2012-01-03', '2012-01-06', 2),
slice('2012-01-03', '2012-01-31'),
tuple([[True, True, True, False, True]]), ]:
# getitem
# if the expected raises, then compare the exceptions
try:
with catch_warnings(record=True):
expected = df.ix[key]
except KeyError:
self.assertRaises(KeyError, lambda: df.loc[key])
continue
result = df.loc[key]
compare(result, expected)
# setitem
df1 = df.copy()
df2 = df.copy()
with catch_warnings(record=True):
df1.ix[key] = 10
df2.loc[key] = 10
compare(df2, df1)
# edge cases
s = Series([1, 2, 3, 4], index=list('abde'))
result1 = s['a':'c']
with catch_warnings(record=True):
result2 = s.ix['a':'c']
result3 = s.loc['a':'c']
tm.assert_series_equal(result1, result2)
tm.assert_series_equal(result1, result3)
# now work rather than raising KeyError
s = Series(range(5), [-2, -1, 1, 2, 3])
with catch_warnings(record=True):
result1 = s.ix[-10:3]
result2 = s.loc[-10:3]
tm.assert_series_equal(result1, result2)
with catch_warnings(record=True):
result1 = s.ix[0:3]
result2 = s.loc[0:3]
tm.assert_series_equal(result1, result2)
def test_loc_setitem_dups(self):
# GH 6541
df_orig = DataFrame(
{'me': list('rttti'),
'foo': list('aaade'),
'bar': np.arange(5, dtype='float64') * 1.34 + 2,
'bar2': np.arange(5, dtype='float64') * -.34 + 2}).set_index('me')
indexer = tuple(['r', ['bar', 'bar2']])
df = df_orig.copy()
df.loc[indexer] *= 2.0
tm.assert_series_equal(df.loc[indexer], 2.0 * df_orig.loc[indexer])
indexer = tuple(['r', 'bar'])
df = df_orig.copy()
df.loc[indexer] *= 2.0
self.assertEqual(df.loc[indexer], 2.0 * df_orig.loc[indexer])
indexer = tuple(['t', ['bar', 'bar2']])
df = df_orig.copy()
df.loc[indexer] *= 2.0
tm.assert_frame_equal(df.loc[indexer], 2.0 * df_orig.loc[indexer])
def test_iloc_setitem_dups(self):
# GH 6766
# iloc with a mask aligning from another iloc
df1 = DataFrame([{'A': None, 'B': 1}, {'A': 2, 'B': 2}])
df2 = DataFrame([{'A': 3, 'B': 3}, {'A': 4, 'B': 4}])
df = concat([df1, df2], axis=1)
expected = df.fillna(3)
expected['A'] = expected['A'].astype('float64')
inds = np.isnan(df.iloc[:, 0])
mask = inds[inds].index
df.iloc[mask, 0] = df.iloc[mask, 2]
tm.assert_frame_equal(df, expected)
# del a dup column across blocks
expected = DataFrame({0: [1, 2], 1: [3, 4]})
expected.columns = ['B', 'B']
del df['A']
tm.assert_frame_equal(df, expected)
# assign back to self
df.iloc[[0, 1], [0, 1]] = df.iloc[[0, 1], [0, 1]]
tm.assert_frame_equal(df, expected)
# reversed x 2
df.iloc[[1, 0], [0, 1]] = df.iloc[[1, 0], [0, 1]].reset_index(
drop=True)
df.iloc[[1, 0], [0, 1]] = df.iloc[[1, 0], [0, 1]].reset_index(
drop=True)
tm.assert_frame_equal(df, expected)
def test_chained_getitem_with_lists(self):
# GH6394
# Regression in chained getitem indexing with embedded list-like from
# 0.12
def check(result, expected):
tm.assert_numpy_array_equal(result, expected)
tm.assertIsInstance(result, np.ndarray)
df = DataFrame({'A': 5 * [np.zeros(3)], 'B': 5 * [np.ones(3)]})
expected = df['A'].iloc[2]
result = df.loc[2, 'A']
check(result, expected)
result2 = df.iloc[2]['A']
check(result2, expected)
result3 = df['A'].loc[2]
check(result3, expected)
result4 = df['A'].iloc[2]
check(result4, expected)
def test_loc_getitem_int(self):
# int label
self.check_result('int label', 'loc', 2, 'ix', 2,
typs=['ints', 'uints'], axes=0)
self.check_result('int label', 'loc', 3, 'ix', 3,
typs=['ints', 'uints'], axes=1)
self.check_result('int label', 'loc', 4, 'ix', 4,
typs=['ints', 'uints'], axes=2)
self.check_result('int label', 'loc', 2, 'ix', 2,
typs=['label'], fails=KeyError)
def test_loc_getitem_label(self):
# label
self.check_result('label', 'loc', 'c', 'ix', 'c', typs=['labels'],
axes=0)
self.check_result('label', 'loc', 'null', 'ix', 'null', typs=['mixed'],
axes=0)
self.check_result('label', 'loc', 8, 'ix', 8, typs=['mixed'], axes=0)
self.check_result('label', 'loc', Timestamp('20130102'), 'ix', 1,
typs=['ts'], axes=0)
self.check_result('label', 'loc', 'c', 'ix', 'c', typs=['empty'],
fails=KeyError)
def test_loc_getitem_label_out_of_range(self):
# out of range label
self.check_result('label range', 'loc', 'f', 'ix', 'f',
typs=['ints', 'uints', 'labels', 'mixed', 'ts'],
fails=KeyError)
self.check_result('label range', 'loc', 'f', 'ix', 'f',
typs=['floats'], fails=TypeError)
self.check_result('label range', 'loc', 20, 'ix', 20,
typs=['ints', 'uints', 'mixed'], fails=KeyError)
self.check_result('label range', 'loc', 20, 'ix', 20,
typs=['labels'], fails=TypeError)
self.check_result('label range', 'loc', 20, 'ix', 20, typs=['ts'],
axes=0, fails=TypeError)
self.check_result('label range', 'loc', 20, 'ix', 20, typs=['floats'],
axes=0, fails=TypeError)
def test_loc_getitem_label_list(self):
# list of labels
self.check_result('list lbl', 'loc', [0, 2, 4], 'ix', [0, 2, 4],
typs=['ints', 'uints'], axes=0)
self.check_result('list lbl', 'loc', [3, 6, 9], 'ix', [3, 6, 9],
typs=['ints', 'uints'], axes=1)
self.check_result('list lbl', 'loc', [4, 8, 12], 'ix', [4, 8, 12],
typs=['ints', 'uints'], axes=2)
self.check_result('list lbl', 'loc', ['a', 'b', 'd'], 'ix',
['a', 'b', 'd'], typs=['labels'], axes=0)
self.check_result('list lbl', 'loc', ['A', 'B', 'C'], 'ix',
['A', 'B', 'C'], typs=['labels'], axes=1)
self.check_result('list lbl', 'loc', ['Z', 'Y', 'W'], 'ix',
['Z', 'Y', 'W'], typs=['labels'], axes=2)
self.check_result('list lbl', 'loc', [2, 8, 'null'], 'ix',
[2, 8, 'null'], typs=['mixed'], axes=0)
self.check_result('list lbl', 'loc',
[Timestamp('20130102'), Timestamp('20130103')], 'ix',
[Timestamp('20130102'), Timestamp('20130103')],
typs=['ts'], axes=0)
self.check_result('list lbl', 'loc', [0, 1, 2], 'indexer', [0, 1, 2],
typs=['empty'], fails=KeyError)
self.check_result('list lbl', 'loc', [0, 2, 3], 'ix', [0, 2, 3],
typs=['ints', 'uints'], axes=0, fails=KeyError)
self.check_result('list lbl', 'loc', [3, 6, 7], 'ix', [3, 6, 7],
typs=['ints', 'uints'], axes=1, fails=KeyError)
self.check_result('list lbl', 'loc', [4, 8, 10], 'ix', [4, 8, 10],
typs=['ints', 'uints'], axes=2, fails=KeyError)
def test_loc_getitem_label_list_fails(self):
# fails
self.check_result('list lbl', 'loc', [20, 30, 40], 'ix', [20, 30, 40],
typs=['ints', 'uints'], axes=1, fails=KeyError)
self.check_result('list lbl', 'loc', [20, 30, 40], 'ix', [20, 30, 40],
typs=['ints', 'uints'], axes=2, fails=KeyError)
def test_loc_getitem_label_array_like(self):
# array like
self.check_result('array like', 'loc', Series(index=[0, 2, 4]).index,
'ix', [0, 2, 4], typs=['ints', 'uints'], axes=0)
self.check_result('array like', 'loc', Series(index=[3, 6, 9]).index,
'ix', [3, 6, 9], typs=['ints', 'uints'], axes=1)
self.check_result('array like', 'loc', Series(index=[4, 8, 12]).index,
'ix', [4, 8, 12], typs=['ints', 'uints'], axes=2)
def test_loc_getitem_bool(self):
# boolean indexers
b = [True, False, True, False]
self.check_result('bool', 'loc', b, 'ix', b,
typs=['ints', 'uints', 'labels',
'mixed', 'ts', 'floats'])
self.check_result('bool', 'loc', b, 'ix', b, typs=['empty'],
fails=KeyError)
def test_loc_getitem_int_slice(self):
# ok
self.check_result('int slice2', 'loc', slice(2, 4), 'ix', [2, 4],
typs=['ints', 'uints'], axes=0)
self.check_result('int slice2', 'loc', slice(3, 6), 'ix', [3, 6],
typs=['ints', 'uints'], axes=1)
self.check_result('int slice2', 'loc', slice(4, 8), 'ix', [4, 8],
typs=['ints', 'uints'], axes=2)
# GH 3053
# loc should treat integer slices like label slices
from itertools import product
index = MultiIndex.from_tuples([t for t in product(
[6, 7, 8], ['a', 'b'])])
df = DataFrame(np.random.randn(6, 6), index, index)
result = df.loc[6:8, :]
with catch_warnings(record=True):
expected = df.ix[6:8, :]
tm.assert_frame_equal(result, expected)
index = MultiIndex.from_tuples([t
for t in product(
[10, 20, 30], ['a', 'b'])])
df = DataFrame(np.random.randn(6, 6), index, index)
result = df.loc[20:30, :]
with catch_warnings(record=True):
expected = df.ix[20:30, :]
tm.assert_frame_equal(result, expected)
# doc examples
result = df.loc[10, :]
with catch_warnings(record=True):
expected = df.ix[10, :]
tm.assert_frame_equal(result, expected)
result = df.loc[:, 10]
# expected = df.ix[:,10] (this fails)
expected = df[10]
tm.assert_frame_equal(result, expected)
def test_loc_to_fail(self):
# GH3449
df = DataFrame(np.random.random((3, 3)),
index=['a', 'b', 'c'],
columns=['e', 'f', 'g'])
# raise a KeyError?
self.assertRaises(KeyError, df.loc.__getitem__,
tuple([[1, 2], [1, 2]]))
# GH 7496
# loc should not fallback
s = Series()
s.loc[1] = 1
s.loc['a'] = 2
self.assertRaises(KeyError, lambda: s.loc[-1])
self.assertRaises(KeyError, lambda: s.loc[[-1, -2]])
self.assertRaises(KeyError, lambda: s.loc[['4']])
s.loc[-1] = 3
result = s.loc[[-1, -2]]
expected = Series([3, np.nan], index=[-1, -2])
tm.assert_series_equal(result, expected)
s['a'] = 2
self.assertRaises(KeyError, lambda: s.loc[[-2]])
del s['a']
def f():
s.loc[[-2]] = 0
self.assertRaises(KeyError, f)
# inconsistency between .loc[values] and .loc[values,:]
# GH 7999
df = DataFrame([['a'], ['b']], index=[1, 2], columns=['value'])
def f():
df.loc[[3], :]
self.assertRaises(KeyError, f)
def f():
df.loc[[3]]
self.assertRaises(KeyError, f)
def test_at_to_fail(self):
# at should not fallback
# GH 7814
s = Series([1, 2, 3], index=list('abc'))
result = s.at['a']
self.assertEqual(result, 1)
self.assertRaises(ValueError, lambda: s.at[0])
df = DataFrame({'A': [1, 2, 3]}, index=list('abc'))
result = df.at['a', 'A']
self.assertEqual(result, 1)
self.assertRaises(ValueError, lambda: df.at['a', 0])
s = Series([1, 2, 3], index=[3, 2, 1])
result = s.at[1]
self.assertEqual(result, 3)
self.assertRaises(ValueError, lambda: s.at['a'])
df = DataFrame({0: [1, 2, 3]}, index=[3, 2, 1])
result = df.at[1, 0]
self.assertEqual(result, 3)
self.assertRaises(ValueError, lambda: df.at['a', 0])
# GH 13822, incorrect error string with non-unique columns when missing
# column is accessed
df = DataFrame({'x': [1.], 'y': [2.], 'z': [3.]})
df.columns = ['x', 'x', 'z']
# Check that we get the correct value in the KeyError
self.assertRaisesRegexp(KeyError, r"\['y'\] not in index",
lambda: df[['x', 'y', 'z']])
def test_loc_getitem_label_slice(self):
# label slices (with ints)
self.check_result('lab slice', 'loc', slice(1, 3),
'ix', slice(1, 3),
typs=['labels', 'mixed', 'empty', 'ts', 'floats'],
fails=TypeError)
# real label slices
self.check_result('lab slice', 'loc', slice('a', 'c'),
'ix', slice('a', 'c'), typs=['labels'], axes=0)
self.check_result('lab slice', 'loc', slice('A', 'C'),
'ix', slice('A', 'C'), typs=['labels'], axes=1)
self.check_result('lab slice', 'loc', slice('W', 'Z'),
'ix', slice('W', 'Z'), typs=['labels'], axes=2)
self.check_result('ts slice', 'loc', slice('20130102', '20130104'),
'ix', slice('20130102', '20130104'),
typs=['ts'], axes=0)
self.check_result('ts slice', 'loc', slice('20130102', '20130104'),
'ix', slice('20130102', '20130104'),
typs=['ts'], axes=1, fails=TypeError)
self.check_result('ts slice', 'loc', slice('20130102', '20130104'),
'ix', slice('20130102', '20130104'),
typs=['ts'], axes=2, fails=TypeError)
# GH 14316
self.check_result('ts slice rev', 'loc', slice('20130104', '20130102'),
'indexer', [0, 1, 2], typs=['ts_rev'], axes=0)
self.check_result('mixed slice', 'loc', slice(2, 8), 'ix', slice(2, 8),
typs=['mixed'], axes=0, fails=TypeError)
self.check_result('mixed slice', 'loc', slice(2, 8), 'ix', slice(2, 8),
typs=['mixed'], axes=1, fails=KeyError)
self.check_result('mixed slice', 'loc', slice(2, 8), 'ix', slice(2, 8),
typs=['mixed'], axes=2, fails=KeyError)
self.check_result('mixed slice', 'loc', slice(2, 4, 2), 'ix', slice(
2, 4, 2), typs=['mixed'], axes=0, fails=TypeError)
def test_loc_general(self):
df = DataFrame(
np.random.rand(4, 4), columns=['A', 'B', 'C', 'D'],
index=['A', 'B', 'C', 'D'])
# want this to work
result = df.loc[:, "A":"B"].iloc[0:2, :]
self.assertTrue((result.columns == ['A', 'B']).all())
self.assertTrue((result.index == ['A', 'B']).all())
# mixed type
result = DataFrame({'a': [Timestamp('20130101')], 'b': [1]}).iloc[0]
expected = Series([Timestamp('20130101'), 1], index=['a', 'b'], name=0)
tm.assert_series_equal(result, expected)
self.assertEqual(result.dtype, object)
def test_loc_setitem_consistency(self):
# GH 6149
# coerce similarly for setitem and loc when rows have a null-slice
expected = DataFrame({'date': Series(0, index=range(5),
dtype=np.int64),
'val': Series(range(5), dtype=np.int64)})
df = DataFrame({'date': date_range('2000-01-01', '2000-01-5'),
'val': Series(
range(5), dtype=np.int64)})
df.loc[:, 'date'] = 0
tm.assert_frame_equal(df, expected)
df = DataFrame({'date': date_range('2000-01-01', '2000-01-5'),
'val': Series(range(5), dtype=np.int64)})
df.loc[:, 'date'] = np.array(0, dtype=np.int64)
tm.assert_frame_equal(df, expected)
df = DataFrame({'date': date_range('2000-01-01', '2000-01-5'),
'val': Series(range(5), dtype=np.int64)})
df.loc[:, 'date'] = np.array([0, 0, 0, 0, 0], dtype=np.int64)
tm.assert_frame_equal(df, expected)
expected = DataFrame({'date': Series('foo', index=range(5)),
'val': Series(range(5), dtype=np.int64)})
df = DataFrame({'date': date_range('2000-01-01', '2000-01-5'),
'val': Series(range(5), dtype=np.int64)})
df.loc[:, 'date'] = 'foo'
tm.assert_frame_equal(df, expected)
expected = DataFrame({'date': Series(1.0, index=range(5)),
'val': Series(range(5), dtype=np.int64)})
df = DataFrame({'date': date_range('2000-01-01', '2000-01-5'),
'val': Series(range(5), dtype=np.int64)})
df.loc[:, 'date'] = 1.0
tm.assert_frame_equal(df, expected)
def test_loc_setitem_consistency_empty(self):
# empty (essentially noops)
expected = DataFrame(columns=['x', 'y'])
expected['x'] = expected['x'].astype(np.int64)
df = DataFrame(columns=['x', 'y'])
df.loc[:, 'x'] = 1
tm.assert_frame_equal(df, expected)
df = DataFrame(columns=['x', 'y'])
df['x'] = 1
tm.assert_frame_equal(df, expected)
def test_loc_setitem_consistency_slice_column_len(self):
# .loc[:,column] setting with slice == len of the column
# GH10408
data = """Level_0,,,Respondent,Respondent,Respondent,OtherCat,OtherCat
Level_1,,,Something,StartDate,EndDate,Yes/No,SomethingElse
Region,Site,RespondentID,,,,,
Region_1,Site_1,3987227376,A,5/25/2015 10:59,5/25/2015 11:22,Yes,
Region_1,Site_1,3980680971,A,5/21/2015 9:40,5/21/2015 9:52,Yes,Yes
Region_1,Site_2,3977723249,A,5/20/2015 8:27,5/20/2015 8:41,Yes,
Region_1,Site_2,3977723089,A,5/20/2015 8:33,5/20/2015 9:09,Yes,No"""
df = pd.read_csv(StringIO(data), header=[0, 1], index_col=[0, 1, 2])
df.loc[:, ('Respondent', 'StartDate')] = pd.to_datetime(df.loc[:, (
'Respondent', 'StartDate')])
df.loc[:, ('Respondent', 'EndDate')] = pd.to_datetime(df.loc[:, (
'Respondent', 'EndDate')])
df.loc[:, ('Respondent', 'Duration')] = df.loc[:, (
'Respondent', 'EndDate')] - df.loc[:, ('Respondent', 'StartDate')]
df.loc[:, ('Respondent', 'Duration')] = df.loc[:, (
'Respondent', 'Duration')].astype('timedelta64[s]')
expected = Series([1380, 720, 840, 2160.], index=df.index,
name=('Respondent', 'Duration'))
tm.assert_series_equal(df[('Respondent', 'Duration')], expected)
def test_loc_setitem_frame(self):
df = self.frame_labels
result = df.iloc[0, 0]
df.loc['a', 'A'] = 1
result = df.loc['a', 'A']
self.assertEqual(result, 1)
result = df.iloc[0, 0]
self.assertEqual(result, 1)
df.loc[:, 'B':'D'] = 0
expected = df.loc[:, 'B':'D']
with catch_warnings(record=True):
result = df.ix[:, 1:]
tm.assert_frame_equal(result, expected)
# GH 6254
# setting issue
df = DataFrame(index=[3, 5, 4], columns=['A'])
df.loc[[4, 3, 5], 'A'] = np.array([1, 2, 3], dtype='int64')
expected = DataFrame(dict(A=Series(
[1, 2, 3], index=[4, 3, 5]))).reindex(index=[3, 5, 4])
tm.assert_frame_equal(df, expected)
# GH 6252
# setting with an empty frame
keys1 = ['@' + str(i) for i in range(5)]
val1 = np.arange(5, dtype='int64')
keys2 = ['@' + str(i) for i in range(4)]
val2 = np.arange(4, dtype='int64')
index = list(set(keys1).union(keys2))
df = DataFrame(index=index)
df['A'] = nan
df.loc[keys1, 'A'] = val1
df['B'] = nan
df.loc[keys2, 'B'] = val2
expected = DataFrame(dict(A=Series(val1, index=keys1), B=Series(
val2, index=keys2))).reindex(index=index)
tm.assert_frame_equal(df, expected)
# GH 8669
# invalid coercion of nan -> int
df = DataFrame({'A': [1, 2, 3], 'B': np.nan})
df.loc[df.B > df.A, 'B'] = df.A
expected = DataFrame({'A': [1, 2, 3], 'B': np.nan})
tm.assert_frame_equal(df, expected)
# GH 6546
# setting with mixed labels
df = DataFrame({1: [1, 2], 2: [3, 4], 'a': ['a', 'b']})
result = df.loc[0, [1, 2]]
expected = Series([1, 3], index=[1, 2], dtype=object, name=0)
tm.assert_series_equal(result, expected)
expected = DataFrame({1: [5, 2], 2: [6, 4], 'a': ['a', 'b']})
df.loc[0, [1, 2]] = [5, 6]
tm.assert_frame_equal(df, expected)
def test_loc_setitem_frame_multiples(self):
# multiple setting
df = DataFrame({'A': ['foo', 'bar', 'baz'],
'B': Series(
range(3), dtype=np.int64)})
rhs = df.loc[1:2]
rhs.index = df.index[0:2]
df.loc[0:1] = rhs
expected = DataFrame({'A': ['bar', 'baz', 'baz'],
'B': Series(
[1, 2, 2], dtype=np.int64)})
tm.assert_frame_equal(df, expected)
# multiple setting with frame on rhs (with M8)
df = DataFrame({'date': date_range('2000-01-01', '2000-01-5'),
'val': Series(
range(5), dtype=np.int64)})
expected = DataFrame({'date': [Timestamp('20000101'), Timestamp(
'20000102'), Timestamp('20000101'), Timestamp('20000102'),
Timestamp('20000103')],
'val': Series(
[0, 1, 0, 1, 2], dtype=np.int64)})
rhs = df.loc[0:2]
rhs.index = df.index[2:5]
df.loc[2:4] = rhs
tm.assert_frame_equal(df, expected)
def test_iloc_getitem_frame(self):
df = DataFrame(np.random.randn(10, 4), index=lrange(0, 20, 2),
columns=lrange(0, 8, 2))
result = df.iloc[2]
with catch_warnings(record=True):
exp = df.ix[4]
| tm.assert_series_equal(result, exp) | pandas.util.testing.assert_series_equal |
"""
A warehouse for constant values required to initialize the PUDL Database.
This constants module stores and organizes a bunch of constant values which are
used throughout PUDL to populate static lists within the data packages or for
data cleaning purposes.
"""
import pandas as pd
import sqlalchemy as sa
######################################################################
# Constants used within the init.py module.
######################################################################
prime_movers = [
'steam_turbine',
'gas_turbine',
'hydro',
'internal_combustion',
'solar_pv',
'wind_turbine'
]
"""list: A list of the types of prime movers"""
rto_iso = {
'CAISO': 'California ISO',
'ERCOT': 'Electric Reliability Council of Texas',
'MISO': 'Midcontinent ISO',
'ISO-NE': 'ISO New England',
'NYISO': 'New York ISO',
'PJM': 'PJM Interconnection',
'SPP': 'Southwest Power Pool'
}
"""dict: A dictionary containing ISO/RTO abbreviations (keys) and names (values)
"""
us_states = {
'AK': 'Alaska',
'AL': 'Alabama',
'AR': 'Arkansas',
'AS': 'American Samoa',
'AZ': 'Arizona',
'CA': 'California',
'CO': 'Colorado',
'CT': 'Connecticut',
'DC': 'District of Columbia',
'DE': 'Delaware',
'FL': 'Florida',
'GA': 'Georgia',
'GU': 'Guam',
'HI': 'Hawaii',
'IA': 'Iowa',
'ID': 'Idaho',
'IL': 'Illinois',
'IN': 'Indiana',
'KS': 'Kansas',
'KY': 'Kentucky',
'LA': 'Louisiana',
'MA': 'Massachusetts',
'MD': 'Maryland',
'ME': 'Maine',
'MI': 'Michigan',
'MN': 'Minnesota',
'MO': 'Missouri',
'MP': 'Northern Mariana Islands',
'MS': 'Mississippi',
'MT': 'Montana',
'NA': 'National',
'NC': 'North Carolina',
'ND': 'North Dakota',
'NE': 'Nebraska',
'NH': 'New Hampshire',
'NJ': 'New Jersey',
'NM': 'New Mexico',
'NV': 'Nevada',
'NY': 'New York',
'OH': 'Ohio',
'OK': 'Oklahoma',
'OR': 'Oregon',
'PA': 'Pennsylvania',
'PR': 'Puerto Rico',
'RI': 'Rhode Island',
'SC': 'South Carolina',
'SD': 'South Dakota',
'TN': 'Tennessee',
'TX': 'Texas',
'UT': 'Utah',
'VA': 'Virginia',
'VI': 'Virgin Islands',
'VT': 'Vermont',
'WA': 'Washington',
'WI': 'Wisconsin',
'WV': 'West Virginia',
'WY': 'Wyoming'
}
"""dict: A dictionary containing US state abbreviations (keys) and names
(values)
"""
canada_prov_terr = {
'AB': 'Alberta',
'BC': 'British Columbia',
'CN': 'Canada',
'MB': 'Manitoba',
'NB': 'New Brunswick',
'NS': 'Nova Scotia',
'NL': 'Newfoundland and Labrador',
'NT': 'Northwest Territories',
'NU': 'Nunavut',
'ON': 'Ontario',
    'PE': 'Prince Edward Island',
'QC': 'Quebec',
'SK': 'Saskatchewan',
'YT': 'Yukon Territory',
}
"""dict: A dictionary containing Canadian provinces' and territories'
abbreviations (keys) and names (values)
"""
cems_states = {k: v for k, v in us_states.items() if v not in
{'Alaska',
'American Samoa',
'Guam',
'Hawaii',
'Northern Mariana Islands',
'National',
'Puerto Rico',
'Virgin Islands'}
}
"""dict: A dictionary containing US state abbreviations (keys) and names
(values) that are present in the CEMS dataset
"""
# This is imperfect for states that have split timezones. See:
# https://en.wikipedia.org/wiki/List_of_time_offsets_by_U.S._state_and_territory
# For states that are split, I went with where there seem to be more people
# List of timezones in pytz.common_timezones
# Canada: https://en.wikipedia.org/wiki/Time_in_Canada#IANA_time_zone_database
state_tz_approx = {
"AK": "US/Alaska", # Alaska; Not in CEMS
"AL": "US/Central", # Alabama
"AR": "US/Central", # Arkansas
"AS": "Pacific/Pago_Pago", # American Samoa; Not in CEMS
"AZ": "US/Arizona", # Arizona
"CA": "US/Pacific", # California
"CO": "US/Mountain", # Colorado
"CT": "US/Eastern", # Connecticut
"DC": "US/Eastern", # District of Columbia
"DE": "US/Eastern", # Delaware
"FL": "US/Eastern", # Florida (split state)
"GA": "US/Eastern", # Georgia
"GU": "Pacific/Guam", # Guam; Not in CEMS
"HI": "US/Hawaii", # Hawaii; Not in CEMS
"IA": "US/Central", # Iowa
"ID": "US/Mountain", # Idaho (split state)
"IL": "US/Central", # Illinois
"IN": "US/Eastern", # Indiana (split state)
"KS": "US/Central", # Kansas (split state)
"KY": "US/Eastern", # Kentucky (split state)
"LA": "US/Central", # Louisiana
"MA": "US/Eastern", # Massachusetts
"MD": "US/Eastern", # Maryland
"ME": "US/Eastern", # Maine
"MI": "America/Detroit", # Michigan (split state)
"MN": "US/Central", # Minnesota
"MO": "US/Central", # Missouri
"MP": "Pacific/Saipan", # Northern Mariana Islands; Not in CEMS
"MS": "US/Central", # Mississippi
"MT": "US/Mountain", # Montana
"NC": "US/Eastern", # North Carolina
"ND": "US/Central", # North Dakota (split state)
"NE": "US/Central", # Nebraska (split state)
"NH": "US/Eastern", # New Hampshire
"NJ": "US/Eastern", # New Jersey
"NM": "US/Mountain", # New Mexico
"NV": "US/Pacific", # Nevada
"NY": "US/Eastern", # New York
"OH": "US/Eastern", # Ohio
"OK": "US/Central", # Oklahoma
"OR": "US/Pacific", # Oregon (split state)
"PA": "US/Eastern", # Pennsylvania
"PR": "America/Puerto_Rico", # Puerto Rico; Not in CEMS
"RI": "US/Eastern", # Rhode Island
"SC": "US/Eastern", # South Carolina
"SD": "US/Central", # South Dakota (split state)
"TN": "US/Central", # Tennessee
"TX": "US/Central", # Texas
"UT": "US/Mountain", # Utah
"VA": "US/Eastern", # Virginia
"VI": "America/Puerto_Rico", # Virgin Islands; Not in CEMS
"VT": "US/Eastern", # Vermont
"WA": "US/Pacific", # Washington
"WI": "US/Central", # Wisconsin
"WV": "US/Eastern", # West Virginia
"WY": "US/Mountain", # Wyoming
# Canada (none of these are in CEMS)
"AB": "America/Edmonton", # Alberta
"BC": "America/Vancouver", # British Columbia (split province)
"MB": "America/Winnipeg", # Manitoba
"NB": "America/Moncton", # New Brunswick
"NS": "America/Halifax", # Nova Scotia
"NL": "America/St_Johns", # Newfoundland and Labrador (split province)
"NT": "America/Yellowknife", # Northwest Territories (split province)
"NU": "America/Iqaluit", # Nunavut (split province)
"ON": "America/Toronto", # Ontario (split province)
"PE": "America/Halifax", # Prince Edwards Island
"QC": "America/Montreal", # Quebec (split province)
"SK": "America/Regina", # Saskatchewan (split province)
"YT": "America/Whitehorse", # Yukon Territory
}
"""dict: A dictionary containing US and Canadian state/territory abbreviations
(keys) and timezones (values)
"""
ferc1_power_purchase_type = {
'RQ': 'requirement',
'LF': 'long_firm',
'IF': 'intermediate_firm',
'SF': 'short_firm',
'LU': 'long_unit',
'IU': 'intermediate_unit',
'EX': 'electricity_exchange',
'OS': 'other_service',
'AD': 'adjustment'
}
"""dict: A dictionary of abbreviations (keys) and types (values) for power
purchase agreements from FERC Form 1.
"""
# Dictionary mapping DBF files (w/o .DBF file extension) to DB table names
ferc1_dbf2tbl = {
'F1_1': 'f1_respondent_id',
'F1_2': 'f1_acb_epda',
'F1_3': 'f1_accumdepr_prvsn',
'F1_4': 'f1_accumdfrrdtaxcr',
'F1_5': 'f1_adit_190_detail',
'F1_6': 'f1_adit_190_notes',
'F1_7': 'f1_adit_amrt_prop',
'F1_8': 'f1_adit_other',
'F1_9': 'f1_adit_other_prop',
'F1_10': 'f1_allowances',
'F1_11': 'f1_bal_sheet_cr',
'F1_12': 'f1_capital_stock',
'F1_13': 'f1_cash_flow',
'F1_14': 'f1_cmmn_utlty_p_e',
'F1_15': 'f1_comp_balance_db',
'F1_16': 'f1_construction',
'F1_17': 'f1_control_respdnt',
'F1_18': 'f1_co_directors',
'F1_19': 'f1_cptl_stk_expns',
'F1_20': 'f1_csscslc_pcsircs',
'F1_21': 'f1_dacs_epda',
'F1_22': 'f1_dscnt_cptl_stk',
'F1_23': 'f1_edcfu_epda',
'F1_24': 'f1_elctrc_erg_acct',
'F1_25': 'f1_elctrc_oper_rev',
'F1_26': 'f1_elc_oper_rev_nb',
'F1_27': 'f1_elc_op_mnt_expn',
'F1_28': 'f1_electric',
'F1_29': 'f1_envrnmntl_expns',
'F1_30': 'f1_envrnmntl_fclty',
'F1_31': 'f1_fuel',
'F1_32': 'f1_general_info',
'F1_33': 'f1_gnrt_plant',
'F1_34': 'f1_important_chg',
'F1_35': 'f1_incm_stmnt_2',
'F1_36': 'f1_income_stmnt',
'F1_37': 'f1_miscgen_expnelc',
'F1_38': 'f1_misc_dfrrd_dr',
'F1_39': 'f1_mthly_peak_otpt',
'F1_40': 'f1_mtrl_spply',
'F1_41': 'f1_nbr_elc_deptemp',
'F1_42': 'f1_nonutility_prop',
'F1_43': 'f1_note_fin_stmnt', # 37% of DB
'F1_44': 'f1_nuclear_fuel',
'F1_45': 'f1_officers_co',
'F1_46': 'f1_othr_dfrrd_cr',
'F1_47': 'f1_othr_pd_in_cptl',
'F1_48': 'f1_othr_reg_assets',
'F1_49': 'f1_othr_reg_liab',
'F1_50': 'f1_overhead',
'F1_51': 'f1_pccidica',
'F1_52': 'f1_plant_in_srvce',
'F1_53': 'f1_pumped_storage',
'F1_54': 'f1_purchased_pwr',
'F1_55': 'f1_reconrpt_netinc',
'F1_56': 'f1_reg_comm_expn',
'F1_57': 'f1_respdnt_control',
'F1_58': 'f1_retained_erng',
'F1_59': 'f1_r_d_demo_actvty',
'F1_60': 'f1_sales_by_sched',
'F1_61': 'f1_sale_for_resale',
'F1_62': 'f1_sbsdry_totals',
'F1_63': 'f1_schedules_list',
'F1_64': 'f1_security_holder',
'F1_65': 'f1_slry_wg_dstrbtn',
'F1_66': 'f1_substations',
'F1_67': 'f1_taxacc_ppchrgyr',
'F1_68': 'f1_unrcvrd_cost',
'F1_69': 'f1_utltyplnt_smmry',
'F1_70': 'f1_work',
'F1_71': 'f1_xmssn_adds',
'F1_72': 'f1_xmssn_elc_bothr',
'F1_73': 'f1_xmssn_elc_fothr',
'F1_74': 'f1_xmssn_line',
'F1_75': 'f1_xtraordnry_loss',
'F1_76': 'f1_codes_val',
'F1_77': 'f1_sched_lit_tbl',
'F1_78': 'f1_audit_log',
'F1_79': 'f1_col_lit_tbl',
'F1_80': 'f1_load_file_names',
'F1_81': 'f1_privilege',
'F1_82': 'f1_sys_error_log',
'F1_83': 'f1_unique_num_val',
'F1_84': 'f1_row_lit_tbl',
'F1_85': 'f1_footnote_data',
'F1_86': 'f1_hydro',
'F1_87': 'f1_footnote_tbl', # 52% of DB
'F1_88': 'f1_ident_attsttn',
'F1_89': 'f1_steam',
'F1_90': 'f1_leased',
'F1_91': 'f1_sbsdry_detail',
'F1_92': 'f1_plant',
'F1_93': 'f1_long_term_debt',
'F1_106_2009': 'f1_106_2009',
'F1_106A_2009': 'f1_106a_2009',
'F1_106B_2009': 'f1_106b_2009',
'F1_208_ELC_DEP': 'f1_208_elc_dep',
'F1_231_TRN_STDYCST': 'f1_231_trn_stdycst',
'F1_324_ELC_EXPNS': 'f1_324_elc_expns',
'F1_325_ELC_CUST': 'f1_325_elc_cust',
'F1_331_TRANSISO': 'f1_331_transiso',
'F1_338_DEP_DEPL': 'f1_338_dep_depl',
'F1_397_ISORTO_STL': 'f1_397_isorto_stl',
'F1_398_ANCL_PS': 'f1_398_ancl_ps',
'F1_399_MTH_PEAK': 'f1_399_mth_peak',
'F1_400_SYS_PEAK': 'f1_400_sys_peak',
'F1_400A_ISO_PEAK': 'f1_400a_iso_peak',
'F1_429_TRANS_AFF': 'f1_429_trans_aff',
'F1_ALLOWANCES_NOX': 'f1_allowances_nox',
'F1_CMPINC_HEDGE_A': 'f1_cmpinc_hedge_a',
'F1_CMPINC_HEDGE': 'f1_cmpinc_hedge',
'F1_EMAIL': 'f1_email',
'F1_RG_TRN_SRV_REV': 'f1_rg_trn_srv_rev',
'F1_S0_CHECKS': 'f1_s0_checks',
'F1_S0_FILING_LOG': 'f1_s0_filing_log',
'F1_SECURITY': 'f1_security'
# 'F1_PINS': 'f1_pins', # private data, not publicized.
# 'F1_FREEZE': 'f1_freeze', # private data, not publicized
}
"""dict: A dictionary mapping FERC Form 1 DBF files(w / o .DBF file extension)
(keys) to database table names (values).
"""
ferc1_huge_tables = {
'f1_footnote_tbl',
'f1_footnote_data',
'f1_note_fin_stmnt',
}
"""set: A set containing large FERC Form 1 tables.
"""
# Invert the map above so we can go either way as needed
ferc1_tbl2dbf = {v: k for k, v in ferc1_dbf2tbl.items()}
"""dict: A dictionary mapping database table names (keys) to FERC Form 1 DBF
files(w / o .DBF file extension) (values).
"""
# This dictionary maps the strings which are used to denote field types in the
# DBF objects to the corresponding generic SQLAlchemy Column types:
# These definitions come from a combination of the dbfread example program
# dbf2sqlite and this DBF file format documentation page:
# http://www.dbase.com/KnowledgeBase/int/db7_file_fmt.htm
# Un-mapped types left as 'XXX' which should obviously make an error...
dbf_typemap = {
'C': sa.String,
'D': sa.Date,
'F': sa.Float,
'I': sa.Integer,
'L': sa.Boolean,
'M': sa.Text, # 10 digit .DBT block number, stored as a string...
'N': sa.Float,
'T': sa.DateTime,
'0': sa.Integer, # based on dbf2sqlite mapping
'B': 'XXX', # .DBT block number, binary string
'@': 'XXX', # Timestamp... Date = Julian Day, Time is in milliseconds?
'+': 'XXX', # Autoincrement (e.g. for IDs)
'O': 'XXX', # Double, 8 bytes
'G': 'XXX', # OLE 10 digit/byte number of a .DBT block, stored as string
}
"""dict: A dictionary mapping field types in the DBF objects (keys) to the
corresponding generic SQLAlchemy Column types.
"""
# This is the set of tables which have been successfully integrated into PUDL:
ferc1_pudl_tables = (
'fuel_ferc1', # Plant-level data, linked to plants_steam_ferc1
'plants_steam_ferc1', # Plant-level data
'plants_small_ferc1', # Plant-level data
'plants_hydro_ferc1', # Plant-level data
'plants_pumped_storage_ferc1', # Plant-level data
'purchased_power_ferc1', # Inter-utility electricity transactions
'plant_in_service_ferc1', # Row-mapped plant accounting data.
# 'accumulated_depreciation_ferc1' # Requires row-mapping to be useful.
)
"""tuple: A tuple containing the FERC Form 1 tables that can be successfully
integrated into PUDL.
"""
table_map_ferc1_pudl = {
'fuel_ferc1': 'f1_fuel',
'plants_steam_ferc1': 'f1_steam',
'plants_small_ferc1': 'f1_gnrt_plant',
'plants_hydro_ferc1': 'f1_hydro',
'plants_pumped_storage_ferc1': 'f1_pumped_storage',
'plant_in_service_ferc1': 'f1_plant_in_srvce',
'purchased_power_ferc1': 'f1_purchased_pwr',
# 'accumulated_depreciation_ferc1': 'f1_accumdepr_prvsn'
}
"""dict: A dictionary mapping PUDL table names (keys) to the corresponding FERC
Form 1 DBF table names.
"""
# This is the list of EIA923 tables that can be successfully pulled into PUDL
eia923_pudl_tables = ('generation_fuel_eia923',
'boiler_fuel_eia923',
'generation_eia923',
'coalmine_eia923',
'fuel_receipts_costs_eia923')
"""tuple: A tuple containing the EIA923 tables that can be successfully
integrated into PUDL.
"""
epaipm_pudl_tables = (
'transmission_single_epaipm',
'transmission_joint_epaipm',
'load_curves_epaipm',
'plant_region_map_epaipm',
)
"""tuple: A tuple containing the EPA IPM tables that can be successfully
integrated into PUDL.
"""
# List of entity tables
entity_tables = ['utilities_entity_eia',
'plants_entity_eia',
'generators_entity_eia',
'boilers_entity_eia',
'regions_entity_epaipm', ]
"""list: A list of PUDL entity tables.
"""
xlsx_maps_pkg = 'pudl.package_data.meta.xlsx_maps'
"""string: The location of the xlsx maps within the PUDL package data."""
##############################################################################
# EIA 923 Spreadsheet Metadata
##############################################################################
##############################################################################
# EIA 860 Spreadsheet Metadata
##############################################################################
# This is the list of EIA860 tables that can be successfully pulled into PUDL
eia860_pudl_tables = (
'boiler_generator_assn_eia860',
'utilities_eia860',
'plants_eia860',
'generators_eia860',
'ownership_eia860'
)
"""tuple: A tuple enumerating EIA 860 tables for which PUDL's ETL works."""
# The set of FERC Form 1 tables that have the same composite primary keys: [
# respondent_id, report_year, report_prd, row_number, spplmnt_num ].
# TODO: THIS ONLY PERTAINS TO 2015 AND MAY NEED TO BE ADJUSTED BY YEAR...
ferc1_data_tables = (
'f1_acb_epda', 'f1_accumdepr_prvsn', 'f1_accumdfrrdtaxcr',
'f1_adit_190_detail', 'f1_adit_190_notes', 'f1_adit_amrt_prop',
'f1_adit_other', 'f1_adit_other_prop', 'f1_allowances', 'f1_bal_sheet_cr',
'f1_capital_stock', 'f1_cash_flow', 'f1_cmmn_utlty_p_e',
'f1_comp_balance_db', 'f1_construction', 'f1_control_respdnt',
'f1_co_directors', 'f1_cptl_stk_expns', 'f1_csscslc_pcsircs',
'f1_dacs_epda', 'f1_dscnt_cptl_stk', 'f1_edcfu_epda', 'f1_elctrc_erg_acct',
'f1_elctrc_oper_rev', 'f1_elc_oper_rev_nb', 'f1_elc_op_mnt_expn',
'f1_electric', 'f1_envrnmntl_expns', 'f1_envrnmntl_fclty', 'f1_fuel',
'f1_general_info', 'f1_gnrt_plant', 'f1_important_chg', 'f1_incm_stmnt_2',
'f1_income_stmnt', 'f1_miscgen_expnelc', 'f1_misc_dfrrd_dr',
'f1_mthly_peak_otpt', 'f1_mtrl_spply', 'f1_nbr_elc_deptemp',
'f1_nonutility_prop', 'f1_note_fin_stmnt', 'f1_nuclear_fuel',
'f1_officers_co', 'f1_othr_dfrrd_cr', 'f1_othr_pd_in_cptl',
'f1_othr_reg_assets', 'f1_othr_reg_liab', 'f1_overhead', 'f1_pccidica',
'f1_plant_in_srvce', 'f1_pumped_storage', 'f1_purchased_pwr',
'f1_reconrpt_netinc', 'f1_reg_comm_expn', 'f1_respdnt_control',
'f1_retained_erng', 'f1_r_d_demo_actvty', 'f1_sales_by_sched',
'f1_sale_for_resale', 'f1_sbsdry_totals', 'f1_schedules_list',
'f1_security_holder', 'f1_slry_wg_dstrbtn', 'f1_substations',
'f1_taxacc_ppchrgyr', 'f1_unrcvrd_cost', 'f1_utltyplnt_smmry', 'f1_work',
'f1_xmssn_adds', 'f1_xmssn_elc_bothr', 'f1_xmssn_elc_fothr',
'f1_xmssn_line', 'f1_xtraordnry_loss',
'f1_hydro', 'f1_steam', 'f1_leased', 'f1_sbsdry_detail',
'f1_plant', 'f1_long_term_debt', 'f1_106_2009', 'f1_106a_2009',
'f1_106b_2009', 'f1_208_elc_dep', 'f1_231_trn_stdycst', 'f1_324_elc_expns',
'f1_325_elc_cust', 'f1_331_transiso', 'f1_338_dep_depl',
'f1_397_isorto_stl', 'f1_398_ancl_ps', 'f1_399_mth_peak',
'f1_400_sys_peak', 'f1_400a_iso_peak', 'f1_429_trans_aff',
'f1_allowances_nox', 'f1_cmpinc_hedge_a', 'f1_cmpinc_hedge',
'f1_rg_trn_srv_rev')
"""tuple: A tuple containing the FERC Form 1 tables that have the same composite
primary keys: [respondent_id, report_year, report_prd, row_number,
spplmnt_num].
"""
# Line numbers, and corresponding FERC account number
# from FERC Form 1 pages 204-207, Electric Plant in Service.
# Descriptions from: https://www.law.cornell.edu/cfr/text/18/part-101
ferc_electric_plant_accounts = pd.DataFrame.from_records([
# 1. Intangible Plant
(2, '301', 'Intangible: Organization'),
(3, '302', 'Intangible: Franchises and consents'),
(4, '303', 'Intangible: Miscellaneous intangible plant'),
(5, 'subtotal_intangible', 'Subtotal: Intangible Plant'),
# 2. Production Plant
# A. steam production
(8, '310', 'Steam production: Land and land rights'),
(9, '311', 'Steam production: Structures and improvements'),
(10, '312', 'Steam production: Boiler plant equipment'),
(11, '313', 'Steam production: Engines and engine-driven generators'),
(12, '314', 'Steam production: Turbogenerator units'),
(13, '315', 'Steam production: Accessory electric equipment'),
(14, '316', 'Steam production: Miscellaneous power plant equipment'),
(15, '317', 'Steam production: Asset retirement costs for steam production\
plant'),
(16, 'subtotal_steam_production', 'Subtotal: Steam Production Plant'),
# B. nuclear production
(18, '320', 'Nuclear production: Land and land rights (Major only)'),
(19, '321', 'Nuclear production: Structures and improvements (Major\
only)'),
(20, '322', 'Nuclear production: Reactor plant equipment (Major only)'),
(21, '323', 'Nuclear production: Turbogenerator units (Major only)'),
(22, '324', 'Nuclear production: Accessory electric equipment (Major\
only)'),
(23, '325', 'Nuclear production: Miscellaneous power plant equipment\
(Major only)'),
(24, '326', 'Nuclear production: Asset retirement costs for nuclear\
production plant (Major only)'),
(25, 'subtotal_nuclear_produciton', 'Subtotal: Nuclear Production Plant'),
# C. hydraulic production
(27, '330', 'Hydraulic production: Land and land rights'),
(28, '331', 'Hydraulic production: Structures and improvements'),
(29, '332', 'Hydraulic production: Reservoirs, dams, and waterways'),
(30, '333', 'Hydraulic production: Water wheels, turbines and generators'),
(31, '334', 'Hydraulic production: Accessory electric equipment'),
(32, '335', 'Hydraulic production: Miscellaneous power plant equipment'),
(33, '336', 'Hydraulic production: Roads, railroads and bridges'),
(34, '337', 'Hydraulic production: Asset retirement costs for hydraulic\
production plant'),
(35, 'subtotal_hydraulic_production', 'Subtotal: Hydraulic Production\
Plant'),
# D. other production
(37, '340', 'Other production: Land and land rights'),
(38, '341', 'Other production: Structures and improvements'),
(39, '342', 'Other production: Fuel holders, producers, and accessories'),
(40, '343', 'Other production: Prime movers'),
(41, '344', 'Other production: Generators'),
(42, '345', 'Other production: Accessory electric equipment'),
(43, '346', 'Other production: Miscellaneous power plant equipment'),
(44, '347', 'Other production: Asset retirement costs for other production\
plant'),
(None, '348', 'Other production: Energy Storage Equipment'),
(45, 'subtotal_other_production', 'Subtotal: Other Production Plant'),
(46, 'subtotal_production', 'Subtotal: Production Plant'),
# 3. Transmission Plant,
(48, '350', 'Transmission: Land and land rights'),
(None, '351', 'Transmission: Energy Storage Equipment'),
(49, '352', 'Transmission: Structures and improvements'),
(50, '353', 'Transmission: Station equipment'),
(51, '354', 'Transmission: Towers and fixtures'),
(52, '355', 'Transmission: Poles and fixtures'),
(53, '356', 'Transmission: Overhead conductors and devices'),
(54, '357', 'Transmission: Underground conduit'),
(55, '358', 'Transmission: Underground conductors and devices'),
(56, '359', 'Transmission: Roads and trails'),
(57, '359.1', 'Transmission: Asset retirement costs for transmission\
plant'),
(58, 'subtotal_transmission', 'Subtotal: Transmission Plant'),
# 4. Distribution Plant
(60, '360', 'Distribution: Land and land rights'),
(61, '361', 'Distribution: Structures and improvements'),
(62, '362', 'Distribution: Station equipment'),
(63, '363', 'Distribution: Storage battery equipment'),
(64, '364', 'Distribution: Poles, towers and fixtures'),
(65, '365', 'Distribution: Overhead conductors and devices'),
(66, '366', 'Distribution: Underground conduit'),
(67, '367', 'Distribution: Underground conductors and devices'),
(68, '368', 'Distribution: Line transformers'),
(69, '369', 'Distribution: Services'),
(70, '370', 'Distribution: Meters'),
(71, '371', 'Distribution: Installations on customers\' premises'),
(72, '372', 'Distribution: Leased property on customers\' premises'),
(73, '373', 'Distribution: Street lighting and signal systems'),
(74, '374', 'Distribution: Asset retirement costs for distribution plant'),
(75, 'subtotal_distribution', 'Subtotal: Distribution Plant'),
# 5. Regional Transmission and Market Operation Plant
(77, '380', 'Regional transmission: Land and land rights'),
(78, '381', 'Regional transmission: Structures and improvements'),
(79, '382', 'Regional transmission: Computer hardware'),
(80, '383', 'Regional transmission: Computer software'),
(81, '384', 'Regional transmission: Communication Equipment'),
(82, '385', 'Regional transmission: Miscellaneous Regional Transmission\
and Market Operation Plant'),
(83, '386', 'Regional transmission: Asset Retirement Costs for Regional\
Transmission and Market Operation\
Plant'),
(84, 'subtotal_regional_transmission', 'Subtotal: Transmission and Market\
Operation Plant'),
(None, '387', 'Regional transmission: [Reserved]'),
# 6. General Plant
(86, '389', 'General: Land and land rights'),
(87, '390', 'General: Structures and improvements'),
(88, '391', 'General: Office furniture and equipment'),
(89, '392', 'General: Transportation equipment'),
(90, '393', 'General: Stores equipment'),
(91, '394', 'General: Tools, shop and garage equipment'),
(92, '395', 'General: Laboratory equipment'),
(93, '396', 'General: Power operated equipment'),
(94, '397', 'General: Communication equipment'),
(95, '398', 'General: Miscellaneous equipment'),
(96, 'subtotal_general', 'Subtotal: General Plant'),
(97, '399', 'General: Other tangible property'),
(98, '399.1', 'General: Asset retirement costs for general plant'),
(99, 'total_general', 'TOTAL General Plant'),
(100, '101_and_106', 'Electric plant in service (Major only)'),
(101, '102_purchased', 'Electric plant purchased'),
(102, '102_sold', 'Electric plant sold'),
(103, '103', 'Experimental plant unclassified'),
(104, 'total_electric_plant', 'TOTAL Electric Plant in Service')],
columns=['row_number', 'ferc_account_id', 'ferc_account_description'])
"""list: A list of tuples containing row numbers, FERC account IDs, and FERC
account descriptions from FERC Form 1 pages 204 - 207, Electric Plant in
Service.
"""
# Line numbers, and corresponding FERC account number
# from FERC Form 1 page 219, ACCUMULATED PROVISION FOR DEPRECIATION
# OF ELECTRIC UTILITY PLANT (Account 108).
ferc_accumulated_depreciation = pd.DataFrame.from_records([
# Section A. Balances and Changes During Year
(1, 'balance_beginning_of_year', 'Balance Beginning of Year'),
(3, 'depreciation_expense', '(403) Depreciation Expense'),
(4, 'depreciation_expense_asset_retirement', \
'(403.1) Depreciation Expense for Asset Retirement Costs'),
(5, 'expense_electric_plant_leased_to_others', \
'(413) Exp. of Elec. Plt. Leas. to Others'),
(6, 'transportation_expenses_clearing',\
'Transportation Expenses-Clearing'),
(7, 'other_clearing_accounts', 'Other Clearing Accounts'),
(8, 'other_accounts_specified',\
'Other Accounts (Specify, details in footnote):'),
# blank: might also be other charges like line 17.
(9, 'other_charges', 'Other Charges:'),
(10, 'total_depreciation_provision_for_year',\
'TOTAL Deprec. Prov for Year (Enter Total of lines 3 thru 9)'),
(11, 'net_charges_for_plant_retired', 'Net Charges for Plant Retired:'),
(12, 'book_cost_of_plant_retired', 'Book Cost of Plant Retired'),
(13, 'cost_of_removal', 'Cost of Removal'),
(14, 'salvage_credit', 'Salvage (Credit)'),
(15, 'total_net_charges_for_plant_retired',\
'TOTAL Net Chrgs. for Plant Ret. (Enter Total of lines 12 thru 14)'),
(16, 'other_debit_or_credit_items',\
'Other Debit or Cr. Items (Describe, details in footnote):'),
# blank: can be "Other Charges", e.g. in 2012 for PSCo.
(17, 'other_charges_2', 'Other Charges 2'),
(18, 'book_cost_or_asset_retirement_costs_retired',\
'Book Cost or Asset Retirement Costs Retired'),
(19, 'balance_end_of_year', \
'Balance End of Year (Enter Totals of lines 1, 10, 15, 16, and 18)'),
# Section B. Balances at End of Year According to Functional Classification
(20, 'steam_production_end_of_year', 'Steam Production'),
(21, 'nuclear_production_end_of_year', 'Nuclear Production'),
(22, 'hydraulic_production_end_of_year',\
'Hydraulic Production-Conventional'),
(23, 'pumped_storage_end_of_year', 'Hydraulic Production-Pumped Storage'),
(24, 'other_production', 'Other Production'),
(25, 'transmission', 'Transmission'),
(26, 'distribution', 'Distribution'),
(27, 'regional_transmission_and_market_operation',
'Regional Transmission and Market Operation'),
(28, 'general', 'General'),
(29, 'total', 'TOTAL (Enter Total of lines 20 thru 28)')],
columns=['row_number', 'line_id', 'ferc_account_description'])
"""list: A list of tuples containing row numbers, FERC account IDs, and FERC
account descriptions from FERC Form 1 page 219, Accumulated Provision for
Depreciation of electric utility plant(Account 108).
"""
######################################################################
# Constants from EIA From 923 used within init.py module
######################################################################
# From Page 7 of EIA Form 923, Census Region a US state is located in
census_region = {
'NEW': 'New England',
'MAT': 'Middle Atlantic',
'SAT': 'South Atlantic',
'ESC': 'East South Central',
'WSC': 'West South Central',
'ENC': 'East North Central',
'WNC': 'West North Central',
'MTN': 'Mountain',
'PACC': 'Pacific Contiguous (OR, WA, CA)',
'PACN': 'Pacific Non-Contiguous (AK, HI)',
}
"""dict: A dictionary mapping Census Region abbreviations (keys) to Census
Region names (values).
"""
# From Page 7 of EIA Form923
# Static list of NERC (North American Electric Reliability Corporation)
# regions, used for where plant is located
nerc_region = {
'NPCC': 'Northeast Power Coordinating Council',
'ASCC': 'Alaska Systems Coordinating Council',
'HICC': 'Hawaiian Islands Coordinating Council',
'MRO': 'Midwest Reliability Organization',
'SERC': 'SERC Reliability Corporation',
'RFC': 'Reliability First Corporation',
'SPP': 'Southwest Power Pool',
'TRE': 'Texas Regional Entity',
'FRCC': 'Florida Reliability Coordinating Council',
'WECC': 'Western Electricity Coordinating Council'
}
"""dict: A dictionary mapping NERC Region abbreviations (keys) to NERC
Region names (values).
"""
# From Page 7 of EIA Form 923 EIA’s internal consolidated NAICS sectors.
# For internal purposes, EIA consolidates NAICS categories into seven groups.
sector_eia = {
# traditional regulated electric utilities
'1': 'Electric Utility',
# Independent power producers which are not cogenerators
'2': 'NAICS-22 Non-Cogen',
# Independent power producers which are cogenerators, but whose
# primary business purpose is the sale of electricity to the public
'3': 'NAICS-22 Cogen',
# Commercial non-cogeneration facilities that produce electric power,
    # are connected to the grid, and can sell power to the public
'4': 'Commercial NAICS Non-Cogen',
# Commercial cogeneration facilities that produce electric power, are
# connected to the grid, and can sell power to the public
'5': 'Commercial NAICS Cogen',
# Industrial non-cogeneration facilities that produce electric power, are
    # connected to the grid, and can sell power to the public
'6': 'Industrial NAICS Non-Cogen',
# Industrial cogeneration facilities that produce electric power, are
    # connected to the grid, and can sell power to the public
'7': 'Industrial NAICS Cogen'
}
"""dict: A dictionary mapping EIA numeric codes (keys) to EIA’s internal
consolidated NAICS sectors (values).
"""
# EIA 923: EIA Type of prime mover:
prime_movers_eia923 = {
'BA': 'Energy Storage, Battery',
'BT': 'Turbines Used in a Binary Cycle. Including those used for geothermal applications',
'CA': 'Combined-Cycle -- Steam Part',
'CC': 'Combined-Cycle, Total Unit',
'CE': 'Energy Storage, Compressed Air',
'CP': 'Energy Storage, Concentrated Solar Power',
'CS': 'Combined-Cycle Single-Shaft Combustion Turbine and Steam Turbine share of single',
'CT': 'Combined-Cycle Combustion Turbine Part',
'ES': 'Energy Storage, Other (Specify on Schedule 9, Comments)',
'FC': 'Fuel Cell',
'FW': 'Energy Storage, Flywheel',
'GT': 'Combustion (Gas) Turbine. Including Jet Engine design',
'HA': 'Hydrokinetic, Axial Flow Turbine',
'HB': 'Hydrokinetic, Wave Buoy',
'HK': 'Hydrokinetic, Other',
'HY': 'Hydraulic Turbine. Including turbines associated with delivery of water by pipeline.',
'IC': 'Internal Combustion (diesel, piston, reciprocating) Engine',
'PS': 'Energy Storage, Reversible Hydraulic Turbine (Pumped Storage)',
'OT': 'Other',
'ST': 'Steam Turbine. Including Nuclear, Geothermal, and Solar Steam (does not include Combined Cycle).',
'PV': 'Photovoltaic',
'WT': 'Wind Turbine, Onshore',
'WS': 'Wind Turbine, Offshore'
}
"""dict: A dictionary mapping EIA 923 prime mover codes (keys) and prime mover
names / descriptions (values).
"""
# EIA 923: The fuel code reported to EIA. Two or three letter alphanumeric:
fuel_type_eia923 = {
'AB': 'Agricultural By-Products',
'ANT': 'Anthracite Coal',
'BFG': 'Blast Furnace Gas',
'BIT': 'Bituminous Coal',
'BLQ': 'Black Liquor',
'CBL': 'Coal, Blended',
'DFO': 'Distillate Fuel Oil. Including diesel, No. 1, No. 2, and No. 4 fuel oils.',
'GEO': 'Geothermal',
'JF': 'Jet Fuel',
'KER': 'Kerosene',
'LFG': 'Landfill Gas',
'LIG': 'Lignite Coal',
'MSB': 'Biogenic Municipal Solid Waste',
'MSN': 'Non-biogenic Municipal Solid Waste',
'MSW': 'Municipal Solid Waste',
'MWH': 'Electricity used for energy storage',
'NG': 'Natural Gas',
'NUC': 'Nuclear. Including Uranium, Plutonium, and Thorium.',
'OBG': 'Other Biomass Gas. Including digester gas, methane, and other biomass gases.',
'OBL': 'Other Biomass Liquids',
'OBS': 'Other Biomass Solids',
'OG': 'Other Gas',
'OTH': 'Other Fuel',
'PC': 'Petroleum Coke',
'PG': 'Gaseous Propane',
'PUR': 'Purchased Steam',
'RC': 'Refined Coal',
'RFO': 'Residual Fuel Oil. Including No. 5 & 6 fuel oils and bunker C fuel oil.',
'SC': 'Coal-based Synfuel. Including briquettes, pellets, or extrusions, which are formed by binding materials or processes that recycle materials.',
'SGC': 'Coal-Derived Synthesis Gas',
'SGP': 'Synthesis Gas from Petroleum Coke',
'SLW': 'Sludge Waste',
'SUB': 'Subbituminous Coal',
'SUN': 'Solar',
'TDF': 'Tire-derived Fuels',
'WAT': 'Water at a Conventional Hydroelectric Turbine and water used in Wave Buoy Hydrokinetic Technology, current Hydrokinetic Technology, Tidal Hydrokinetic Technology, and Pumping Energy for Reversible (Pumped Storage) Hydroelectric Turbines.',
'WC': 'Waste/Other Coal. Including anthracite culm, bituminous gob, fine coal, lignite waste, waste coal.',
'WDL': 'Wood Waste Liquids, excluding Black Liquor. Including red liquor, sludge wood, spent sulfite liquor, and other wood-based liquids.',
    'WDS': 'Wood/Wood Waste Solids. Including paper pellets, railroad ties, utility poles, wood chips, bark, and other wood waste solids.',
'WH': 'Waste Heat not directly attributed to a fuel source',
'WND': 'Wind',
    'WO': 'Waste/Other Oil. Including crude oil, liquid butane, liquid propane, naphtha, oil waste, re-refined motor oil, sludge oil, tar oil, or other petroleum-based liquid wastes.'
}
"""dict: A dictionary mapping EIA 923 fuel type codes (keys) and fuel type
names / descriptions (values).
"""
# Fuel type strings for EIA 923 generator fuel table
fuel_type_eia923_gen_fuel_coal_strings = [
'ant', 'bit', 'cbl', 'lig', 'pc', 'rc', 'sc', 'sub', 'wc', ]
"""list: The list of EIA 923 Generation Fuel strings associated with coal fuel.
"""
fuel_type_eia923_gen_fuel_oil_strings = [
'dfo', 'rfo', 'wo', 'jf', 'ker', ]
"""list: The list of EIA 923 Generation Fuel strings associated with oil fuel.
"""
fuel_type_eia923_gen_fuel_gas_strings = [
'bfg', 'lfg', 'ng', 'og', 'obg', 'pg', 'sgc', 'sgp', ]
"""list: The list of EIA 923 Generation Fuel strings associated with gas fuel.
"""
fuel_type_eia923_gen_fuel_solar_strings = ['sun', ]
"""list: The list of EIA 923 Generation Fuel strings associated with solar
power.
"""
fuel_type_eia923_gen_fuel_wind_strings = ['wnd', ]
"""list: The list of EIA 923 Generation Fuel strings associated with wind
power.
"""
fuel_type_eia923_gen_fuel_hydro_strings = ['wat', ]
"""list: The list of EIA 923 Generation Fuel strings associated with hydro
power.
"""
fuel_type_eia923_gen_fuel_nuclear_strings = ['nuc', ]
"""list: The list of EIA 923 Generation Fuel strings associated with nuclear
power.
"""
fuel_type_eia923_gen_fuel_waste_strings = [
'ab', 'blq', 'msb', 'msn', 'msw', 'obl', 'obs', 'slw', 'tdf', 'wdl', 'wds']
"""list: The list of EIA 923 Generation Fuel strings associated with solid waste
fuel.
"""
fuel_type_eia923_gen_fuel_other_strings = ['geo', 'mwh', 'oth', 'pur', 'wh', ]
"""list: The list of EIA 923 Generation Fuel strings associated with geothermal
power.
"""
fuel_type_eia923_gen_fuel_simple_map = {
'coal': fuel_type_eia923_gen_fuel_coal_strings,
'oil': fuel_type_eia923_gen_fuel_oil_strings,
'gas': fuel_type_eia923_gen_fuel_gas_strings,
'solar': fuel_type_eia923_gen_fuel_solar_strings,
'wind': fuel_type_eia923_gen_fuel_wind_strings,
'hydro': fuel_type_eia923_gen_fuel_hydro_strings,
'nuclear': fuel_type_eia923_gen_fuel_nuclear_strings,
'waste': fuel_type_eia923_gen_fuel_waste_strings,
'other': fuel_type_eia923_gen_fuel_other_strings,
}
"""dict: A dictionary mapping EIA 923 Generation Fuel fuel types (keys) to lists
of strings associated with that fuel type (values).
"""
# Fuel type strings for EIA 923 boiler fuel table
fuel_type_eia923_boiler_fuel_coal_strings = [
'ant', 'bit', 'lig', 'pc', 'rc', 'sc', 'sub', 'wc', ]
"""list: A list of strings from EIA 923 Boiler Fuel associated with fuel type
coal.
"""
fuel_type_eia923_boiler_fuel_oil_strings = ['dfo', 'rfo', 'wo', 'jf', 'ker', ]
"""list: A list of strings from EIA 923 Boiler Fuel associated with fuel type
oil.
"""
fuel_type_eia923_boiler_fuel_gas_strings = [
'bfg', 'lfg', 'ng', 'og', 'obg', 'pg', 'sgc', 'sgp', ]
"""list: A list of strings from EIA 923 Boiler Fuel associated with fuel type
gas.
"""
fuel_type_eia923_boiler_fuel_waste_strings = [
'ab', 'blq', 'msb', 'msn', 'obl', 'obs', 'slw', 'tdf', 'wdl', 'wds', ]
"""list: A list of strings from EIA 923 Boiler Fuel associated with fuel type
waste.
"""
fuel_type_eia923_boiler_fuel_other_strings = ['oth', 'pur', 'wh', ]
"""list: A list of strings from EIA 923 Boiler Fuel associated with fuel type
other.
"""
fuel_type_eia923_boiler_fuel_simple_map = {
'coal': fuel_type_eia923_boiler_fuel_coal_strings,
'oil': fuel_type_eia923_boiler_fuel_oil_strings,
'gas': fuel_type_eia923_boiler_fuel_gas_strings,
'waste': fuel_type_eia923_boiler_fuel_waste_strings,
'other': fuel_type_eia923_boiler_fuel_other_strings,
}
"""dict: A dictionary mapping EIA 923 Boiler Fuel fuel types (keys) to lists
of strings associated with that fuel type (values).
"""
# PUDL consolidation of EIA923 AER fuel type strings into same categories as
# 'energy_source_eia923' plus additional renewable and nuclear categories.
# These classifications are not currently used, as the EIA fuel type and energy
# source designations provide more detailed information.
aer_coal_strings = ['col', 'woc', 'pc']
"""list: A list of EIA 923 AER fuel type strings associated with coal.
"""
aer_gas_strings = ['mlg', 'ng', 'oog']
"""list: A list of EIA 923 AER fuel type strings associated with gas.
"""
aer_oil_strings = ['dfo', 'rfo', 'woo']
"""list: A list of EIA 923 AER fuel type strings associated with oil.
"""
aer_solar_strings = ['sun']
"""list: A list of EIA 923 AER fuel type strings associated with solar power.
"""
aer_wind_strings = ['wnd']
"""list: A list of EIA 923 AER fuel type strings associated with wind power.
"""
aer_hydro_strings = ['hps', 'hyc']
"""list: A list of EIA 923 AER fuel type strings associated with hydro power.
"""
aer_nuclear_strings = ['nuc']
"""list: A list of EIA 923 AER fuel type strings associated with nuclear power.
"""
aer_waste_strings = ['www']
"""list: A list of EIA 923 AER fuel type strings associated with waste.
"""
aer_other_strings = ['geo', 'orw', 'oth']
"""list: A list of EIA 923 AER fuel type strings associated with other fuel.
"""
aer_fuel_type_strings = {
'coal': aer_coal_strings,
'gas': aer_gas_strings,
'oil': aer_oil_strings,
'solar': aer_solar_strings,
'wind': aer_wind_strings,
'hydro': aer_hydro_strings,
'nuclear': aer_nuclear_strings,
'waste': aer_waste_strings,
'other': aer_other_strings
}
"""dict: A dictionary mapping EIA 923 AER fuel types (keys) to lists
of strings associated with that fuel type (values).
"""
# EIA 923: A partial aggregation of the reported fuel type codes into
# larger categories used by EIA in, for example,
# the Annual Energy Review (AER). Two or three letter alphanumeric.
# See the Fuel Code table (Table 5), below:
fuel_type_aer_eia923 = {
'SUN': 'Solar PV and thermal',
'COL': 'Coal',
'DFO': 'Distillate Petroleum',
'GEO': 'Geothermal',
'HPS': 'Hydroelectric Pumped Storage',
'HYC': 'Hydroelectric Conventional',
'MLG': 'Biogenic Municipal Solid Waste and Landfill Gas',
'NG': 'Natural Gas',
'NUC': 'Nuclear',
'OOG': 'Other Gases',
'ORW': 'Other Renewables',
'OTH': 'Other (including nonbiogenic MSW)',
'PC': 'Petroleum Coke',
'RFO': 'Residual Petroleum',
'WND': 'Wind',
'WOC': 'Waste Coal',
'WOO': 'Waste Oil',
'WWW': 'Wood and Wood Waste'
}
"""dict: A dictionary mapping EIA 923 AER fuel types (keys) to lists
of strings associated with that fuel type (values).
"""
fuel_type_eia860_coal_strings = ['ant', 'bit', 'cbl', 'lig', 'pc', 'rc', 'sc',
'sub', 'wc', 'coal', 'petroleum coke', 'col',
'woc']
"""list: A list of strings from EIA 860 associated with fuel type coal.
"""
fuel_type_eia860_oil_strings = ['dfo', 'jf', 'ker', 'rfo', 'wo', 'woo',
'petroleum']
"""list: A list of strings from EIA 860 associated with fuel type oil.
"""
fuel_type_eia860_gas_strings = ['bfg', 'lfg', 'mlg', 'ng', 'obg', 'og', 'pg',
'sgc', 'sgp', 'natural gas', 'other gas',
'oog', 'sg']
"""list: A list of strings from EIA 860 associated with fuel type gas.
"""
fuel_type_eia860_solar_strings = ['sun', 'solar']
"""list: A list of strings from EIA 860 associated with solar power.
"""
fuel_type_eia860_wind_strings = ['wnd', 'wind', 'wt']
"""list: A list of strings from EIA 860 associated with wind power.
"""
fuel_type_eia860_hydro_strings = ['wat', 'hyc', 'hps', 'hydro']
"""list: A list of strings from EIA 860 associated with hydro power.
"""
fuel_type_eia860_nuclear_strings = ['nuc', 'nuclear']
"""list: A list of strings from EIA 860 associated with nuclear power.
"""
fuel_type_eia860_waste_strings = ['ab', 'blq', 'bm', 'msb', 'msn', 'obl',
'obs', 'slw', 'tdf', 'wdl', 'wds', 'biomass',
'msw', 'www']
"""list: A list of strings from EIA 860 associated with fuel type waste.
"""
fuel_type_eia860_other_strings = ['mwh', 'oth', 'pur', 'wh', 'geo', 'none',
'orw', 'other']
"""list: A list of strings from EIA 860 associated with fuel type other.
"""
fuel_type_eia860_simple_map = {
'coal': fuel_type_eia860_coal_strings,
'oil': fuel_type_eia860_oil_strings,
'gas': fuel_type_eia860_gas_strings,
'solar': fuel_type_eia860_solar_strings,
'wind': fuel_type_eia860_wind_strings,
'hydro': fuel_type_eia860_hydro_strings,
'nuclear': fuel_type_eia860_nuclear_strings,
'waste': fuel_type_eia860_waste_strings,
'other': fuel_type_eia860_other_strings,
}
"""dict: A dictionary mapping EIA 860 fuel types (keys) to lists
of strings associated with that fuel type (values).
"""
# EIA 923/860: Lumping of energy source categories.
energy_source_eia_simple_map = {
'coal': ['ANT', 'BIT', 'LIG', 'PC', 'SUB', 'WC', 'RC'],
'oil': ['DFO', 'JF', 'KER', 'RFO', 'WO'],
'gas': ['BFG', 'LFG', 'NG', 'OBG', 'OG', 'PG', 'SG', 'SGC', 'SGP'],
'solar': ['SUN'],
'wind': ['WND'],
'hydro': ['WAT'],
'nuclear': ['NUC'],
'waste': ['AB', 'BLQ', 'MSW', 'OBL', 'OBS', 'SLW', 'TDF', 'WDL', 'WDS'],
'other': ['GEO', 'MWH', 'OTH', 'PUR', 'WH']
}
"""dict: A dictionary mapping EIA fuel types (keys) to fuel codes (values).
"""
fuel_group_eia923_simple_map = {
'coal': ['coal', 'petroleum coke'],
'oil': ['petroleum'],
'gas': ['natural gas', 'other gas']
}
"""dict: A dictionary mapping EIA 923 simple fuel types("oil", "coal", "gas")
(keys) to fuel types (values).
"""
# EIA 923: The type of physical units fuel consumption is reported in.
# All consumption is reported in short tons for solids, thousands of cubic
# feet for gases, or barrels for liquids.
fuel_units_eia923 = {
'mcf': 'Thousands of cubic feet (for gases)',
'short_tons': 'Short tons (for solids)',
'barrels': 'Barrels (for liquids)'
}
"""dict: A dictionary mapping EIA 923 fuel units (keys) to fuel unit
descriptions (values).
"""
# EIA 923: Designates the purchase type under which receipts occurred
# in the reporting month. One or two character alphanumeric:
contract_type_eia923 = {
'C': 'Contract - Fuel received under a purchase order or contract with a term of one year or longer. Contracts with a shorter term are considered spot purchases ',
'NC': 'New Contract - Fuel received under a purchase order or contract with duration of one year or longer, under which deliveries were first made during the reporting month',
'N': 'New Contract - see NC code. This abbreviation existed only in 2008 before being replaced by NC.',
'S': 'Spot Purchase',
'T': 'Tolling Agreement – Fuel received under a tolling agreement (bartering arrangement of fuel for generation)'
}
"""dict: A dictionary mapping EIA 923 contract codes (keys) to contract
descriptions (values) for each month in the Fuel Receipts and Costs table.
"""
# EIA 923: The fuel code associated with the fuel receipt.
# Defined on Page 7 of EIA Form 923
# Two or three character alphanumeric:
energy_source_eia923 = {
'ANT': 'Anthracite Coal',
'BFG': 'Blast Furnace Gas',
'BM': 'Biomass',
'BIT': 'Bituminous Coal',
'DFO': 'Distillate Fuel Oil. Including diesel, No. 1, No. 2, and No. 4 fuel oils.',
'JF': 'Jet Fuel',
'KER': 'Kerosene',
'LIG': 'Lignite Coal',
'NG': 'Natural Gas',
'PC': 'Petroleum Coke',
    'PG': 'Gaseous Propane',
'OG': 'Other Gas',
'RC': 'Refined Coal',
'RFO': 'Residual Fuel Oil. Including No. 5 & 6 fuel oils and bunker C fuel oil.',
'SG': 'Synthesis Gas from Petroleum Coke',
'SGP': 'Petroleum Coke Derived Synthesis Gas',
'SC': 'Coal-based Synfuel. Including briquettes, pellets, or extrusions, which are formed by binding materials or processes that recycle materials.',
'SUB': 'Subbituminous Coal',
'WC': 'Waste/Other Coal. Including anthracite culm, bituminous gob, fine coal, lignite waste, waste coal.',
    'WO': 'Waste/Other Oil. Including crude oil, liquid butane, liquid propane, naphtha, oil waste, re-refined motor oil, sludge oil, tar oil, or other petroleum-based liquid wastes.',
}
"""dict: A dictionary mapping fuel codes (keys) to fuel descriptions (values)
for each fuel receipt from the EIA 923 Fuel Receipts and Costs table.
"""
# EIA 923 Fuel Group, from Page 7 EIA Form 923
# Groups fossil fuel energy sources into fuel groups that are located in the
# Electric Power Monthly: Coal, Natural Gas, Petroleum, Petroleum Coke.
fuel_group_eia923 = (
'coal',
'natural_gas',
'petroleum',
'petroleum_coke',
'other_gas'
)
"""tuple: A tuple containing EIA 923 fuel groups.
"""
# EIA 923: Type of Coal Mine as defined on Page 7 of EIA Form 923
coalmine_type_eia923 = {
'P': 'Preparation Plant',
'S': 'Surface',
'U': 'Underground',
'US': 'Both an underground and surface mine with most coal extracted from underground',
'SU': 'Both an underground and surface mine with most coal extracted from surface',
}
"""dict: A dictionary mapping EIA 923 coal mine type codes (keys) to
descriptions (values).
"""
# EIA 923: State abbreviation related to coal mine location.
# Country abbreviations are also used in this category, but they are
# non-standard because of collisions with US state names. Instead of using
# the provided non-standard names, we convert to ISO-3166-1 three letter
# country codes https://en.wikipedia.org/wiki/ISO_3166-1_alpha-3
coalmine_country_eia923 = {
'AU': 'AUS', # Australia
'CL': 'COL', # Colombia
'CN': 'CAN', # Canada
'IS': 'IDN', # Indonesia
'PL': 'POL', # Poland
'RS': 'RUS', # Russia
'UK': 'GBR', # United Kingdom of Great Britain
'VZ': 'VEN', # Venezuela
'OT': 'other_country',
'IM': 'unknown'
}
"""dict: A dictionary mapping coal mine country codes (keys) to ISO-3166-1 three
letter country codes (values).
"""
# EIA 923: Mode for the longest / second longest distance.
transport_modes_eia923 = {
'RR': 'Rail: Shipments of fuel moved to consumers by rail \
(private or public/commercial). Included is coal hauled to or \
away from a railroad siding by truck if the truck did not use public\
roads.',
'RV': 'River: Shipments of fuel moved to consumers via river by barge. \
Not included are shipments to Great Lakes coal loading docks, \
tidewater piers, or coastal ports.',
'GL': 'Great Lakes: Shipments of coal moved to consumers via \
the Great Lakes. These shipments are moved via the Great Lakes \
coal loading docks, which are identified by name and location as \
follows: Conneaut Coal Storage & Transfer, Conneaut, Ohio; \
NS Coal Dock (Ashtabula Coal Dock), Ashtabula, Ohio; \
Sandusky Coal Pier, Sandusky, Ohio; Toledo Docks, Toledo, Ohio; \
KCBX Terminals Inc., Chicago, Illinois; \
Superior Midwest Energy Terminal, Superior, Wisconsin',
'TP': 'Tidewater Piers and Coastal Ports: Shipments of coal moved to \
Tidewater Piers and Coastal Ports for further shipments to consumers \
via coastal water or ocean. The Tidewater Piers and Coastal Ports \
are identified by name and location as follows: Dominion Terminal \
Associates, Newport News, Virginia; McDuffie Coal Terminal, Mobile, \
Alabama; IC Railmarine Terminal, Convent, Louisiana; \
International Marine Terminals, Myrtle Grove, Louisiana; \
Cooper/T. Smith Stevedoring Co. Inc., Darrow, Louisiana; \
Seward Terminal Inc., Seward, Alaska; Los Angeles Export Terminal, \
Inc., Los Angeles, California; Levin-Richmond Terminal Corp., \
Richmond, California; Baltimore Terminal, Baltimore, Maryland; \
Norfolk Southern Lamberts Point P-6, Norfolk, Virginia; \
Chesapeake Bay Piers, Baltimore, Maryland; Pier IX Terminal Company, \
Newport News, Virginia; Electro-Coal Transport Corp., Davant, \
Louisiana',
'WT': 'Water: Shipments of fuel moved to consumers by other waterways.',
'TR': 'Truck: Shipments of fuel moved to consumers by truck. \
Not included is fuel hauled to or away from a railroad siding by \
truck on non-public roads.',
'tr': 'Truck: Shipments of fuel moved to consumers by truck. \
Not included is fuel hauled to or away from a railroad siding by \
truck on non-public roads.',
'TC': 'Tramway/Conveyor: Shipments of fuel moved to consumers \
by tramway or conveyor.',
'SP': 'Slurry Pipeline: Shipments of coal moved to consumers \
by slurry pipeline.',
'PL': 'Pipeline: Shipments of fuel moved to consumers by pipeline'
}
"""dict: A dictionary mapping primary and secondary transportation mode codes
(keys) to descriptions (values).
"""
# we need to include all of the columns which we want to keep for either the
# entity or annual tables. The order here matters. We need to harvest the plant
# location before harvesting the location of the utilites for example.
entities = {
'plants': [
# base cols
['plant_id_eia'],
# static cols
['balancing_authority_code_eia', 'balancing_authority_name_eia',
'city', 'county', 'ferc_cogen_status',
'ferc_exempt_wholesale_generator', 'ferc_small_power_producer',
'grid_voltage_2_kv', 'grid_voltage_3_kv', 'grid_voltage_kv',
'iso_rto_code', 'latitude', 'longitude', 'service_area',
'plant_name_eia', 'primary_purpose_naics_id',
'sector_id', 'sector_name', 'state', 'street_address', 'zip_code'],
# annual cols
['ash_impoundment', 'ash_impoundment_lined', 'ash_impoundment_status',
'datum', 'energy_storage', 'ferc_cogen_docket_no', 'water_source',
'ferc_exempt_wholesale_generator_docket_no',
'ferc_small_power_producer_docket_no',
'liquefied_natural_gas_storage',
'natural_gas_local_distribution_company', 'natural_gas_storage',
'natural_gas_pipeline_name_1', 'natural_gas_pipeline_name_2',
'natural_gas_pipeline_name_3', 'nerc_region', 'net_metering',
'pipeline_notes', 'regulatory_status_code',
'transmission_distribution_owner_id',
'transmission_distribution_owner_name',
'transmission_distribution_owner_state', 'utility_id_eia'],
# need type fixing
{},
],
'generators': [
# base cols
['plant_id_eia', 'generator_id'],
# static cols
['prime_mover_code', 'duct_burners', 'operating_date',
'topping_bottoming_code', 'solid_fuel_gasification',
'pulverized_coal_tech', 'fluidized_bed_tech', 'subcritical_tech',
'supercritical_tech', 'ultrasupercritical_tech', 'stoker_tech',
'other_combustion_tech', 'bypass_heat_recovery',
'rto_iso_lmp_node_id', 'rto_iso_location_wholesale_reporting_id',
'associated_combined_heat_power', 'original_planned_operating_date',
'operating_switch', 'previously_canceled'],
# annual cols
['capacity_mw', 'fuel_type_code_pudl', 'multiple_fuels',
'ownership_code', 'owned_by_non_utility', 'deliver_power_transgrid',
'summer_capacity_mw', 'winter_capacity_mw', 'summer_capacity_estimate',
'winter_capacity_estimate', 'minimum_load_mw', 'distributed_generation',
'technology_description', 'reactive_power_output_mvar',
'energy_source_code_1', 'energy_source_code_2',
'energy_source_code_3', 'energy_source_code_4',
'energy_source_code_5', 'energy_source_code_6',
'energy_source_1_transport_1', 'energy_source_1_transport_2',
'energy_source_1_transport_3', 'energy_source_2_transport_1',
'energy_source_2_transport_2', 'energy_source_2_transport_3',
'startup_source_code_1', 'startup_source_code_2',
'startup_source_code_3', 'startup_source_code_4',
'time_cold_shutdown_full_load_code', 'syncronized_transmission_grid',
'turbines_num', 'operational_status_code', 'operational_status',
'planned_modifications', 'planned_net_summer_capacity_uprate_mw',
'planned_net_winter_capacity_uprate_mw', 'planned_new_capacity_mw',
'planned_uprate_date', 'planned_net_summer_capacity_derate_mw',
'planned_net_winter_capacity_derate_mw', 'planned_derate_date',
'planned_new_prime_mover_code', 'planned_energy_source_code_1',
'planned_repower_date', 'other_planned_modifications',
'other_modifications_date', 'planned_retirement_date',
'carbon_capture', 'cofire_fuels', 'switch_oil_gas',
'turbines_inverters_hydrokinetics', 'nameplate_power_factor',
'uprate_derate_during_year', 'uprate_derate_completed_date',
'current_planned_operating_date', 'summer_estimated_capability_mw',
'winter_estimated_capability_mw', 'retirement_date',
'utility_id_eia', 'data_source'],
# need type fixing
{}
],
# utilities must come after plants. plant location needs to be
# removed before the utility locations are compiled
'utilities': [
# base cols
['utility_id_eia'],
# static cols
['utility_name_eia'],
# annual cols
['street_address', 'city', 'state', 'zip_code', 'entity_type',
'plants_reported_owner', 'plants_reported_operator',
'plants_reported_asset_manager', 'plants_reported_other_relationship',
'attention_line', 'address_2', 'zip_code_4',
'contact_firstname', 'contact_lastname', 'contact_title',
'contact_firstname_2', 'contact_lastname_2', 'contact_title_2',
'phone_extension_1', 'phone_extension_2', 'phone_number_1',
'phone_number_2'],
# need type fixing
{'utility_id_eia': 'int64', }, ],
'boilers': [
# base cols
['plant_id_eia', 'boiler_id'],
# static cols
['prime_mover_code'],
# annual cols
[],
# need type fixing
{},
]
}
"""dict: A dictionary containing table name strings (keys) and lists of columns
to keep for those tables (values).
"""
epacems_tables = ("hourly_emissions_epacems")
"""tuple: A tuple containing tables of EPA CEMS data to pull into PUDL.
"""
files_dict_epaipm = {
'transmission_single_epaipm': '*table_3-21*',
'transmission_joint_epaipm': '*transmission_joint_ipm*',
'load_curves_epaipm': '*table_2-2_*',
'plant_region_map_epaipm': '*needs_v6*',
}
"""dict: A dictionary of EPA IPM tables and strings that files of those tables
contain.
"""
epaipm_url_ext = {
'transmission_single_epaipm': 'table_3-21_annual_transmission_capabilities_of_u.s._model_regions_in_epa_platform_v6_-_2021.xlsx',
'load_curves_epaipm': 'table_2-2_load_duration_curves_used_in_epa_platform_v6.xlsx',
'plant_region_map_epaipm': 'needs_v6_november_2018_reference_case_0.xlsx',
}
"""dict: A dictionary of EPA IPM tables and associated URLs extensions for
downloading that table's data.
"""
epaipm_region_names = [
'ERC_PHDL', 'ERC_REST', 'ERC_FRNT', 'ERC_GWAY', 'ERC_WEST',
'FRCC', 'NENG_CT', 'NENGREST', 'NENG_ME', 'MIS_AR', 'MIS_IL',
'MIS_INKY', 'MIS_IA', 'MIS_MIDA', 'MIS_LA', 'MIS_LMI', 'MIS_MNWI',
'MIS_D_MS', 'MIS_MO', 'MIS_MAPP', 'MIS_AMSO', 'MIS_WOTA',
'MIS_WUMS', 'NY_Z_A', 'NY_Z_B', 'NY_Z_C&E', 'NY_Z_D', 'NY_Z_F',
'NY_Z_G-I', 'NY_Z_J', 'NY_Z_K', 'PJM_West', 'PJM_AP', 'PJM_ATSI',
'PJM_COMD', 'PJM_Dom', 'PJM_EMAC', 'PJM_PENE', 'PJM_SMAC',
'PJM_WMAC', 'S_C_KY', 'S_C_TVA', 'S_D_AECI', 'S_SOU', 'S_VACA',
'SPP_NEBR', 'SPP_N', 'SPP_SPS', 'SPP_WEST', 'SPP_KIAM', 'SPP_WAUE',
'WECC_AZ', 'WEC_BANC', 'WECC_CO', 'WECC_ID', 'WECC_IID',
'WEC_LADW', 'WECC_MT', 'WECC_NM', 'WEC_CALN', 'WECC_NNV',
'WECC_PNW', 'WEC_SDGE', 'WECC_SCE', 'WECC_SNV', 'WECC_UT',
'WECC_WY', 'CN_AB', 'CN_BC', 'CN_NL', 'CN_MB', 'CN_NB', 'CN_NF',
'CN_NS', 'CN_ON', 'CN_PE', 'CN_PQ', 'CN_SK',
]
"""list: A list of EPA IPM region names."""
epaipm_region_aggregations = {
'PJM': [
'PJM_AP', 'PJM_ATSI', 'PJM_COMD', 'PJM_Dom',
'PJM_EMAC', 'PJM_PENE', 'PJM_SMAC', 'PJM_WMAC'
],
'NYISO': [
'NY_Z_A', 'NY_Z_B', 'NY_Z_C&E', 'NY_Z_D',
'NY_Z_F', 'NY_Z_G-I', 'NY_Z_J', 'NY_Z_K'
],
'ISONE': ['NENG_CT', 'NENGREST', 'NENG_ME'],
'MISO': [
'MIS_AR', 'MIS_IL', 'MIS_INKY', 'MIS_IA',
'MIS_MIDA', 'MIS_LA', 'MIS_LMI', 'MIS_MNWI', 'MIS_D_MS',
'MIS_MO', 'MIS_MAPP', 'MIS_AMSO', 'MIS_WOTA', 'MIS_WUMS'
],
'SPP': [
'SPP_NEBR', 'SPP_N', 'SPP_SPS', 'SPP_WEST', 'SPP_KIAM', 'SPP_WAUE'
],
'WECC_NW': [
'WECC_CO', 'WECC_ID', 'WECC_MT', 'WECC_NNV',
'WECC_PNW', 'WECC_UT', 'WECC_WY'
]
}
"""
dict: A dictionary mapping aggregated region names (keys) to lists of the
EPA IPM regions they contain (values).
"""
epaipm_rename_dict = {
'transmission_single_epaipm': {
'From': 'region_from',
'To': 'region_to',
'Capacity TTC (MW)': 'firm_ttc_mw',
'Energy TTC (MW)': 'nonfirm_ttc_mw',
'Transmission Tariff (2016 mills/kWh)': 'tariff_mills_kwh',
},
'load_curves_epaipm': {
'day': 'day_of_year',
'region': 'region_id_epaipm',
},
'plant_region_map_epaipm': {
'ORIS Plant Code': 'plant_id_eia',
'Region Name': 'region',
},
}
"""
dict: A dictionary of dictionaries containing EPA IPM tables (keys) and items
for each table to be renamed along with the replacement name (values).
"""
glue_pudl_tables = ('plants_eia', 'plants_ferc', 'plants', 'utilities_eia',
                    'utilities_ferc', 'utilities', 'utility_plant_assn')
"""
tuple: A tuple containing the glue tables that link plants and utilities
across the EIA and FERC datasets within PUDL.
"""
data_sources = (
'eia860',
'eia861',
'eia923',
'epacems',
'epaipm',
'ferc1',
'ferc714',
# 'pudl'
)
"""tuple: A tuple containing the data sources we are able to pull into PUDL."""
# All the years for which we ought to be able to download these data sources
data_years = {
'eia860': tuple(range(2001, 2020)),
'eia861': tuple(range(1990, 2020)),
'eia923': tuple(range(2001, 2020)),
'epacems': tuple(range(1995, 2021)),
'epaipm': (None, ),
'ferc1': tuple(range(1994, 2020)),
'ferc714': (None, ),
}
"""
dict: A dictionary of data sources (keys) and tuples containing the years
that we expect to be able to download for each data source (values).
"""
# The full set of partitions we currently expect to be able to ingest, per source:
working_partitions = {
'eia860': {
'years': tuple(range(2004, 2020))
},
'eia860m': {
'year_month': '2020-11'
},
'eia861': {
'years': tuple(range(2001, 2020))
},
'eia923': {
'years': tuple(range(2001, 2020))
},
'epacems': {
'years': tuple(range(1995, 2021)),
'states': tuple(cems_states.keys())},
'ferc1': {
'years': tuple(range(1994, 2020))
},
'ferc714': {},
}
"""
dict: A dictionary of data sources (keys) and dictionaries (values) that map
each partition name (sub-key) to the working partitions (sub-value), such as
the tuples of years for each data source that can be ingested into PUDL.
"""
pudl_tables = {
'eia860': eia860_pudl_tables,
'eia861': (
"service_territory_eia861",
"balancing_authority_eia861",
"sales_eia861",
"advanced_metering_infrastructure_eia861",
"demand_response_eia861",
"demand_side_management_eia861",
"distributed_generation_eia861",
"distribution_systems_eia861",
"dynamic_pricing_eia861",
"energy_efficiency_eia861",
"green_pricing_eia861",
"mergers_eia861",
"net_metering_eia861",
"non_net_metering_eia861",
"operational_data_eia861",
"reliability_eia861",
"utility_data_eia861",
),
'eia923': eia923_pudl_tables,
'epacems': epacems_tables,
'epaipm': epaipm_pudl_tables,
'ferc1': ferc1_pudl_tables,
'ferc714': (
"respondent_id_ferc714",
"id_certification_ferc714",
"gen_plants_ba_ferc714",
"demand_monthly_ba_ferc714",
"net_energy_load_ba_ferc714",
"adjacency_ba_ferc714",
"interchange_ba_ferc714",
"lambda_hourly_ba_ferc714",
"lambda_description_ferc714",
"description_pa_ferc714",
"demand_forecast_pa_ferc714",
"demand_hourly_pa_ferc714",
),
'glue': glue_pudl_tables,
}
"""
dict: A dictionary containing data sources (keys) and the lists of associated
tables from that data source that can be pulled into PUDL (values).
"""
base_data_urls = {
'eia860': 'https://www.eia.gov/electricity/data/eia860',
'eia861': 'https://www.eia.gov/electricity/data/eia861/zip',
'eia923': 'https://www.eia.gov/electricity/data/eia923',
'epacems': 'ftp://newftp.epa.gov/dmdnload/emissions/hourly/monthly',
'ferc1': 'ftp://eforms1.ferc.gov/f1allyears',
'ferc714': 'https://www.ferc.gov/docs-filing/forms/form-714/data',
'ferceqr': 'ftp://eqrdownload.ferc.gov/DownloadRepositoryProd/BulkNew/CSV',
'msha': 'https://arlweb.msha.gov/OpenGovernmentData/DataSets',
'epaipm': 'https://www.epa.gov/sites/production/files/2019-03',
'pudl': 'https://catalyst.coop/pudl/'
}
"""
dict: A dictionary containing data sources (keys) and their base data URLs
(values).
"""
need_fix_inting = {
'plants_steam_ferc1': ('construction_year', 'installation_year'),
'plants_small_ferc1': ('construction_year', 'ferc_license_id'),
'plants_hydro_ferc1': ('construction_year', 'installation_year',),
'plants_pumped_storage_ferc1': ('construction_year', 'installation_year',),
'hourly_emissions_epacems': ('facility_id', 'unit_id_epa',),
}
"""
dict: A dictionary containing tables (keys) and tuples of column names (values)
identifying integer-type columns whose null values need fixing.
"""
contributors = {
"catalyst-cooperative": {
"title": "Catalyst Cooperative",
"path": "https://catalyst.coop/",
"role": "publisher",
"email": "<EMAIL>",
"organization": "Catalyst Cooperative",
},
"zane-selvans": {
"title": "<NAME>",
"email": "<EMAIL>",
"path": "https://amateurearthling.org/",
"role": "wrangler",
"organization": "Catalyst Cooperative"
},
"christina-gosnell": {
"title": "<NAME>",
"email": "<EMAIL>",
"role": "contributor",
"organization": "Catalyst Cooperative",
},
"steven-winter": {
"title": "<NAME>",
"email": "<EMAIL>",
"role": "contributor",
"organization": "Catalyst Cooperative",
},
"alana-wilson": {
"title": "<NAME>",
"email": "<EMAIL>",
"role": "contributor",
"organization": "Catalyst Cooperative",
},
"karl-dunkle-werner": {
"title": "<NAME>",
"email": "<EMAIL>",
"path": "https://karldw.org/",
"role": "contributor",
"organization": "UC Berkeley",
},
'greg-schivley': {
"title": "<NAME>",
"role": "contributor",
},
}
"""
dict: A dictionary of dictionaries containing contributor names (keys) and
their attributes (values).
"""
data_source_info = {
"eia860": {
"title": "EIA Form 860",
"path": "https://www.eia.gov/electricity/data/eia860/",
},
"eia861": {
"title": "EIA Form 861",
"path": "https://www.eia.gov/electricity/data/eia861/",
},
"eia923": {
"title": "EIA Form 923",
"path": "https://www.eia.gov/electricity/data/eia923/",
},
"eiawater": {
"title": "EIA Water Use for Power",
"path": "https://www.eia.gov/electricity/data/water/",
},
"epacems": {
"title": "EPA Air Markets Program Data",
"path": "https://ampd.epa.gov/ampd/",
},
"epaipm": {
"title": "EPA Integrated Planning Model",
"path": "https://www.epa.gov/airmarkets/national-electric-energy-data-system-needs-v6",
},
"ferc1": {
"title": "FERC Form 1",
"path": "https://www.ferc.gov/docs-filing/forms/form-1/data.asp",
},
"ferc714": {
"title": "FERC Form 714",
"path": "https://www.ferc.gov/docs-filing/forms/form-714/data.asp",
},
"ferceqr": {
"title": "FERC Electric Quarterly Report",
"path": "https://www.ferc.gov/docs-filing/eqr.asp",
},
"msha": {
"title": "Mining Safety and Health Administration",
"path": "https://www.msha.gov/mine-data-retrieval-system",
},
"phmsa": {
"title": "Pipelines and Hazardous Materials Safety Administration",
"path": "https://www.phmsa.dot.gov/data-and-statistics/pipeline/data-and-statistics-overview",
},
"pudl": {
"title": "The Public Utility Data Liberation Project (PUDL)",
"path": "https://catalyst.coop/pudl/",
"email": "<EMAIL>",
},
}
"""
dict: A dictionary of dictionaries containing data sources (keys) and their
associated attributes (values).
"""
contributors_by_source = {
"pudl": [
"catalyst-cooperative",
"zane-selvans",
"christina-gosnell",
"steven-winter",
"alana-wilson",
"karl-dunkle-werner",
],
"eia923": [
"catalyst-cooperative",
"zane-selvans",
"christina-gosnell",
"steven-winter",
],
"eia860": [
"catalyst-cooperative",
"zane-selvans",
"christina-gosnell",
"steven-winter",
"alana-wilson",
],
"ferc1": [
"catalyst-cooperative",
"zane-selvans",
"christina-gosnell",
"steven-winter",
"alana-wilson",
],
"epacems": [
"catalyst-cooperative",
"karl-dunkle-werner",
"zane-selvans",
],
"epaipm": [
"greg-schivley",
],
}
"""
dict: A dictionary of data sources (keys) and lists of contributors (values).
"""
licenses = {
"cc-by-4.0": {
"name": "CC-BY-4.0",
"title": "Creative Commons Attribution 4.0",
"path": "https://creativecommons.org/licenses/by/4.0/"
},
"us-govt": {
"name": "other-pd",
"title": "U.S. Government Work",
"path": "http://www.usa.gov/publicdomain/label/1.0/",
}
}
"""
dict: A dictionary of dictionaries containing license types and their
attributes.
"""
output_formats = [
'sqlite',
'parquet',
'datapkg',
]
"""list: A list of types of PUDL output formats."""
keywords_by_data_source = {
'pudl': [
'us', 'electricity',
],
'eia860': [
'electricity', 'electric', 'boiler', 'generator', 'plant', 'utility',
'fuel', 'coal', 'natural gas', 'prime mover', 'eia860', 'retirement',
'capacity', 'planned', 'proposed', 'energy', 'hydro', 'solar', 'wind',
'nuclear', 'form 860', 'eia', 'annual', 'gas', 'ownership', 'steam',
'turbine', 'combustion', 'combined cycle', 'eia',
'energy information administration'
],
'eia923': [
'fuel', 'boiler', 'generator', 'plant', 'utility', 'cost', 'price',
'natural gas', 'coal', 'eia923', 'energy', 'electricity', 'form 923',
'receipts', 'generation', 'net generation', 'monthly', 'annual', 'gas',
'fuel consumption', 'MWh', 'energy information administration', 'eia',
'mercury', 'sulfur', 'ash', 'lignite', 'bituminous', 'subbituminous',
'heat content'
],
'epacems': [
'epa', 'us', 'emissions', 'pollution', 'ghg', 'so2', 'co2', 'sox',
'nox', 'load', 'utility', 'electricity', 'plant', 'generator', 'unit',
'generation', 'capacity', 'output', 'power', 'heat content', 'mmbtu',
        'steam', 'cems', 'continuous emissions monitoring system', 'hourly',
'environmental protection agency', 'ampd', 'air markets program data',
],
'ferc1': [
'electricity', 'electric', 'utility', 'plant', 'steam', 'generation',
'cost', 'expense', 'price', 'heat content', 'ferc', 'form 1',
'federal energy regulatory commission', 'capital', 'accounting',
'depreciation', 'finance', 'plant in service', 'hydro', 'coal',
'natural gas', 'gas', 'opex', 'capex', 'accounts', 'investment',
'capacity'
],
'ferc714': [
'electricity', 'electric', 'utility', 'planning area', 'form 714',
'balancing authority', 'demand', 'system lambda', 'ferc',
'federal energy regulatory commission', "hourly", "generation",
"interchange", "forecast", "load", "adjacency", "plants",
],
'epaipm': [
'epaipm', 'integrated planning',
]
}
"""dict: A dictionary of datasets (keys) and keywords (values). """
ENTITY_TYPE_DICT = {
'M': 'Municipal',
'C': 'Cooperative',
'R': 'Retail Power Marketer',
'I': 'Investor Owned',
'P': 'Political Subdivision',
'T': 'Transmission',
'S': 'State',
'W': 'Wholesale Power Marketer',
'F': 'Federal',
'A': 'Municipal Mktg Authority',
'G': 'Community Choice Aggregator',
'D': 'Nonutility DSM Administrator',
'B': 'Behind the Meter',
'Q': 'Independent Power Producer',
'IND': 'Industrial',
'COM': 'Commercial',
'PR': 'Private', # Added by AES for OD table (Arbitrary moniker)
'PO': 'Power Marketer', # Added by AES for OD table
'U': 'Unknown', # Added by AES for OD table
'O': 'Other' # Added by AES for OD table
}
# Confirm these designations -- educated guess based on the form instructions
MOMENTARY_INTERRUPTION_DEF = { # Added by AES for R table
'L': 'Less than 1 minute',
'F': 'Less than or equal to 5 minutes',
'O': 'Other',
}
# https://www.eia.gov/electricity/data/eia411/#tabs_NERC-3
RECOGNIZED_NERC_REGIONS = [
'BASN', # ASSESSMENT AREA Basin (WECC)
'CALN', # ASSESSMENT AREA California (WECC)
'CALS', # ASSESSMENT AREA California (WECC)
'DSW', # ASSESSMENT AREA Desert Southwest (WECC)
'ASCC', # Alaska
'ISONE', # ISO New England (NPCC)
'ERCOT', # lumped under TRE in 2017 Form instructions
'NORW', # ASSESSMENT AREA Northwest (WECC)
'NYISO', # ISO (NPCC)
'PJM', # RTO
'ROCK', # ASSESSMENT AREA Rockies (WECC)
'ECAR', # OLD RE Now part of RFC and SERC
'FRCC', # included in 2017 Form instructions, recently joined with SERC
'HICC', # Hawaii
'MAAC', # OLD RE Now part of RFC
'MAIN', # OLD RE Now part of SERC, RFC, MRO
'MAPP', # OLD/NEW RE Became part of MRO, resurfaced in 2010
'MRO', # RE included in 2017 Form instructions
'NPCC', # RE included in 2017 Form instructions
'RFC', # RE included in 2017 Form instructions
'SERC', # RE included in 2017 Form instructions
'SPP', # RE included in 2017 Form instructions
'TRE', # RE included in 2017 Form instructions (included ERCOT)
'WECC', # RE included in 2017 Form instructions
'WSCC', # OLD RE pre-2002 version of WECC
'MISO', # ISO unclear whether technically a regional entity, but lots of entries
'ECAR_MAAC',
'MAPP_WECC',
'RFC_SERC',
'SPP_WECC',
'MRO_WECC',
'ERCOT_SPP',
'SPP_TRE',
'ERCOT_TRE',
'MISO_TRE',
'VI', # Virgin Islands
'GU', # Guam
'PR', # Puerto Rico
'AS', # American Samoa
'UNK',
]
CUSTOMER_CLASSES = [
"commercial",
"industrial",
"direct_connection",
"other",
"residential",
"total",
"transportation"
]
TECH_CLASSES = [
    'backup',  # TODO: where is this used? It was removed from the DG table because it is not a real component
'chp_cogen',
'combustion_turbine',
'fuel_cell',
'hydro',
'internal_combustion',
'other',
'pv',
'steam',
'storage_pv',
'all_storage', # need 'all' as prefix so as not to confuse with other storage category
'total',
'virtual_pv',
'wind',
]
REVENUE_CLASSES = [
'retail_sales',
'unbundled',
'delivery_customers',
'sales_for_resale',
'credits_or_adjustments',
'other',
'transmission',
'total',
]
RELIABILITY_STANDARDS = [
'ieee_standard',
'other_standard'
]
FUEL_CLASSES = [
'gas',
'oil',
'other',
'renewable',
'water',
'wind',
'wood',
]
RTO_CLASSES = [
'caiso',
'ercot',
'pjm',
'nyiso',
'spp',
'miso',
'isone',
'other'
]
ESTIMATED_OR_ACTUAL = {'E': 'estimated', 'A': 'actual'}
TRANSIT_TYPE_DICT = {
'CV': 'conveyer',
'PL': 'pipeline',
'RR': 'railroad',
'TK': 'truck',
'WA': 'water',
'UN': 'unknown',
}
"""dict: A dictionary of datasets (keys) and keywords (values). """
column_dtypes = {
"ferc1": { # Obviously this is not yet a complete list...
"construction_year": pd.Int64Dtype(),
"installation_year": pd.Int64Dtype(),
"plant_id_ferc1": pd.Int64Dtype(),
"plant_id_pudl": pd.Int64Dtype(),
"report_date": "datetime64[ns]",
"report_year": pd.Int64Dtype(),
"utility_id_ferc1": pd.Int64Dtype(),
"utility_id_pudl": pd.Int64Dtype(),
},
"ferc714": { # INCOMPLETE
"demand_mwh": float,
"demand_annual_mwh": float,
"eia_code": pd.Int64Dtype(),
"peak_demand_summer_mw": float,
"peak_demand_winter_mw": float,
"report_date": "datetime64[ns]",
"respondent_id_ferc714": pd.Int64Dtype(),
"respondent_name_ferc714": pd.StringDtype(),
"respondent_type": pd.CategoricalDtype(categories=[
"utility", "balancing_authority",
]),
"timezone": pd.CategoricalDtype(categories=[
"America/New_York", "America/Chicago", "America/Denver",
"America/Los_Angeles", "America/Anchorage", "Pacific/Honolulu"]),
"utc_datetime": "datetime64[ns]",
},
"epacems": {
'state': pd.StringDtype(),
'plant_id_eia': pd.Int64Dtype(), # Nullable Integer
'unitid': pd.StringDtype(),
'operating_datetime_utc': "datetime64[ns]",
'operating_time_hours': float,
'gross_load_mw': float,
'steam_load_1000_lbs': float,
'so2_mass_lbs': float,
'so2_mass_measurement_code': pd.StringDtype(),
'nox_rate_lbs_mmbtu': float,
'nox_rate_measurement_code': pd.StringDtype(),
'nox_mass_lbs': float,
'nox_mass_measurement_code': pd.StringDtype(),
'co2_mass_tons': float,
'co2_mass_measurement_code': pd.StringDtype(),
'heat_content_mmbtu': float,
'facility_id': pd.Int64Dtype(), # Nullable Integer
'unit_id_epa': pd.Int64Dtype(), # Nullable Integer
},
"eia": {
'actual_peak_demand_savings_mw': float, # Added by AES for DR table
'address_2': pd.StringDtype(), # Added by AES for 860 utilities table
'advanced_metering_infrastructure': pd.Int64Dtype(), # Added by AES for AMI table
# Added by AES for UD misc table
'alternative_fuel_vehicle_2_activity': pd.BooleanDtype(),
'alternative_fuel_vehicle_activity': pd.BooleanDtype(),
'annual_indirect_program_cost': float,
'annual_total_cost': float,
'ash_content_pct': float,
'ash_impoundment': pd.BooleanDtype(),
'ash_impoundment_lined': pd.BooleanDtype(),
# TODO: convert this field to more descriptive words
'ash_impoundment_status': pd.StringDtype(),
'associated_combined_heat_power': pd.BooleanDtype(),
'attention_line': pd.StringDtype(),
'automated_meter_reading': pd.Int64Dtype(), # Added by AES for AMI table
'backup_capacity_mw': float, # Added by AES for NNM & DG misc table
'balancing_authority_code_eia': pd.CategoricalDtype(),
'balancing_authority_id_eia': pd.Int64Dtype(),
'balancing_authority_name_eia': pd.StringDtype(),
'bga_source': pd.StringDtype(),
'boiler_id': pd.StringDtype(),
'bunded_activity': pd.BooleanDtype(),
'business_model': pd.CategoricalDtype(categories=[
"retail", "energy_services"]),
'buy_distribution_activity': pd.BooleanDtype(),
'buying_transmission_activity': pd.BooleanDtype(),
'bypass_heat_recovery': pd.BooleanDtype(),
'caidi_w_major_event_days_minus_loss_of_service_minutes': float,
'caidi_w_major_event_dats_minutes': float,
'caidi_wo_major_event_days_minutes': float,
'capacity_mw': float,
'carbon_capture': pd.BooleanDtype(),
'chlorine_content_ppm': float,
'circuits_with_voltage_optimization': pd.Int64Dtype(),
'city': pd.StringDtype(),
'cofire_fuels': pd.BooleanDtype(),
'consumed_by_facility_mwh': float,
'consumed_by_respondent_without_charge_mwh': float,
'contact_firstname': pd.StringDtype(),
'contact_firstname_2': pd.StringDtype(),
'contact_lastname': pd.StringDtype(),
'contact_lastname_2': pd.StringDtype(),
'contact_title': pd.StringDtype(),
'contact_title_2': pd.StringDtype(),
'contract_expiration_date': 'datetime64[ns]',
'contract_type_code': pd.StringDtype(),
'county': pd.StringDtype(),
'county_id_fips': pd.StringDtype(), # Must preserve leading zeroes
'credits_or_adjustments': float,
'critical_peak_pricing': pd.BooleanDtype(),
'critical_peak_rebate': pd.BooleanDtype(),
'current_planned_operating_date': 'datetime64[ns]',
'customers': float,
'customer_class': pd.CategoricalDtype(categories=CUSTOMER_CLASSES),
'customer_incentives_cost': float,
'customer_incentives_incremental_cost': float,
'customer_incentives_incremental_life_cycle_cost': float,
'customer_other_costs_incremental_life_cycle_cost': float,
'daily_digital_access_customers': pd.Int64Dtype(),
'data_observed': pd.BooleanDtype(),
'datum': pd.StringDtype(),
'deliver_power_transgrid': pd.BooleanDtype(),
'delivery_customers': float,
'direct_load_control_customers': pd.Int64Dtype(),
'distributed_generation': pd.BooleanDtype(),
'distributed_generation_owned_capacity_mw': float,
'distribution_activity': pd.BooleanDtype(),
'distribution_circuits': pd.Int64Dtype(),
'duct_burners': pd.BooleanDtype(),
'energy_displaced_mwh': float,
'energy_efficiency_annual_cost': float,
'energy_efficiency_annual_actual_peak_reduction_mw': float,
'energy_efficiency_annual_effects_mwh': float,
'energy_efficiency_annual_incentive_payment': float,
'energy_efficiency_incremental_actual_peak_reduction_mw': float,
'energy_efficiency_incremental_effects_mwh': float,
'energy_savings_estimates_independently_verified': pd.BooleanDtype(),
'energy_savings_independently_verified': pd.BooleanDtype(),
'energy_savings_mwh': float,
'energy_served_ami_mwh': float,
'energy_source_1_transport_1': pd.CategoricalDtype(categories=TRANSIT_TYPE_DICT.values()),
'energy_source_1_transport_2': pd.CategoricalDtype(categories=TRANSIT_TYPE_DICT.values()),
'energy_source_1_transport_3': pd.CategoricalDtype(categories=TRANSIT_TYPE_DICT.values()),
'energy_source_2_transport_1': pd.CategoricalDtype(categories=TRANSIT_TYPE_DICT.values()),
'energy_source_2_transport_2': pd.CategoricalDtype(categories=TRANSIT_TYPE_DICT.values()),
'energy_source_2_transport_3': pd.CategoricalDtype(categories=TRANSIT_TYPE_DICT.values()),
'energy_source_code': pd.StringDtype(),
'energy_source_code_1': pd.StringDtype(),
'energy_source_code_2': pd.StringDtype(),
'energy_source_code_3': pd.StringDtype(),
'energy_source_code_4': pd.StringDtype(),
'energy_source_code_5': pd.StringDtype(),
'energy_source_code_6': pd.StringDtype(),
'energy_storage': pd.BooleanDtype(),
'entity_type': pd.CategoricalDtype(categories=ENTITY_TYPE_DICT.values()),
'estimated_or_actual_capacity_data': pd.CategoricalDtype(categories=ESTIMATED_OR_ACTUAL.values()),
'estimated_or_actual_fuel_data': pd.CategoricalDtype(categories=ESTIMATED_OR_ACTUAL.values()),
'estimated_or_actual_tech_data': pd.CategoricalDtype(categories=ESTIMATED_OR_ACTUAL.values()),
'exchange_energy_delivered_mwh': float,
'exchange_energy_recieved_mwh': float,
'ferc_cogen_docket_no': pd.StringDtype(),
'ferc_cogen_status': pd.BooleanDtype(),
'ferc_exempt_wholesale_generator': pd.BooleanDtype(),
'ferc_exempt_wholesale_generator_docket_no': pd.StringDtype(),
'ferc_small_power_producer': pd.BooleanDtype(),
'ferc_small_power_producer_docket_no': pd.StringDtype(),
'fluidized_bed_tech': pd.BooleanDtype(),
'fraction_owned': float,
'fuel_class': pd.StringDtype(),
'fuel_consumed_for_electricity_mmbtu': float,
'fuel_consumed_for_electricity_units': float,
'fuel_consumed_mmbtu': float,
'fuel_consumed_units': float,
'fuel_cost_per_mmbtu': float,
'fuel_group_code': pd.StringDtype(),
'fuel_group_code_simple': pd.StringDtype(),
'fuel_mmbtu_per_unit': float,
'fuel_pct': float,
'fuel_qty_units': float,
# are fuel_type and fuel_type_code the same??
# fuel_type includes 40 code-like things.. WAT, SUN, NUC, etc.
'fuel_type': pd.StringDtype(),
# from the boiler_fuel_eia923 table, there are 30 code-like things, like NG, BIT, LIG
'fuel_type_code': pd.StringDtype(),
'fuel_type_code_aer': pd.StringDtype(),
'fuel_type_code_pudl': pd.StringDtype(),
'furnished_without_charge_mwh': float,
'generation_activity': pd.BooleanDtype(),
# this is a mix of integer-like values (2 or 5) and strings like AUGSF
'generator_id': pd.StringDtype(),
'generators_number': float,
'generators_num_less_1_mw': float,
'green_pricing_revenue': float,
'grid_voltage_2_kv': float,
'grid_voltage_3_kv': float,
'grid_voltage_kv': float,
'heat_content_mmbtu_per_unit': float,
'highest_distribution_voltage_kv': float,
'home_area_network': pd.Int64Dtype(),
'inactive_accounts_included': pd.BooleanDtype(),
'incremental_energy_savings_mwh': float,
'incremental_life_cycle_energy_savings_mwh': float,
'incremental_life_cycle_peak_reduction_mwh': float,
'incremental_peak_reduction_mw': float,
'iso_rto_code': pd.StringDtype(),
'latitude': float,
'liquefied_natural_gas_storage': pd.BooleanDtype(),
'load_management_annual_cost': float,
'load_management_annual_actual_peak_reduction_mw': float,
'load_management_annual_effects_mwh': float,
'load_management_annual_incentive_payment': float,
'load_management_annual_potential_peak_reduction_mw': float,
'load_management_incremental_actual_peak_reduction_mw': float,
'load_management_incremental_effects_mwh': float,
'load_management_incremental_potential_peak_reduction_mw': float,
'longitude': float,
'major_program_changes': pd.BooleanDtype(),
'mercury_content_ppm': float,
'merge_address': pd.StringDtype(),
'merge_city': pd.StringDtype(),
'merge_company': pd.StringDtype(),
'merge_date': 'datetime64[ns]',
'merge_state': pd.StringDtype(),
'mine_id_msha': pd.Int64Dtype(),
'mine_id_pudl': pd.Int64Dtype(),
'mine_name': pd.StringDtype(),
'mine_type_code': pd.StringDtype(),
'minimum_load_mw': float,
'moisture_content_pct': float,
'momentary_interruption_definition': pd.CategoricalDtype(categories=MOMENTARY_INTERRUPTION_DEF.values()),
'multiple_fuels': pd.BooleanDtype(),
'nameplate_power_factor': float,
'natural_gas_delivery_contract_type_code': pd.StringDtype(),
'natural_gas_local_distribution_company': pd.StringDtype(),
'natural_gas_pipeline_name_1': pd.StringDtype(),
'natural_gas_pipeline_name_2': pd.StringDtype(),
'natural_gas_pipeline_name_3': pd.StringDtype(),
'natural_gas_storage': pd.BooleanDtype(),
'natural_gas_transport_code': pd.StringDtype(),
'nerc_region': pd.CategoricalDtype(categories=RECOGNIZED_NERC_REGIONS),
'nerc_regions_of_operation': pd.CategoricalDtype(categories=RECOGNIZED_NERC_REGIONS),
'net_generation_mwh': float,
'net_metering': pd.BooleanDtype(),
'net_power_exchanged_mwh': float,
'net_wheeled_power_mwh': float,
'new_parent': pd.StringDtype(),
'non_amr_ami': pd.Int64Dtype(),
'nuclear_unit_id': pd.Int64Dtype(),
'operates_generating_plant': pd.BooleanDtype(),
'operating_date': 'datetime64[ns]',
'operating_switch': pd.StringDtype(),
# TODO: double check this for early 860 years
'operational_status': pd.StringDtype(),
'operational_status_code': pd.StringDtype(),
'original_planned_operating_date': 'datetime64[ns]',
'other': float,
'other_combustion_tech': pd.BooleanDtype(),
'other_costs': float,
'other_costs_incremental_cost': float,
'other_modifications_date': 'datetime64[ns]',
'other_planned_modifications': pd.BooleanDtype(),
'outages_recorded_automatically': pd.BooleanDtype(),
'owned_by_non_utility': pd.BooleanDtype(),
'owner_city': pd.StringDtype(),
'owner_name': pd.StringDtype(),
'owner_state': pd.StringDtype(),
'owner_street_address': pd.StringDtype(),
'owner_utility_id_eia': pd.Int64Dtype(),
'owner_zip_code': pd.StringDtype(),
# we should transition these into readable codes, not a one letter thing
'ownership_code': pd.StringDtype(),
'phone_extension_1': pd.StringDtype(),
'phone_extension_2': pd.StringDtype(),
'phone_number_1': pd.StringDtype(),
'phone_number_2': pd.StringDtype(),
'pipeline_notes': pd.StringDtype(),
'planned_derate_date': 'datetime64[ns]',
'planned_energy_source_code_1': pd.StringDtype(),
'planned_modifications': pd.BooleanDtype(),
'planned_net_summer_capacity_derate_mw': float,
'planned_net_summer_capacity_uprate_mw': float,
'planned_net_winter_capacity_derate_mw': float,
'planned_net_winter_capacity_uprate_mw': float,
'planned_new_capacity_mw': float,
'planned_new_prime_mover_code': pd.StringDtype(),
'planned_repower_date': 'datetime64[ns]',
'planned_retirement_date': 'datetime64[ns]',
'planned_uprate_date': 'datetime64[ns]',
'plant_id_eia': pd.Int64Dtype(),
'plant_id_epa': pd.Int64Dtype(),
'plant_id_pudl': pd.Int64Dtype(),
'plant_name_eia': pd.StringDtype(),
'plants_reported_asset_manager': pd.BooleanDtype(),
'plants_reported_operator': pd.BooleanDtype(),
'plants_reported_other_relationship': pd.BooleanDtype(),
'plants_reported_owner': pd.BooleanDtype(),
'point_source_unit_id_epa': pd.StringDtype(),
'potential_peak_demand_savings_mw': float,
'pulverized_coal_tech': pd.BooleanDtype(),
'previously_canceled': pd.BooleanDtype(),
'price_responsive_programes': pd.BooleanDtype(),
'price_responsiveness_customers': pd.Int64Dtype(),
'primary_transportation_mode_code': pd.StringDtype(),
'primary_purpose_naics_id': pd.Int64Dtype(),
'prime_mover_code': pd.StringDtype(),
'pv_current_flow_type': pd.CategoricalDtype(categories=['AC', 'DC']),
'reactive_power_output_mvar': float,
'real_time_pricing_program': pd.BooleanDtype(),
'rec_revenue': float,
'rec_sales_mwh': float,
'regulatory_status_code': pd.StringDtype(),
'report_date': 'datetime64[ns]',
'reported_as_another_company': pd.StringDtype(),
'retail_marketing_activity': pd.BooleanDtype(),
'retail_sales': float,
'retail_sales_mwh': float,
'retirement_date': 'datetime64[ns]',
'revenue_class': pd.CategoricalDtype(categories=REVENUE_CLASSES),
'rto_iso_lmp_node_id': pd.StringDtype(),
'rto_iso_location_wholesale_reporting_id': pd.StringDtype(),
'rtos_of_operation': pd.StringDtype(),
'saidi_w_major_event_dats_minus_loss_of_service_minutes': float,
'saidi_w_major_event_days_minutes': float,
'saidi_wo_major_event_days_minutes': float,
'saifi_w_major_event_days_customers': float,
'saifi_w_major_event_days_minus_loss_of_service_customers': float,
'saifi_wo_major_event_days_customers': float,
'sales_for_resale': float,
'sales_for_resale_mwh': float,
'sales_mwh': float,
'sales_revenue': float,
'sales_to_ultimate_consumers_mwh': float,
'secondary_transportation_mode_code': pd.StringDtype(),
'sector_id': pd.Int64Dtype(),
'sector_name': pd.StringDtype(),
'service_area': pd.StringDtype(),
'service_type': pd.CategoricalDtype(categories=[
"bundled", "energy", "delivery",
]),
'short_form': pd.BooleanDtype(),
'sold_to_utility_mwh': float,
'solid_fuel_gasification': pd.BooleanDtype(),
'data_source': pd.StringDtype(),
'standard': pd.CategoricalDtype(categories=RELIABILITY_STANDARDS),
'startup_source_code_1': pd.StringDtype(),
'startup_source_code_2': pd.StringDtype(),
'startup_source_code_3': pd.StringDtype(),
'startup_source_code_4': pd.StringDtype(),
'state': pd.StringDtype(),
'state_id_fips': pd.StringDtype(), # Must preserve leading zeroes
'street_address': pd.StringDtype(),
'stoker_tech': pd.BooleanDtype(),
'storage_capacity_mw': float,
'storage_customers': pd.Int64Dtype(),
'subcritical_tech': pd.BooleanDtype(),
'sulfur_content_pct': float,
'summer_capacity_mw': float,
'summer_capacity_estimate': pd.BooleanDtype(),
# TODO: check if there is any data pre-2016
'summer_estimated_capability_mw': float,
'summer_peak_demand_mw': float,
'supercritical_tech': pd.BooleanDtype(),
'supplier_name': pd.StringDtype(),
'switch_oil_gas': pd.BooleanDtype(),
'syncronized_transmission_grid': pd.BooleanDtype(),
# Added by AES for NM & DG tech table (might want to consider merging with another fuel label)
'tech_class': pd.CategoricalDtype(categories=TECH_CLASSES),
'technology_description': pd.StringDtype(),
'time_cold_shutdown_full_load_code': pd.StringDtype(),
'time_of_use_pricing_program': pd.BooleanDtype(),
'time_responsive_programs': pd.BooleanDtype(),
'time_responsiveness_customers': pd.Int64Dtype(),
'timezone': pd.StringDtype(),
'topping_bottoming_code': pd.StringDtype(),
'total': float,
'total_capacity_less_1_mw': float,
'total_meters': pd.Int64Dtype(),
'total_disposition_mwh': float,
'total_energy_losses_mwh': float,
'total_sources_mwh': float,
'transmission': float,
'transmission_activity': pd.BooleanDtype(),
'transmission_by_other_losses_mwh': float,
'transmission_distribution_owner_id': pd.Int64Dtype(),
'transmission_distribution_owner_name': pd.StringDtype(),
'transmission_distribution_owner_state': pd.StringDtype(),
'turbines_inverters_hydrokinetics': float,
'turbines_num': pd.Int64Dtype(), # TODO: check if any turbines show up pre-2016
'ultrasupercritical_tech': pd.BooleanDtype(),
'unbundled_revenues': float,
'unit_id_eia': pd.StringDtype(),
'unit_id_pudl': pd.Int64Dtype(),
'uprate_derate_completed_date': 'datetime64[ns]',
'uprate_derate_during_year': pd.BooleanDtype(),
'utility_id_eia': pd.Int64Dtype(),
'utility_id_pudl': pd.Int64Dtype(),
'utility_name_eia': pd.StringDtype(),
'utility_owned_capacity_mw': float, # Added by AES for NNM table
'variable_peak_pricing_program': pd.BooleanDtype(), # Added by AES for DP table
'virtual_capacity_mw': float, # Added by AES for NM table
'virtual_customers': pd.Int64Dtype(), # Added by AES for NM table
'water_heater': | pd.Int64Dtype() | pandas.Int64Dtype |
"""Create the vaccination groups and the vaccination rank.
Vaccination Groups
==================
1 = Highest Priority
--------------------
- overall 8.6 Mio individuals = ~10% of the population
- 1% live in nursing homes (https://bit.ly/3vFsByz) and not covered in our data
=> target of 9%
- over 80 year olds -> 4% of our synthetic population
- individuals working in nursing homes and outpatient nursing
- 796 489 in nursing homes
    - 421 550 in outpatient nursing
- source: https://bit.ly/3vzGLBj
=> 1.5% of the population.
=> We increase this to 4.6% of the population to include other
groups such as ICU staff. To achieve this share for the overall
population we set the work_contact_priority to 0.9.
With this we also reach the 9% target for the highest priority group.
2 = Very High Priority (2nd and 3rd group acc. to STIKO)
--------------------------------------------------------
- approx. 14% of the population acc. to RKI without educators.
=> 15% abstracting 1% nursing home population.
- 70 to 80 year olds
- close contacts of very high risk individuals
- individuals with other dangerous preconditions
- more medical workers
=> we model this as age group 50-70 gets 2/3 of the spots and
1/3 goes to age group 20-50.
In addition nursery, preschool and primary teachers were moved to this group.
They are about 1% of our synthetic population.
=> target share of 16%
3 = High Priority (4th+5th category acc. to STIKO)
--------------------------------------------------
- 6.9 mio in 4th group + 9 mio in 5th group (~19%)
= ~18% of population without the already vaccinated teachers
=> ~19% abstracting 1% nursing home population.
- 60 to 70 year olds
- other teachers
- many essential workers (police, fire fighters ...)
- people with preconditions that make them more susceptible to covid.
- close contacts of people with dangerous preconditions
Preconditions in this group include diabetes, hypertension, cancer, asthma, and
auto-immune diseases.
=> We expect a higher share among older individuals.
4 = The General Adult Population
---------------------------------
Approximately 45 mio people ~ 56% of the population.
=> 57% abstracting 1% nursing home population
5 = Youths and 6 = Children
----------------------------
Youths will be vaccinated after the general population because in the
beginning the vaccines were not allowed to be given to children.
First, 12 to 16 year olds will be vaccinated, then children under 12.
We assume that the shares refer to the adult population without children.
References
----------
- https://bit.ly/3rekfdL (RKI Stiko Empfehlung)
- https://bit.ly/3tNF01G
- https://www.tagesschau.de/inland/impfungen-lehrer-101.html
- shares of each group: https://bit.ly/3cb5uUQ
"""
import numpy as np
import pandas as pd
def create_vaccination_rank(vaccination_group, share_refuser, seed):
"""Create the order in which individuals get vaccinated, including refusers.
Args:
vaccination_group (pandas.Series): index is the same as that of states.
Low values indicate individuals that have a high priority to be
vaccinated.
share_refuser (float): share of individuals (irrespective of their
vaccination group) that refuse to be vaccinated.
.. warning::
This share must also be passed to the vaccination model!
seed (int)
Returns:
vaccination_order (pandas.Series): same index as that of
vaccination_group. Takes values between 0 and 1. Low values
correspond to individuals that get vaccinated earlier. Refusers
receive the highest values but cannot be distinguished from the
rest.
"""
np.random.seed(seed)
sampled_to_refuse = np.random.choice(
a=[True, False],
size=len(vaccination_group),
p=[share_refuser, 1 - share_refuser],
)
refuser_value = vaccination_group.max() + 1
with_refusers = vaccination_group.where(~sampled_to_refuse, refuser_value)
vaccination_order = with_refusers.rank(method="first", pct=True)
min_at_zero = vaccination_order - vaccination_order.min()
scaled = min_at_zero / min_at_zero.max()
return scaled
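# Illustrative usage sketch (not part of the original module). The small
# `states` frame below is a hypothetical stand-in with the columns that
# create_vaccination_group expects; the 15% refuser share is also assumed.
#
#   states = pd.DataFrame({
#       "age": [85, 42, 15],
#       "work_contact_priority": [0.95, 0.4, -1.0],
#       "educ_worker": [False, True, False],
#       "school_group_id_0": [-1, -1, 1],
#       "occupation": ["retired", "nursery_teacher", "school"],
#   })
#   group = create_vaccination_group(states, seed=0)
#   rank = create_vaccination_rank(group, share_refuser=0.15, seed=0)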
def create_vaccination_group(states, seed):
"""Put individuals into vaccination priority groups based on age and work.
Args:
states (pandas.DataFrame): states DataFrame. Must contain as columns:
"age", "work_contact_priority", "educ_worker", "school_group_id_0",
and "occupation".
seed (int): seed
Returns:
vaccination_group (pandas.Series): index is the same as states.
Values go from 1 (highest priority) to 4 (lowest priority).
            This is irrespective of whether individuals refuse to get vaccinated or not.
"""
np.random.seed(seed)
is_adult = states["age"] >= 18
vaccination_group = | pd.Series(np.nan, index=states.index) | pandas.Series |
import numpy as np
import pandas as pd
import pickle
import time
import random
import os
from sklearn import linear_model, model_selection, ensemble
from sklearn.svm import SVC
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.base import clone
from sklearn import metrics
from sklearn.model_selection import cross_validate, train_test_split, StratifiedKFold
import sklearn.metrics as m
from joblib import Parallel, delayed
from sklearn.base import clone
from sklearn.utils import shuffle, resample
type_='marker'
basename = type_+'_features_expired_prediction_'
dir_ = '../../data/'
t0_all=time.time()
seed = 42
np.random.seed(seed)
max_depth = 1
C=1
tol=1e-3
min_samples_leaf=2
min_samples_split=2
n_estimators=100
models = {
"Logistic Regression" : linear_model.LogisticRegression(
C=C,
penalty='l1',
solver="liblinear",
tol=tol,
random_state=seed)
}
classification_metrics = ['roc_auc']
cv_split = 10
test_size = 0.15
n_jobs = 25
nboot=200
X_all_proteins = pd.read_csv(dir_+'integrated_X_raw_all_proteins.csv',index_col=0)
proteins_no_immunoglobulins = pickle.load(open(dir_+'proteins_no_immunoglobulins.pkl','rb'))
X_all_proteins = X_all_proteins.loc[:,proteins_no_immunoglobulins]
joined = pd.read_csv(dir_+'mortality_X_y.csv',index_col=0)
X_all_clinical = pd.read_csv(dir_+'integrated_X_clinical_and_cohort_covariates.csv',index_col=0)
Y_pgd = pd.read_csv(dir_+'integrated_pgd_y.csv',index_col=0,header=None)
Y_pgd.columns = ['PGD']
X_all_clinical = X_all_clinical.join(Y_pgd)
Y_mortality = joined[['expired']]
Y_mortality.index.name=''
X_all_clinical = X_all_clinical.join(Y_mortality)
Y_lvad = joined[['Mechanical_Support_Y']]
Y_lvad.index.name=''
Y_survival = (joined[['expired']]==0).astype(int)
Y_survival.columns = ['Survival']
Y_survival.index.name=''
X_all_clinical = X_all_clinical.join(Y_survival)
idmap_sub = pd.read_csv(dir_+'protein_gene_map_full.csv')[['Protein','Gene_name']].dropna()
cov_df = X_all_clinical.loc[:,['Cohort_Columbia','Cohort_Cedar']].copy().astype(int)
all_cov_df = cov_df.copy()
all_cov_df.loc[:,'Cohort_Paris'] = (
(all_cov_df['Cohort_Columbia'] +
all_cov_df['Cohort_Cedar'])==0).astype(int)
params = {'Y' : Y_survival, 'cv_split' : cv_split,
'metrics' : classification_metrics, 'n_jobs' : 1,
'test_size' : test_size,
'retrained_models' : True, 'patient_level_predictions' : True}
def permute(Y,seed=42):
"""
shuffle sample values
Parameters:
----------
Y : pandas series
Index of samples and values are their class labels
seed : int
Random seed for shuffling
Returns:
------
arr_shuffle: pandas series
A shuffled Y
"""
arr = shuffle(Y.values,random_state=seed)
arr_shuffle = (pd.Series(arr.reshape(1,-1)[0],index=Y.index))
return arr_shuffle
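# Example of what permute returns (hypothetical values): the index keeps its
# original order while the class labels are shuffled, which is what the
# permutation-based functions below rely on.
#
#   y = pd.Series([1, 0, 1, 0], index=['a', 'b', 'c', 'd'])
#   permute(y, seed=0)  # same a-d index, labels reshuffled across it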
def observed_val(X,Y,models,metrics=['roc_auc'],cv_split=10,seed=42,test_size=0.15,return_train_score=False,n_jobs=1,retrained_models=False,patient_level_predictions=False,return_estimator=True):
    # make sure the given metrics are in a list, even if a single metric was passed as a string
if type(metrics)!=list:
metrics = [metrics]
# 1/ reindex
X = X.loc[Y.index]
dfs = []
model_retrained_fits = {}
model_confs = []
#iterate through model dictionary
for name,mod in models.items():
# /2 generate model parameters and fold scores with cv splitter
fit = clone(mod).fit(X,Y.values.reshape(1,-1)[0])
conf = pd.DataFrame({'y_true' : Y.values.reshape(1,-1)[0],'y_pred' : fit.predict(X),'bootstrap' : 'observed','model' : np.repeat(name,len(Y.index))},index=Y.index)
model_confs.append(conf)
#do prediction for each metric
tmp = pd.DataFrame({'model' : name,'bootstrap' : 'observed'},index=[0])
for metric in metrics:
tmp[metric] = m.SCORERS[metric](fit,X,Y)
model_retrained_fits[name] = fit
dfs.append(tmp)
return pd.concat(dfs).reset_index(drop=True), model_retrained_fits, pd.concat(model_confs)
def resample_observed_val(X,Y,models,metrics=['roc_auc'],cv_split=10,seed=42,test_size=0.15,return_train_score=False,n_jobs=1,retrained_models=False,patient_level_predictions=False,return_estimator=True):
    # make sure the given metrics are in a list, even if a single metric was passed as a string
if type(metrics)!=list:
metrics = [metrics]
# 1/ reindex
X = X.loc[Y.index]
Y_resample = resample(Y,random_state=seed)
X = X.loc[Y_resample.index]
Y = Y_resample.copy()
dfs = []
model_retrained_fits = {}
model_confs = []
#iterate through model dictionary
for name,mod in models.items():
# /2 generate model parameters and fold scores with cv splitter
fit = clone(mod).fit(X,Y.values.reshape(1,-1)[0])
conf = pd.DataFrame({'y_true' : Y.values.reshape(1,-1)[0],'y_pred' : fit.predict(X),'bootstrap' : 'observed','model' : np.repeat(name,len(Y.index))},index=Y.index)
model_confs.append(conf)
#do prediction for each metric
tmp = pd.DataFrame({'model' : name,'bootstrap' : 'observed'},index=[0])
for metric in metrics:
tmp[metric] = m.SCORERS[metric](fit,X,Y)
model_retrained_fits[name] = fit
dfs.append(tmp)
return pd.concat(dfs).reset_index(drop=True), model_retrained_fits, pd.concat(model_confs)
def permuted_observed_val(X,Y,models,metrics=['roc_auc'],cv_split=10,seed=42,test_size=0.15,return_train_score=False,n_jobs=1,retrained_models=False,patient_level_predictions=False,return_estimator=True):
    # make sure the given metrics are in a list, even if a single metric was passed as a string
if type(metrics)!=list:
metrics = [metrics]
# 1/ reindex
X = X.loc[Y.index]
Y_shuffle = permute(Y,seed=seed)
X = X.loc[Y_shuffle.index]
Y = Y_shuffle.copy()
dfs = []
model_retrained_fits = {}
model_confs = []
#iterate through model dictionary
for name,mod in models.items():
# /2 generate model parameters and fold scores with cv splitter
fit = clone(mod).fit(X,Y.values.reshape(1,-1)[0])
conf = pd.DataFrame({'y_true' : Y.values.reshape(1,-1)[0],'y_pred' : fit.predict(X),'bootstrap' : 'observed','model' : np.repeat(name,len(Y.index))},index=Y.index)
model_confs.append(conf)
#do prediction for each metric
tmp = pd.DataFrame({'model' : name,'bootstrap' : 'observed'},index=[0])
for metric in metrics:
tmp[metric] = m.SCORERS[metric](fit,X,Y)
model_retrained_fits[name] = fit
dfs.append(tmp)
return pd.concat(dfs).reset_index(drop=True), model_retrained_fits, pd.concat(model_confs)
def train_test_val_top_fold_01_within(X,Y,models,metrics=['roc_auc'],cv_split=10,seed=42,test_size=0.15,return_train_score=True,n_jobs=1,retrained_models=False,patient_level_predictions=False,return_estimator=True):
    # make sure the given metrics are in a list, even if a single metric was passed as a string
if type(metrics)!=list:
metrics = [metrics]
# 1/ train and test split
X = X.loc[Y.index]
X_train, X_test, y_train, y_test = train_test_split(X,Y,
test_size=test_size,
random_state=seed,
stratify=Y,
shuffle=True)
X_train = X_train.apply(lambda x : (x - min(x))/(max(x) - min(x)),axis=0)
X_test = X_test.apply(lambda x : (x - min(x))/(max(x) - min(x)),axis=0)
X_train[X_train.isna()]=0
X_test[X_test.isna()]=0
#define K fold splitter
cv = StratifiedKFold(n_splits=cv_split,random_state=seed,shuffle=True)
#Instantiate lists to collect prediction and model results
dfs = []
model_retrained_fits = {}
model_confs = []
#iterate through model dictionary
for name,mod in models.items():
# /2 generate model parameters and fold scores with cv splitter
fit = cross_validate(clone(mod),X_train,y_train.values.reshape(1,-1)[0],cv=cv,scoring=metrics,
n_jobs=n_jobs,return_train_score=return_train_score,
return_estimator=return_estimator)
tmp = pd.DataFrame({'fold' : range(cv_split),
'model' : name},
index=range(cv_split))
#populate scores in dataframe
cols = [k for k in fit.keys() if (k.find('test')+k.find('train'))==-1]
for col in cols:
tmp[col] = fit[col]
# /3 Identify best performing model
top_fold = np.where(fit['test_roc_auc']==fit['test_roc_auc'].max())[0][0]
keys = [x for x in fit.keys()]
vals = [fit[x][top_fold] for x in keys]
top_model_key_vals = {}
for i in range(len(vals)):
top_model_key_vals[keys[i]] = vals[i]
#4/ train models on training set
# also get sample level predictions
f = top_model_key_vals['estimator']
fitted = clone(f).fit(X_train,y_train.values.reshape(1,-1)[0])
conf = pd.DataFrame({'y_true' : y_test.values.reshape(1,-1)[0],
'y_pred' : fitted.predict(X_test),
'y_proba' : fitted.predict_proba(X_test)[:,1],
'bootstrap' : np.repeat(seed,len(y_test.index)),
'model' : np.repeat(name,len(y_test.index))},
index=y_test.index)
model_confs.append(conf)
#do prediction for each metric
for metric in metrics:
tmp['validation_'+metric] = m.SCORERS[metric](fitted,X_test,y_test)
model_retrained_fits[name] = fitted
dfs.append(tmp.query('fold==@top_fold').drop('fold',1))
return pd.concat(dfs,sort=True).reset_index(drop=True), model_retrained_fits, pd.concat(model_confs,sort=True)
def permuted_train_test_val_top_fold_01_within(X,Y,models,metrics=['roc_auc'],cv_split=10,seed=42,test_size=0.15,return_train_score=True,n_jobs=1,retrained_models=False,patient_level_predictions=False,return_estimator=True):
X = X.loc[Y.index]
Y_shuffle = permute(Y,seed=seed)
X_shuffle = X.loc[Y_shuffle.index]
    # make sure the given metrics are in a list, even if a single metric was passed as a string
if type(metrics)!=list:
metrics = [metrics]
# 1/ train and test split
X_train, X_test, y_train, y_test = train_test_split(X_shuffle,Y_shuffle,
test_size=test_size,
random_state=seed,
stratify=Y,
shuffle=True)
X_train = X_train.apply(lambda x : (x - min(x))/(max(x) - min(x)),axis=0)
X_test = X_test.apply(lambda x : (x - min(x))/(max(x) - min(x)),axis=0)
X_train[X_train.isna()]=0
X_test[X_test.isna()]=0
#define K fold splitter
cv = StratifiedKFold(n_splits=cv_split,random_state=seed,shuffle=True)
#Instantiate lists to collect prediction and model results
dfs = []
model_retrained_fits = {}
model_confs = []
#iterate through model dictionary
for name,mod in models.items():
# /2 generate model parameters and fold scores with cv splitter
fit = cross_validate(clone(mod),X_train,y_train.values.reshape(1,-1)[0],cv=cv,scoring=metrics,
n_jobs=n_jobs,return_train_score=return_train_score,
return_estimator=return_estimator)
tmp = pd.DataFrame({'fold' : range(cv_split),
'model' : name},
index=range(cv_split))
#populate scores in dataframe
cols = [k for k in fit.keys() if (k.find('test')+k.find('train'))==-1]
for col in cols:
tmp[col] = fit[col]
# /3 Identify best performing model
top_fold = np.where(fit['test_roc_auc']==fit['test_roc_auc'].max())[0][0]
keys = [x for x in fit.keys()]
vals = [fit[x][top_fold] for x in keys]
top_model_key_vals = {}
for i in range(len(vals)):
top_model_key_vals[keys[i]] = vals[i]
#4/ train models on training set
# also get sample level predictions
f = top_model_key_vals['estimator']
fitted = clone(f).fit(X_train,y_train.values.reshape(1,-1)[0])
conf = pd.DataFrame({'y_true' : y_test.values.reshape(1,-1)[0],
'y_pred' : fitted.predict(X_test),
'y_proba' : fitted.predict_proba(X_test)[:,1],
'bootstrap' : np.repeat(seed,len(y_test.index)),
'model' : np.repeat(name,len(y_test.index))},
index=y_test.index)
model_confs.append(conf)
#do prediction for each metric
for metric in metrics:
tmp['validation_'+metric] = m.SCORERS[metric](fitted,X_test,y_test)
model_retrained_fits[name] = fitted
dfs.append(tmp.query('fold==@top_fold').drop('fold',1))
return pd.concat(dfs,sort=True).reset_index(drop=True), model_retrained_fits, pd.concat(model_confs,sort=True)
def bootstrap_of_fcn(func=None,params={},n_jobs=4,nboot=2):
if func==None:
return "Need fcn to bootstrap"
parallel = Parallel(n_jobs=n_jobs)
return parallel(
delayed(func)(
seed=k,**params)
for k in range(nboot))
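# Illustrative call (a sketch, not from the original script): the 'X' and
# 'models' keys are assumed to be added to `params` before bootstrapping, and
# each returned element is a (scores, fitted_models, predictions) tuple.
#
#   res = bootstrap_of_fcn(func=train_test_val_top_fold_01_within,
#                          params={**params, 'X': X_all_proteins, 'models': models},
#                          n_jobs=n_jobs, nboot=nboot)
#   perf = get_performance([r[0] for r in res])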
def get_performance(lst):
perf = (pd.
concat(lst,keys=range(len(lst))).
reset_index(level=1,drop=True).
rename_axis('bootstrap').
reset_index()
)
return perf
def model_feature_importances(boot_mods):
dfs = []
X = params['X'].copy()
X.loc[:,'Intercept'] = 0
for i in range(len(boot_mods)):
for j in boot_mods[i].keys():
mod = boot_mods[i][j]
coef = []
try:
coef.extend([i for i in mod.feature_importances_])
except:
coef.extend([i for i in mod.coef_[0]])
coef.extend(mod.intercept_)
fs = []
fs.extend(X.columns.values)
df = pd.DataFrame({
'Feature' : fs,
'Gene_name' : (X.T.
join(idmap_sub.
set_index('Protein'),how='left').
Gene_name.values),
'Importance' : coef,
'Model' : j,
'Bootstrap' : i
})
dfs.append(df)
return pd.concat(dfs,sort=True)
def patient_predictions(lst):
col = | pd.concat(lst) | pandas.concat |
# -*- coding: utf-8 -*-
"""
Created on Mon Dec 6 16:14:36 2021
@author: 75638
"""
import pandas as pd
def get_factor_returns_correlation(path1,path2):
"""
    We use this function to identify potential multicollinearity before running future regressions.
    :param path1, path2: paths to the factor exposure data (factors that passed the single factor test)
        for consecutive months
    :return: for each month we compute the correlation matrix between the factors; testing month after
        month yields a series of correlation matrices. We then take the mean of the absolute correlation
        for each pair of factors and assemble those means into a single matrix, which is what we want.
    Remember: all factors being tested are in the same classification.
"""
factor_data1=pd.read_csv(path1,index_col=0).iloc[:,1:]
factor_data2= | pd.read_csv(path2,index_col=0) | pandas.read_csv |
"""
A tool to extract hourly time series of dye and volume in the segments.
"""
# imports
import matplotlib.pyplot as plt
import numpy as np
import pickle
import pandas as pd
import netCDF4 as nc
import argparse
from datetime import datetime, timedelta
import os, sys
sys.path.append(os.path.abspath('../alpha'))
import Lfun
import zrfun
import tef_fun
import flux_fun
from time import time
# get command line arguments
import argparse
parser = argparse.ArgumentParser()
# standard arguments
parser.add_argument('-g', '--gridname', nargs='?', type=str, default='cas6')
parser.add_argument('-t', '--tag', nargs='?', type=str, default='v3')
parser.add_argument('-x', '--ex_name', nargs='?', type=str, default='lo8dye')
parser.add_argument('-0', '--date_string0', nargs='?', type=str, default='2019.07.04')
parser.add_argument('-1', '--date_string1', nargs='?', type=str, default='2019.07.04')
args = parser.parse_args()
# Get Ldir
Ldir = Lfun.Lstart(args.gridname, args.tag)
Ldir['gtagex'] = Ldir['gtag'] + '_' + args.ex_name
# get time limits
ds0 = args.date_string0; ds1 = args.date_string1
Ldir['date_string0'] = ds0; Ldir['date_string1'] = ds1
dt0 = datetime.strptime(ds0, '%Y.%m.%d'); dt1 = datetime.strptime(ds1, '%Y.%m.%d')
ndays = (dt1-dt0).days + 1
print('Working on:')
outname = Ldir['gtagex'] + '_' + ds0 + '_' + ds1
print(outname +'\n')
# get list of history files to process
fn_list = Lfun.get_fn_list('hourly', Ldir, ds0, ds1)
NT = len(fn_list)
# get grid info
fn = fn_list[0]
G = zrfun.get_basic_info(fn, only_G=True)
S = zrfun.get_basic_info(fn, only_S=True)
h = G['h']
DA = G['DX'] * G['DY']
DA3 = DA.reshape((1,G['M'],G['L']))
DXu = (G['DX'][:,1:]+G['DX'][:,:-1])/2
DX3u = DXu.reshape((1,G['M'],G['L']-1))
DYv = (G['DY'][1:,:]+G['DY'][:-1,:])/2
DY3v = DYv.reshape((1,G['M']-1,G['L']))
# set input/output location
indir0 = Ldir['LOo'] + 'tef2/'
voldir = indir0 + 'volumes_' + Ldir['gridname'] + '/'
#
outdir0 = indir0 + outname + '/'
Lfun.make_dir(outdir0)
outdir = outdir0 + 'flux/'
Lfun.make_dir(outdir)
# load DataFrame of volume and associated dicts
v_df = pd.read_pickle(voldir + 'volumes.p')
bathy_dict = pickle.load(open(voldir + 'bathy_dict.p', 'rb'))
ji_dict = pickle.load(open(voldir + 'ji_dict.p', 'rb'))
seg_list = list(v_df.index)
testing = False
if testing:
verbose = True
seg_list = seg_list[-2:]
else:
verbose = False
j_dict = {}; i_dict = {}
for seg_name in seg_list:
jj = []; ii = []
ji_list_full = ji_dict[seg_name]
for ji in ji_list_full:
jj.append(ji[0])
ii.append(ji[1])
jjj = np.array(jj)
iii = np.array(ii)
j_dict[seg_name] = jjj
i_dict[seg_name] = iii
dye_df = pd.DataFrame(columns=seg_list)
v_df = | pd.DataFrame(columns=seg_list) | pandas.DataFrame |
import pandas as pd
import numpy as np
import sys
import datetime
import seaborn as sns
from tqdm import tqdm
from sklearn import tree
from sklearn.model_selection import cross_val_score
from sklearn.neighbors import KNeighborsClassifier
from sklearn.model_selection import GridSearchCV
from keras import Sequential
from keras.layers import Dense
import matplotlib.pyplot as plt
from sklearn.utils import resample
def get_feature_balance_to_montlhypayment_percentage(loan,test):
df_cur_transactions = df_all_transactions_test if test else df_all_transactions
df_loan_transactions = df_cur_transactions.loc[df_cur_transactions['account_id'] == loan['account_id']] #get all transactions for the account of the loan
year = []
month = []
for index, transaction in df_loan_transactions.iterrows(): #gets year and month for each transaction
trans_date = datetime.datetime.strptime(str(transaction['date']), "%y%m%d")
year.append(trans_date.year)
month.append(trans_date.month)
df_loan_transactions['year'] = year
df_loan_transactions['month'] = month
df_mean_balance_bymonth = df_loan_transactions.groupby(['year','month'])['balance'].mean().reset_index(name='balance')
df_mean_balance_allmonth = df_mean_balance_bymonth['balance'].mean()
return df_mean_balance_allmonth / loan['payments']
def get_client_district_from_account(account_id):
df_disposition = df_dispositions.loc[(df_dispositions['account_id'] == account_id) & (df_dispositions['type'] == 'OWNER')] #get the disposition of the owner of the account
df_client = df_clients.loc[df_clients['client_id'] == df_disposition.iloc[0]['client_id']] #get the info of the owner of the account
return df_districts.loc[df_districts['code '] == df_client.iloc[0]['district_id']].iloc[0] #get the district info of the owner of the account
def get_feature_average_no_crimes_per_100_habitants(loan):
district = get_client_district_from_account(loan['account_id'])
no_crimes_95 = district['no. of commited crimes \'95 ']
no_crimes_96 = district['no. of commited crimes \'96 ']
no_crimes_95 = no_crimes_96 if no_crimes_95 == '?' else no_crimes_95
no_crimes_96 = no_crimes_95 if no_crimes_96 == '?' else no_crimes_96
return ((int(no_crimes_95)+int(no_crimes_96))/2)/int(district['no. of inhabitants'])*100
def get_feature_average_unemployment_rate(loan):
district = get_client_district_from_account(loan['account_id'])
unemploymant_rate_95 = district['unemploymant rate \'95 ']
unemploymant_rate_96 = district['unemploymant rate \'96 ']
unemploymant_rate_95 = unemploymant_rate_96 if unemploymant_rate_95 == '?' else unemploymant_rate_95
unemploymant_rate_96 = unemploymant_rate_95 if unemploymant_rate_96 == '?' else unemploymant_rate_96
return (float(unemploymant_rate_95)+float(unemploymant_rate_96))/2
def get_feature_proportion_avgsalary_monthlypayments(loan):
district = get_client_district_from_account(loan['account_id'])
return int(district['average salary '])/int(loan['payments'])
def get_feature_account_credit_Card_type(loan,test):
df_cur_credit_cards = df_credit_cards_test if test else df_credit_cards
df_loan_disposition = df_dispositions.loc[(df_dispositions['account_id'] == loan['account_id'])& (df_dispositions['type'] == 'OWNER')]
df_credit_card_disposition = df_cur_credit_cards.loc[df_cur_credit_cards['disp_id'] == df_loan_disposition.iloc[0]['disp_id']]
if (len(df_credit_card_disposition.index) == 1):
return df_credit_card_disposition.iloc[0]["type"]
else:
return "no credit card"
def get_feature_sex(loan):
df_loan_disposition = df_dispositions.loc[df_dispositions['account_id'] == loan['account_id']]
df_client_disposition = df_clients.loc[df_clients['client_id'] == df_loan_disposition.iloc[0]['client_id']]
trans_date = list(str(df_client_disposition.iloc[0]['birth_number']))
month = int(trans_date[2] + trans_date[3])
#print(month)
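    # In this dataset's birth_number encoding (Czech national ID convention),
    # 50 is added to the birth month for women, so a month above 12 means the
    # account owner is female.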
if (month > 12):
return 'F'
else:
return 'M'
def get_feature_age(loan):
df_loan_disposition = df_dispositions.loc[df_dispositions['account_id'] == loan['account_id']]
df_client_disposition = df_clients.loc[df_clients['client_id'] == df_loan_disposition.iloc[0]['client_id']]
trans_date = list(str(df_client_disposition.iloc[0]['birth_number']))
year = int(trans_date[0] + trans_date[1])
age = 97 - year
return age
df_train = | pd.read_csv(r'C:\Users\39327\Desktop\ARTIFICIAL INTELLIGENCE\YEAR 2\SEMESTER 1 (PORTO)\KE & ML\loan_train.csv',sep=';') | pandas.read_csv |
import config
import pandas as pd
import pickle
import numpy as np
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from sklearn.model_selection import StratifiedKFold
from sklearn.metrics import classification_report
import tensorflow as tf
from keras import Sequential
from tensorflow.keras.layers import Embedding, SpatialDropout1D, LSTM, Dense
from tensorflow.keras.callbacks import EarlyStopping
from tensorflow.keras import regularizers
from keras.models import load_model
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.metrics import confusion_matrix
"""
Attempt #1
"""
# Prints the classification report
def evaluate(model, X_test, Y_test):
Y_pred = model.predict(X_test)
Y_pred = Y_pred.argmax(axis=-1)
Y_test = Y_test.argmax(axis=-1)
    print(classification_report(Y_test, Y_pred))
# Takes a history object and plots the loss for
# both the training and the test data.
def plot_model(history, fold):
plt.title('Loss')
plt.plot(history.history['loss'], label='train_loss')
plt.plot(history.history['val_loss'], label='test_loss')
plt.legend()
plt.savefig(f"../plots/covid_model_without_vaccine_loss_{config.EPOCHS}epochs_{fold}v{config.K_FOLD_SPLITS}fold.png")
clear_plot()
plt.title('Accuracy')
plt.plot(history.history['accuracy'], label='train_acc', c="r")
plt.plot(history.history['val_accuracy'], label='test_acc', c="b")
plt.legend()
plt.savefig(f"../plots/covid_model_without_vaccine_accuracy_{config.EPOCHS}epochs_{fold}v{config.K_FOLD_SPLITS}fold.png")
clear_plot()
def clear_plot():
plt.close()
plt.cla()
plt.clf()
def plot_confusion_matrix(model, X_test, y_test, fold):
y_pred = model.predict(X_test)
y_pred = y_pred.argmax(axis=-1)
y_test = y_test.argmax(axis=-1)
cm = confusion_matrix(y_test, y_pred)
ax=plt.subplot()
sns.heatmap(cm, annot=True, fmt='g', ax=ax)
# labels, title and ticks
ax.set_xlabel('Predicted labels')
ax.set_ylabel('True labels')
ax.set_title(f'Confusion Matrix – {config.EPOCHS}|{fold}')
ax.xaxis.set_ticklabels(['Negative', 'Positive'])
ax.yaxis.set_ticklabels(['Negative', 'Positive'])
plt.savefig(f"../plots/covid_confusion_{config.EPOCHS}epochs_{fold}v{config.K_FOLD_SPLITS}fold.png")
clear_plot()
# Create a tokenizer for the LSTM model
def create_tokenizer(df, save_path):
tokenizer = Tokenizer(num_words=config.MAX_NUM_WORDS, filters='!"#$%&()*+,-./:;<=>?@[\]^_`{|}~', lower=True)
words = df.link.values.tolist()
words.extend(df.meta_data.values.tolist())
words.extend(df.title.values.tolist())
words.extend(df.body.values.tolist())
tokenizer.fit_on_texts(words)
save_tokenizer(tokenizer, save_path)
return tokenizer
# Loading and saving the tokenizer
def save_tokenizer(tokenizer, filename):
with open(filename, 'wb') as f:
pickle.dump(tokenizer, f, protocol=pickle.HIGHEST_PROTOCOL)
def load_tokenizer(filename):
with open(filename, 'rb') as f:
tokenizer = pickle.load(f)
return tokenizer
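# Illustrative sketch (not part of the original pipeline) of the intended
# tokenizer round-trip; the config.TOKENIZER_PATH name is an assumption here.
def _example_tokenizer_roundtrip(df):
    tokenizer = create_tokenizer(df, config.TOKENIZER_PATH)   # fit and persist
    restored = load_tokenizer(config.TOKENIZER_PATH)          # reload later, e.g. at inference time
    # both objects map the same words to the same integer ids
    return tokenizer.word_index == restored.word_index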
"""
The texts converted into tokens look like this:
[[1, 2, 3, 4], [5, 6, 7], [8, 9, 10, 11, 12]]
padded, they look like this:
[[ 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 2 3 4]
[ 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 5 6 7]
[ 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 8 9 10 11 12]]
once the covid count numbers are appended, the representation looks like this, for example:
[[ 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 2 3 4 10 20 30]
[ 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 5 6 7 40 50 60]
[ 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 8 9 10 11 12 70 80 90]]
np.expand_dims is needed to bring the array into the following shape, for example: [ 2 1 20] => [ [2] [1] [20]]
"""
def transform_text(tokenizer, df):
if (isinstance(tokenizer, str)):
tokenizer = load_tokenizer(tokenizer)
    # Tokenize the link information
X_input = tokenizer.texts_to_sequences(df['link'].values)
X_input = pad_sequences(X_input, maxlen=config.MAX_LINK_SEQUENCE_LENGTH)
    # Tokenize the meta information
X_meta = tokenizer.texts_to_sequences(df['meta_data'].values)
X_meta = pad_sequences(X_meta, maxlen=config.MAX_META_SEQUENCE_LENGTH)
    # Tokenize the title information
X_title = tokenizer.texts_to_sequences(df['title'].values)
X_title = pad_sequences(X_title, maxlen=config.MAX_TITLE_SEQUENCE_LENGTH)
    # Tokenize the page body
X_body = tokenizer.texts_to_sequences(df['body'].values)
X_body = pad_sequences(X_body, maxlen=config.MAX_BODY_SEQUENCE_LENGTH)
covid_word_count = df['covid_word_count'].values
covid_word_count_url = df['covid_word_count_url'].values
restriction_word_count = df['restriction_word_count'].values
restriction_word_count_url = df['restriction_word_count_url'].values
X_input = np.concatenate([X_input, X_meta], axis=-1)
X_input = np.concatenate([X_input, X_title], axis=-1)
X_input = np.concatenate([X_input, X_body], axis=-1)
covid_word_count = np.expand_dims(covid_word_count, axis=(-1))
X_input = np.concatenate([X_input, covid_word_count], axis=-1)
covid_word_count_url = np.expand_dims(covid_word_count_url, axis=(-1))
X_input = np.concatenate([X_input, covid_word_count_url], axis=-1)
restriction_word_count = np.expand_dims(restriction_word_count, axis=(-1))
X_input = np.concatenate([X_input, restriction_word_count], axis=-1)
restriction_word_count_url = np.expand_dims(restriction_word_count_url, axis=(-1))
    X_input = np.concatenate([X_input, restriction_word_count_url], axis=-1)  # finally join everything together
return X_input
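# Illustrative sketch of calling transform_text; it assumes a fitted tokenizer
# path and a DataFrame with the columns used above. The resulting matrix has
# one row per article and (link + meta + title + body lengths + 4 count) columns.
def _example_transform(tokenizer_path, df):
    X = transform_text(tokenizer_path, df)
    expected_width = (config.MAX_LINK_SEQUENCE_LENGTH + config.MAX_META_SEQUENCE_LENGTH
                      + config.MAX_TITLE_SEQUENCE_LENGTH + config.MAX_BODY_SEQUENCE_LENGTH + 4)
    return X.shape[1] == expected_width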
def remove_stopwords(df):
ger = pd.read_csv(config.STOPWORDS_PATH)['stopwords'].values
df['link'] = df['link'].apply(lambda x: ' '.join([word for word in str(x).split() if word not in (ger)]))
df['meta_data'] = df['meta_data'].apply(lambda x: ' '.join([word for word in str(x).split() if word not in (ger)]))
df['title'] = df['title'].apply(lambda x: ' '.join([word for word in str(x).split() if word not in (ger)]))
df['body'] = df['body'].apply(lambda x: ' '.join([word for word in str(x).split() if word not in (ger)]))
return df
# Takes the input DataFrame and a LabelEncoder object,
# trains an LSTM model, saves it, evaluates it
# and prints the loss.
def train_model(train_df, valid_df, tokenizer, fold):
X_train = transform_text(tokenizer, train_df)
X_valid = transform_text(tokenizer, valid_df)
Y_train = pd.get_dummies(train_df['label'])
    Y_valid = pd.get_dummies(valid_df['label'])
import re
import pandas as pd
from deepsvr.utils import to_numeric
BASE_METRICS = ['count', 'avg_mapping_quality', 'avg_basequality',
'avg_se_mapping_quality', 'num_plus_strand',
'num_minus_strand', 'avg_pos_as_fraction',
'avg_num_mismaches_as_fraction', 'avg_sum_mismatch_qualities',
'num_q2_containing_reads',
'avg_distance_to_q2_start_in_q2_reads',
'avg_clipped_length', 'avg_distance_to_effective_3p_end']
class ReadCount:
"""Parse bam-readcount out into dict or pandas.DataFrame"""
def __init__(self, file_path):
"""Initialize dict of bam-readcount file.
Args:
file_path (str): File path of bam-readcount file
"""
self.read_count_dict = self._parse(file_path)
        self.read_count_df = pd.DataFrame()
import pathlib
import pickle
import pandas as pd
from pyxlsb import open_workbook as open_xlsb
def load_pickle(parent_dir):
"""
Accepts a parent directory Path object
which can contain only one pickled file with a .pkl extension
Finds the file, loads it, and returns it
"""
filepath = list(parent_dir.glob('*.pkl'))[0]
print(filepath)
with open(str(filepath), 'rb') as f:
data = pickle.load(f)
return data
def make_filepaths_from_dfs(top_dest_dir, dfs, data_origin):
"""
Accepts:
- a parent directory - top_dest_dir as a Path object
- a list of dataframes - dfs
containing columns and data for nation, league and season
- a string representing the original source of the data
Returns a list of filepaths associated with each dataframe
"""
filepaths = []
for df in dfs:
nation = df['nation'].unique()[0]
league = df['league'].unique()[0]
season = df['season'].unique()[0]
filename = str(season) + '.csv'
filepath = top_dest_dir / data_origin / nation / league / season / filename
filepaths.append(filepath)
return filepaths
def write_dfs_to_filepaths(dfs, filepaths):
"""
Accepts a list of pandas dataframes - dfs
    and a parallel list of filepaths - pathlib path objects
Writes the dataframes to the filepaths as csvs with no index
Returns the number of files written integer
"""
n = 0
for df, filepath in zip(dfs, filepaths):
if not filepath.exists():
filepath.parent.mkdir(parents=True, exist_ok=True)
df.to_csv(filepath, index=False)
n += 1
return n
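# Illustrative sketch of chaining the two helpers above; the output root and
# the 'understat' origin label are assumptions made for this example only.
def _example_save_season_csvs(dfs):
    top_dest_dir = pathlib.Path('data')
    filepaths = make_filepaths_from_dfs(top_dest_dir, dfs, 'understat')
    written = write_dfs_to_filepaths(dfs, filepaths)
    print(f'{written} new CSV files written')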
def pandas_read_xlsb_file(filepath):
"""
https://stackoverflow.com/questions/45019778/read-xlsb-file-in-pandas-python
Accepts a filepath - pathlib.Path object
Reads an xlsb file into a dataframe
Returns the dataframe
"""
dfs = []
with open_xlsb(filepath) as wb:
with wb.get_sheet(1) as sheet:
for row in sheet.rows():
dfs.append([item.v for item in row])
    df = pd.DataFrame(dfs[1:], columns=dfs[0])
    return df
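# Illustrative usage sketch; the workbook path below is an assumption.
# df = pandas_read_xlsb_file(pathlib.Path('data/archive.xlsb'))
# print(df.head())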
import os
import sys
import wx
import wx.adv
import pandas as pd
import xlsxwriter
from ObjectListView import ObjectListView, ColumnDefn, FastObjectListView
from threading import Thread
from pubsub import pub
from components.drug_dialog import DrugRegFormDialog
CLOSE_PROGRESS_BAR_SIGNAL = 'close-progressbar'
WRITE_TO_EXCEL_FILE_SIGNAL = 'write-to-excel-file'
ENABLE_BUTTONS = 'enable-buttons'
DISABLE_BUTTONS = 'disable-buttons'
class PulseProgressBarDialog(wx.ProgressDialog):
def __init__(self, *args, abort_message='abort'):
super(PulseProgressBarDialog, self)\
.__init__(*args, style=wx.PD_AUTO_HIDE | wx.PD_APP_MODAL)
pub.subscribe(self.close, CLOSE_PROGRESS_BAR_SIGNAL)
while self.GetValue() != self.GetRange():
self.Pulse()
def close(self):
self.Update(self.GetRange())
class ReadExcelThread(Thread):
def __init__(self, filepath, message):
super(ReadExcelThread, self).__init__()
self._filepath = filepath
self._message = message
self.start()
def run(self):
df = pd.read_excel(self._filepath)
df = df.dropna(how='all').fillna('')
wx.CallAfter(pub.sendMessage, self._message, df=df)
class BiogramGeneratorThread(Thread):
def __init__(self, data, date_col, identifier_col, organism_col, indexes, keys,
include_count, include_percent, include_narst, columns, drug_data):
super(BiogramGeneratorThread, self).__init__()
self.drug_data = drug_data
self.data = data
self.date_col = date_col
self.identifier_col = identifier_col
self.organism_col = organism_col
self.columns = columns
self.indexes = indexes
self.keys = keys
self.include_count = include_count
self.include_percent = include_percent
self.include_narst = include_narst
self.start()
def run(self):
# TODO: remove hard-coded organism file
organism_df = pd.read_excel(os.path.join('appdata', 'organisms2020.xlsx'))
melted_df = self.data.melt(id_vars=self.keys)
_melted_df = pd.merge(melted_df, organism_df, how='inner')
_melted_df = pd.merge(_melted_df, self.drug_data,
right_on='abbr', left_on='variable', how='outer')
indexes = [self.columns[idx] for idx in self.indexes]
total = _melted_df.pivot_table(index=indexes,
columns=['group', 'variable'], aggfunc='count')
sens = _melted_df[_melted_df['value'] == 'S'].pivot_table(index=indexes,
columns=['group', 'variable'],
aggfunc='count')
resists = _melted_df[(_melted_df['value'] == 'I') | (_melted_df['value'] == 'R')] \
.pivot_table(index=indexes, columns=['group', 'variable'], aggfunc='count')
biogram_resists = (resists / total * 100).applymap(lambda x: round(x, 2))
biogram_sens = (sens / total * 100).applymap(lambda x: round(x, 2))
formatted_total = total.applymap(lambda x: '' if pd.isna(x) else '{:.0f}'.format(x))
biogram_narst_s = biogram_sens.fillna('-').applymap(str) + " (" + formatted_total + ")"
biogram_narst_s = biogram_narst_s.applymap(lambda x: '' if x.startswith('-') else x)
wx.CallAfter(pub.sendMessage, CLOSE_PROGRESS_BAR_SIGNAL)
wx.CallAfter(pub.sendMessage,
WRITE_TO_EXCEL_FILE_SIGNAL,
sens=sens if self.include_count else None,
resists=resists if self.include_count else None,
biogram_sens=biogram_sens if self.include_percent else None,
biogram_resists=biogram_resists if self.include_percent else None,
biogram_narst_s=biogram_narst_s if self.include_narst else None,
identifier_col=self.identifier_col)
class DataRow(object):
def __init__(self, id, series) -> None:
self.id = id
for k, v in series.items():
setattr(self, k, v)
def to_list(self, columns):
return [getattr(self, c) for c in columns]
def to_dict(self, columns):
return {c: getattr(self, c) for c in columns}
class BiogramIndexDialog(wx.Dialog):
def __init__(self, parent, columns, title='Biogram Indexes', start=None, end=None):
super().__init__(parent, title=title, style=wx.DEFAULT_DIALOG_STYLE | wx.RESIZE_BORDER)
self.indexes = []
self.choices = columns
main_sizer = wx.BoxSizer(wx.VERTICAL)
instruction = wx.StaticText(self, label='Data will be organized in hierarchy based on the order in the list.')
self.chlbox = wx.CheckListBox(self, choices=columns)
self.chlbox.Bind(wx.EVT_CHECKLISTBOX, self.on_checked)
self.index_items_list = wx.ListCtrl(self, wx.ID_ANY, style=wx.LC_REPORT, size=(300, 200))
self.index_items_list.AppendColumn('Level')
self.index_items_list.AppendColumn('Attribute')
dateBoxSizer = wx.StaticBoxSizer(wx.HORIZONTAL, self, label='Date Range')
self.startDate = wx.adv.DatePickerCtrl(self, dt=start)
startDateLabel = wx.StaticText(self, label='Start')
self.endDate = wx.adv.DatePickerCtrl(self, dt=end)
endDateLabel = wx.StaticText(self, label='End')
dateBoxSizer.Add(startDateLabel, 0, wx.ALL, 5)
dateBoxSizer.Add(self.startDate, 0, wx.ALL, 5)
dateBoxSizer.Add(endDateLabel, 0, wx.ALL, 5)
dateBoxSizer.Add(self.endDate, 0, wx.ALL, 5)
outputBoxSizer = wx.StaticBoxSizer(wx.VERTICAL, self, label='Output')
self.includeCount = wx.CheckBox(self, label='Raw counts')
self.includePercent = wx.CheckBox(self, label='Percents')
self.includeNarstStyle = wx.CheckBox(self, label='NARST format')
self.includeCount.SetValue(True)
self.includePercent.SetValue(True)
self.includeNarstStyle.SetValue(True)
outputBoxSizer.Add(self.includeCount, 0, wx.ALL, 5)
outputBoxSizer.Add(self.includePercent, 0, wx.ALL, 5)
outputBoxSizer.Add(self.includeNarstStyle, 0, wx.ALL, 5)
main_sizer.Add(instruction, 0, wx.ALL, 5)
main_sizer.Add(self.chlbox, 1, wx.ALL | wx.EXPAND, 10)
main_sizer.Add(self.index_items_list, 1, wx.ALL | wx.EXPAND, 10)
main_sizer.Add(dateBoxSizer, 0, wx.ALL | wx.EXPAND, 5)
main_sizer.Add(outputBoxSizer, 0, wx.ALL | wx.EXPAND, 5)
btn_sizer = wx.StdDialogButtonSizer()
ok_btn = wx.Button(self, id=wx.ID_OK, label='Generate')
ok_btn.SetDefault()
cancel_btn = wx.Button(self, id=wx.ID_CANCEL, label='Cancel')
btn_sizer.AddButton(ok_btn)
btn_sizer.AddButton(cancel_btn)
btn_sizer.Realize()
main_sizer.Add(btn_sizer, 0, wx.ALL | wx.ALIGN_CENTER, 10)
main_sizer.SetSizeHints(self)
self.SetSizer(main_sizer)
main_sizer.Fit(self)
def on_checked(self, event):
item = event.GetInt()
if not self.chlbox.IsChecked(item):
idx = self.indexes.index(item)
self.index_items_list.DeleteItem(idx)
self.indexes.remove(item)
else:
self.indexes.append(item)
self.index_items_list.Append([len(self.indexes), self.choices[item]])
class NewColumnDialog(wx.Dialog):
def __init__(self, parent, data, title='Edit values and save to a new column'):
super().__init__(parent, title=title, style=wx.DEFAULT_DIALOG_STYLE | wx.RESIZE_BORDER)
main_sizer = wx.BoxSizer(wx.VERTICAL)
colname_label = wx.StaticText(self, label='New column name')
self.colname_ctrl = wx.TextCtrl(self)
btn_sizer = wx.StdDialogButtonSizer()
ok_btn = wx.Button(self, id=wx.ID_OK, label='Create')
ok_btn.SetDefault()
cancel_btn = wx.Button(self, id=wx.ID_CANCEL, label='Cancel')
btn_sizer.AddButton(ok_btn)
btn_sizer.AddButton(cancel_btn)
btn_sizer.Realize()
self.olvData = ObjectListView(self, wx.ID_ANY, style=wx.LC_REPORT | wx.SUNKEN_BORDER)
self.olvData.oddRowsBackColor = wx.Colour(230, 230, 230, 100)
self.olvData.evenRowsBackColor = wx.WHITE
self.olvData.cellEditMode = ObjectListView.CELLEDIT_DOUBLECLICK
self.data = []
for dt in data:
self.data.append({'old': dt, 'new': dt})
self.olvData.SetColumns([
ColumnDefn(title='Old Value', align='left', minimumWidth=50, valueGetter='old'),
ColumnDefn(title='New Value', align='left', minimumWidth=50, valueGetter='new'),
])
self.olvData.SetObjects(self.data)
main_sizer.Add(self.olvData, 1, wx.ALL | wx.EXPAND, 5)
main_sizer.Add(colname_label, 0, wx.ALL, 5)
main_sizer.Add(self.colname_ctrl, 0, wx.ALL, 5)
main_sizer.Add(btn_sizer, 0, wx.ALL | wx.ALIGN_CENTER, 5)
self.SetAutoLayout(True)
self.SetSizer(main_sizer)
main_sizer.Fit(self)
def replace(self):
return self.data
class DrugListCtrl(wx.ListCtrl):
def __init__(self, parent, cols):
super(DrugListCtrl, self).__init__(parent, style=wx.LC_REPORT, size=(300, 200))
self.EnableCheckBoxes(True)
self.Bind(wx.EVT_LIST_ITEM_CHECKED, self.on_check)
self.Bind(wx.EVT_LIST_ITEM_UNCHECKED, self.on_uncheck)
self.cols = cols
self.drugs = []
self.AppendColumn('Name')
for col in cols:
self.Append([col])
for d in config.Read('Drugs').split(';'):
if d in self.cols:
idx = self.cols.index(d)
self.CheckItem(idx)
def on_check(self, event):
item = event.GetItem()
idx = item.GetId()
col = self.cols[idx]
if self.IsItemChecked(idx):
self.drugs.append(col)
def on_uncheck(self, event):
item = event.GetItem()
idx = item.GetId()
col = self.cols[idx]
if col in self.drugs:
self.drugs.remove(col)
class ConfigDialog(wx.Dialog):
def __init__(self, parent, columns, title='Configuration'):
super().__init__(parent, title=title, style=wx.DEFAULT_DIALOG_STYLE | wx.RESIZE_BORDER)
main_sizer = wx.BoxSizer(wx.VERTICAL)
form_sizer = wx.FlexGridSizer(5, 2, 15, 20)
form_sizer.Add(wx.StaticText(self, id=wx.ID_ANY, label='Identifier'), 0)
self.identifier_combo_ctrl = wx.Choice(self, choices=columns)
_col = config.Read('IdentifierCol', '')
if _col and _col in columns:
self.identifier_combo_ctrl.SetSelection(columns.index(_col))
form_sizer.Add(self.identifier_combo_ctrl, 0)
form_sizer.Add(wx.StaticText(self, id=wx.ID_ANY, label='Date'))
self.date_combo_ctrl = wx.Choice(self, choices=columns)
_col = config.Read('DateCol', '')
if _col and _col in columns:
self.date_combo_ctrl.SetSelection(columns.index(_col))
form_sizer.Add(self.date_combo_ctrl, 0)
form_sizer.Add(wx.StaticText(self, id=wx.ID_ANY, label='Organism Code'))
self.organism_combo_ctrl = wx.Choice(self, choices=columns)
_col = config.Read('OrganismCol', '')
if _col and _col in columns:
self.organism_combo_ctrl.SetSelection(columns.index(_col))
form_sizer.Add(self.organism_combo_ctrl, 0)
form_sizer.Add(wx.StaticText(self, id=wx.ID_ANY, label='Specimens'))
self.specimens_combo_ctrl = wx.Choice(self, choices=columns)
_col = config.Read('SpecimensCol', '')
if _col and _col in columns:
self.specimens_combo_ctrl.SetSelection(columns.index(_col))
form_sizer.Add(self.specimens_combo_ctrl, 0)
self.drug_listctrl = DrugListCtrl(self, columns)
form_sizer.Add(wx.StaticText(self, id=wx.ID_ANY, label='Drugs'))
form_sizer.Add(self.drug_listctrl, 1, wx.EXPAND)
btn_sizer = wx.StdDialogButtonSizer()
ok_btn = wx.Button(self, id=wx.ID_OK, label='Ok')
ok_btn.SetDefault()
cancel_btn = wx.Button(self, id=wx.ID_CANCEL, label='Cancel')
btn_sizer.AddButton(ok_btn)
btn_sizer.AddButton(cancel_btn)
btn_sizer.Realize()
main_sizer.Add(form_sizer, 0, wx.ALL | wx.EXPAND, 10)
main_sizer.Add(btn_sizer, 0, wx.ALL | wx.ALIGN_CENTER, 10)
main_sizer.SetSizeHints(self)
self.SetSizer(main_sizer)
main_sizer.Fit(self)
class DeduplicateIndexDialog(wx.Dialog):
def __init__(self, parent, columns, title='Deduplication Keys'):
super().__init__(parent, title=title, style=wx.DEFAULT_DIALOG_STYLE | wx.RESIZE_BORDER)
self.keys = []
main_sizer = wx.BoxSizer(wx.VERTICAL)
instruction = wx.StaticText(self, label='Select columns you want to use for deduplication.')
self.isSortDate = wx.CheckBox(self, label='Sort by the date column')
self.isSortDate.SetValue(True)
self.chlbox = wx.CheckListBox(self, choices=columns)
self.chlbox.Bind(wx.EVT_CHECKLISTBOX, self.on_checked)
button_sizer = self.CreateStdDialogButtonSizer(flags=wx.OK | wx.CANCEL)
main_sizer.Add(instruction, 0, wx.ALL, 5)
main_sizer.Add(self.chlbox, 1, wx.ALL | wx.EXPAND, 5)
main_sizer.Add(self.isSortDate, 0, wx.ALL, 5)
main_sizer.Add(button_sizer, 0, wx.ALL, 5)
self.SetSizer(main_sizer)
self.Fit()
def on_checked(self, event):
item = event.GetInt()
if not self.chlbox.IsChecked(item):
idx = self.keys.index(item)
self.keys.remove(item)
else:
self.keys.append(item)
class MainFrame(wx.Frame):
def __init__(self):
wx.Frame.__init__(self, parent=None, id=wx.ID_ANY, title="Mivisor Version 2021.1", size=(800, 600))
panel = wx.Panel(self)
# TODO: figure out how to update the statusbar's text from the frame's children
self.statusbar = self.CreateStatusBar(2)
self.statusbar.SetStatusText('The app is ready to roll.')
self.statusbar.SetStatusText('This is for the analytics information', 1)
menuBar = wx.MenuBar()
fileMenu = wx.Menu()
registryMenu = wx.Menu()
menuBar.Append(fileMenu, '&File')
menuBar.Append(registryMenu, 'Re&gistry')
loadItem = fileMenu.Append(wx.ID_ANY, 'Load Data', 'Load Data')
exportItem = fileMenu.Append(wx.ID_ANY, 'Export Data', 'Export Data')
fileMenu.AppendSeparator()
fileItem = fileMenu.Append(wx.ID_EXIT, '&Quit', 'Quit Application')
drugItem = registryMenu.Append(wx.ID_ANY, 'Drugs', 'Drug Registry')
self.SetMenuBar(menuBar)
self.Bind(wx.EVT_MENU, lambda x: self.Close(), fileItem)
self.Bind(wx.EVT_MENU, self.open_drug_dialog, drugItem)
self.Bind(wx.EVT_MENU, self.export_data, exportItem)
self.Bind(wx.EVT_MENU, self.open_load_data_dialog, loadItem)
self.Bind(wx.EVT_CLOSE, self.OnClose)
self.Center()
self.Maximize(True)
self.load_drug_data()
self.df = pd.DataFrame()
self.data = []
self.colnames = []
self.organism_col = config.Read('OrganismCol', '')
self.identifier_col = config.Read('IdentifierCol', '')
self.date_col = config.Read('DateCol', '')
self.specimens_col = config.Read('SpecimensCol', '')
self.drugs_col = config.Read('Drugs', '').split(';') or []
main_sizer = wx.BoxSizer(wx.VERTICAL)
btn_sizer = wx.BoxSizer(wx.HORIZONTAL)
load_button = wx.Button(panel, label="Load")
self.copy_button = wx.Button(panel, label="Copy Column")
self.config_btn = wx.Button(panel, label='Config')
self.generate_btn = wx.Button(panel, label='Generate')
load_button.Bind(wx.EVT_BUTTON, self.open_load_data_dialog)
self.copy_button.Bind(wx.EVT_BUTTON, self.copy_column)
self.config_btn.Bind(wx.EVT_BUTTON, self.configure)
self.generate_btn.Bind(wx.EVT_BUTTON, self.generate)
self.dataOlv = FastObjectListView(panel, wx.ID_ANY,
style=wx.LC_REPORT | wx.SUNKEN_BORDER)
self.dataOlv.oddRowsBackColor = wx.Colour(230, 230, 230, 100)
self.dataOlv.evenRowsBackColor = wx.WHITE
self.dataOlv.cellEditMode = ObjectListView.CELLEDIT_DOUBLECLICK
self.dataOlv.SetEmptyListMsg('Welcome to Mivisor Version 2021.1')
self.dataOlv.SetObjects([])
main_sizer.Add(self.dataOlv, 1, wx.ALL | wx.EXPAND, 10)
btn_sizer.Add(load_button, 0, wx.ALL, 5)
btn_sizer.Add(self.copy_button, 0, wx.ALL, 5)
btn_sizer.Add(self.config_btn, 0, wx.ALL, 5)
btn_sizer.Add(self.generate_btn, 0, wx.ALL, 5)
main_sizer.Add(btn_sizer, 0, wx.ALL, 5)
panel.SetSizer(main_sizer)
panel.Fit()
self.disable_buttons()
pub.subscribe(self.disable_buttons, DISABLE_BUTTONS)
pub.subscribe(self.enable_buttons, ENABLE_BUTTONS)
pub.subscribe(self.write_output, WRITE_TO_EXCEL_FILE_SIGNAL)
def OnClose(self, event):
if event.CanVeto():
if wx.MessageBox('You want to quit the program?', 'Please confirm', style=wx.YES_NO) != wx.YES:
event.Veto()
return
event.Skip()
def disable_buttons(self):
self.generate_btn.Disable()
self.copy_button.Disable()
self.config_btn.Disable()
def enable_buttons(self):
self.generate_btn.Enable()
self.copy_button.Enable()
self.config_btn.Enable()
def export_data(self, event):
df = pd.DataFrame([d.to_dict(self.colnames) for d in self.data])
if df.empty:
with wx.MessageDialog(self, 'No data to export. Please load data first.',
'Export Data', style=wx.OK) as dlg:
dlg.ShowModal()
return
with wx.FileDialog(self, "Please select the output file for data",
wildcard="Excel file (*xlsx)|*xlsx",
style=wx.FD_SAVE | wx.FD_OVERWRITE_PROMPT) as file_dialog:
if file_dialog.ShowModal() == wx.ID_CANCEL:
return
file_path = file_dialog.GetPath()
if os.path.splitext(file_path)[1] != '.xlsx':
file_path = file_path + '.xlsx'
try:
df.to_excel(file_path, index=False)
except:
with wx.MessageDialog(self, 'Export failed.', 'Export Data', style=wx.OK) as dlg:
dlg.ShowModal()
else:
with wx.MessageDialog(self, 'Export completed.', 'Export Data', style=wx.OK) as dlg:
dlg.ShowModal()
def load_drug_data(self):
        try:
            drug_df = pd.read_json(os.path.join('appdata', 'drugs.json'))
        except Exception:
            # fall back to an empty frame so the check below cannot raise a NameError
            drug_df = pd.DataFrame()
        if drug_df.empty:
            drug_df = pd.DataFrame(columns=['drug', 'abbreviation', 'group'])
import pandas as pd
import dask.dataframe as dd
import os, requests, logging
import re
import sys
# from bs4 import BeautifulSoup as bs
from .utils import *
class EdgarBase(object):
def __init__(self, dir_edgar=None):
# self.dir_edgar =
# self.__dir_download = None
# self.__dir_data = None
self.__dir_output = None
self.ulr_sec = 'https://www.sec.gov/Archives/'
self.__dir_config = None
self.dir_curr = os.path.abspath(os.path.dirname(__file__))
self.dir_config = os.path.join(self.dir_curr, 'config')
self.today = pd.datetime.today()
self.__fact_mapping = None
self.__dir_edgar = dir_edgar
self.__cache_file = {}
@property
def dir_edgar(self):
if self.__dir_edgar is None:
logger.error('please set output data directory ')
if 'DIR_EDGAR' not in os.environ:
logger.error('please set environment variable DIR_EDGAR')
logger.error("os.environ['DIR_EDGAR']=/path/to/dir'")
import tempfile
self.__dir_edgar = tempfile.gettempdir()
else:
self.__dir_edgar = os.environ['DIR_EDGAR']
return self.__dir_edgar
def set_dir_edgar(self, dir_edgar):
if not os.path.exists(dir_edgar):
os.makedirs(dir_edgar)
self.__dir_edgar = dir_edgar
return self
@property
def _dir_download(self):
# dir_download = os.path.join(self.dir_edgar, 'download')
# if not os.path.isdir(dir_download):
# os.makedirs(dir_download)
return self.dir_edgar
def set_dir_config(self, dir_input):
logger.info('setting dir_config={f}'.format(f=dir_input))
self.dir_curr = dir_input
@property
def fact_mapping(self):
if self.__fact_mapping is None:
path_fact_mapping = os.path.join(self.dir_config, 'fact_mapping.csv')
logger.info('reading fact_mapping from {f}'.format(f=path_fact_mapping))
fm = pd.read_csv(path_fact_mapping).set_index('item')
self.__fact_mapping = fm
else:
fm = self.__fact_mapping
return fm
def get_cik(self, ticker):
return ticker2cik(ticker)
def get_filing_path(self, ticker, filing_type=None, start_date=None, end_date=None):
"""
:param ticker:
:param filing_type: '10-K', '10-Q', etc...
:param start_date: str or datetime
:param end_date: str or datetime
:return: data frame columns=ticker|cik|filing_type|date|filepath
"""
pass
def parse_filing(self, filepath, section):
pass
def reindex_master(self, start_date=None, end_date=None):
pass
class EdgarDownloader(EdgarBase):
def __init__(self, dir_edgar):
super(EdgarDownloader, self).__init__(dir_edgar)
self.__conn_master_db = None
self.valid_form_type = ['10-Q', '10-K', '8-K']
def __exit__(self):
self._close_master_db()
@property
def _dir_master(self):
dir_master = os.path.join(self.dir_edgar, 'master')
if not os.path.isdir(dir_master):
os.makedirs(dir_master)
return dir_master
@property
def conn_master_db(self):
file_master_db = os.path.join(self.dir_edgar, 'master_idx.db')
if self.__conn_master_db is None:
import sqlite3
if not os.path.exists(file_master_db):
conn = sqlite3.connect(file_master_db)
pd.DataFrame().to_sql('master_idx', conn)
else:
conn = sqlite3.connect(file_master_db)
self.__conn_master_db = conn
return self.__conn_master_db
def _close_master_db(self):
conn = self.__conn_master_db
conn.close()
self.__conn_master_db = None
def load_master_db(self, start_date, end_date=None, force_reload=False):
#start_date = pd.to_datetime(str(start_date))
#end_date = pd.datetime.today() if end_date is None else pd.to_datetime(str(end_date))
list_yyyyqq = self._yyyyqq_between(start_date, end_date)
"edgar/full-index/{yyyy}/QTR{q}/master.idx"
list_file_master = ["edgar/full-index/{y}/QTR{q}/master.idx".format(y=yq.split('Q')[0], q=yq.split('Q')[1])
for yq in list_yyyyqq]
#list_file_download = [f for f in list_file_master if not os.path.exists(f) or force_reload]
list_file_downloaded = download_list(list_file_master, self.dir_edgar, force_download=force_reload)
self._update_master_db(list_file_downloaded)
def _update_master_db(self, list_files):
conn = self.conn_master_db
col_names = ['cik', 'company_name', 'form_type', 'date_filed', 'filename']
dfs = dd.read_csv(list_files, sep='|', skiprows=11, header=None)
dfs.columns = col_names
df_load = dfs[dfs['form_type'].isin(self.valid_form_type)].compute()
sql_all = 'select * from master_idx'
df_all = pd.read_sql_query(sql_all, conn)
logger.info('read master_idx db, n={s}'.format(s=df_all.shape[0]))
df_all = pd.concat([df_all, df_load], sort=False).drop_duplicates()
df_all.to_sql('master_idx', conn, if_exists='replace', index=False)
logger.info('write master_idx db, n={s}'.format(s=df_all.shape[0]))
return 0
# def _refresh_master_idx(self, yyyy, q, force=False):
# # yyyy, q = self._year_quarter(date)
# file_master = os.path.join(self._dir_master, "{y}_QTR{q}_master.csv".format(y=yyyy, q=q))
# if not os.path.exists(file_master) or force:
# url_master = self._url_master_idx(yyyy, q)
# logger.info('downloading {f}'.format(f=url_master))
# resp = req.get(url_master)
# if resp.status_code != 200:
# logger.error('error downloading {f}'.format(f=url_master))
# else:
# write_data = '\n'.join(resp.content.decode('latin1').split('\n')[11:])
# logger.info('saving {f}'.format(f=file_master))
# with open(file_master, 'w+', encoding='utf-8') as f:
# f.write("cik|company|form_type|file_date|file_name\n")
# f.write(write_data)
# self._update_master_db([file_master])
# else:
# logger.info('use existing file. {f}'.format(f=file_master))
# return file_master
def filings_between(self, symbol, start_date, end_date=None, form_type='10-K', download=True):
#list_year_quarter = self._yyyyqq_between(start_date, end_date)
#list_master_file = [self._refresh_master_idx(t.split('Q')) for t in list_year_quarter]
# dfs = dd.read_csv(list_master_file, sep='|')
cik = int(ticker2cik(symbol))
# df_res = dfs[(dfs.cik == cik) & (dfs.form_type == form_type)].compute()
sql_filings = "select * from master_idx where cik=={cik} and form_type=='{f}' " \
"and date_filed>='{t0}' ".format(cik=cik, f=form_type, t0=pd.to_datetime(start_date).date())
if end_date:
            sql_filings += "and date_filed<'{t1}'".format(t1=pd.to_datetime(end_date).date())
df_res = pd.read_sql_query(sql_filings, self.conn_master_db)
list_filename = df_res['filename'].tolist()
if download:
list_filename = download_list(list_filename, self._dir_download, force_download=True)
return list_filename
# @staticmethod
# def _url_master_idx(yyyy, q):
# url = "https://www.sec.gov/Archives/edgar/full-index/{yyyy}/QTR{q}/master.idx".format(yyyy=yyyy, q=q)
# return url
# @staticmethod
# def _year_quarter(date=pd.datetime.today()):
# t = pd.to_datetime(date).date()
# return t.year, (t.month - 1) // 3 + 1
@staticmethod
def _yyyyqq(date):
yq = pd.Period(pd.to_datetime(str(date)), freq='Q')
return str(yq)
def _yyyyqq_between(self, start_date, end_date=None):
end_date = pd.datetime.today() if end_date is None else pd.to_datetime(end_date)
end_date += pd.tseries.offsets.QuarterEnd()
start_date = pd.to_datetime(str(start_date))
logger.info('using quarters between {t0} to {t1}'.format(t0=start_date, t1=end_date))
list_year_quarter = list(set(self._yyyyqq(t) for t in pd.date_range(start_date, end_date, freq='M')))
return list_year_quarter
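# Illustrative sketch of the quarter-key helpers above; the dates and the
# scratch directory are arbitrary assumptions made only for this example.
def _example_quarter_keys():
    # the static helper turns any date into a 'YYYYQn' key, e.g. '2019Q2'
    key = EdgarDownloader._yyyyqq('2019-05-15')
    ed = EdgarDownloader('/tmp/edgar')
    # e.g. ['2019Q1', '2019Q2', '2019Q3'] as an unordered list
    quarters = ed._yyyyqq_between('2019-01-15', '2019-08-01')
    return key, quarters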
class EdgarParser(EdgarBase):
def __init__(self, dir_edgar):
super(EdgarParser, self).__init__(dir_edgar)
# from arelle import Cntlr
self.ins_type = ['EX-101.INS']
self.xbrl_type = ['EX-101.INS', 'EX-101.SCH', 'EX-101.CAL', 'EX-101.CAL', 'EX-101.LAB', 'EX-101.PRE',
'EX-101.DEF']
# self.ctl = Cntlr.Cntlr()
        self.ed = EdgarDownloader(dir_edgar)
def _parse_txt(self, f_txt, header_only=False):
filing_id = os.path.basename(f_txt).split('.')[0]
if os.path.exists(f_txt):
with open(f_txt, 'r+') as f:
data = f.read()
re_header = re.search(r'<SEC-HEADER>([\s\S]*?)\n<\/SEC-HEADER>', data).group(1)
if not header_only:
re_doc = re.findall(r'<DOCUMENT>\n([\s\S]*?)\n<\/DOCUMENT>', data)
else:
logger.error('file not exists. file={f}'.format(f=f_txt))
dict_header_mapping = {"ACCESSION NUMBER": 'filing_id',
'ACCEPTANCE-DATETIME': 'filing_datetime',
'CONFORMED PERIOD OF REPORT': 'report_period',
'FILED AS OF DATE': 'asof_date',
'DATE AS OF CHANGE': 'change_date',
'CONFORMED SUBMISSION TYPE': 'form_type',
'PUBLIC DOCUMENT COUNT': 'doc_count', }
srs_header = parse_header(re_header)
if header_only:
df_doc = srs_header.to_frame().T.rename(columns=dict_header_mapping)
else:
list_df_doc = [parse_doc(d, include_text=True) for d in re_doc]
df_doc = pd.concat(list_df_doc, axis=1).T
for k, v in dict_header_mapping.items():
df_doc[v] = srs_header[k]
# df_doc['filing_id'] = filing_id
# df_doc['file_path'] = f_txt
df_doc['txt_dir'] = os.path.dirname(f_txt)
df_doc['txt_name'] = os.path.basename(f_txt)
# df_ins = df_doc[df_doc.TYPE=='EX-101.INS']
return df_doc
def _parse_ins_from_xml(self, f_xml, **kwargs):
if not f_xml.startswith('<'):
logger.info('trying to read f_xml. {f}'.format(f=f_xml[:100]))
with open(f_xml, 'r+') as f:
txt_ins = f.read()
else:
logger.info('use f_xml as string input. {f}'.format(f=f_xml[:50]))
txt_ins = f_xml
df_ins = parse_ins(txt_ins, **kwargs)
return df_ins
def _parse_ins_from_txt(self, f_txt, **kwargs):
df_doc = self._parse_txt(f_txt)
df_header = df_doc[['filing_id', 'filing_datetime', 'form_type']].iloc[[0]]
txt_ins = None
# for k in self.ins_type:
# if k in df_doc.TYPE.tolist():
# txt_ins = df_doc[df_doc.TYPE == k].TEXT.values[0]
try:
df_ins_type = df_doc[df_doc.TYPE.isin(self.ins_type)]
if df_ins_type.empty:
logger.warning('cannot find TYPE=EX-101.INS, match filename {f}'.format(f=f_txt))
sch_name = df_doc[df_doc.TYPE == 'EX-101.SCH'].FILENAME.values[0].split('.')[0]
df_ins_type = df_doc[(df_doc.TYPE == 'XML') & (df_doc.FILENAME.str.startswith(sch_name))]
logger.warning('use {f}'.format(f=df_ins_type.FILENAME.values[0]))
# df_ins_type = df_doc[df_doc.TYPE.isin(['10-K', '10-Q'])]
txt_ins = df_ins_type.TEXT.values[0]
except Exception as ex:
logger.error('cannot find ins TYPE in doc. list_types={l}'.format(l=df_doc.TYPE.tolist()))
logger.error('file={f}'.format(f=f_txt))
df_ins = self._parse_ins_from_xml(txt_ins, **kwargs)
df_header = df_header.reindex(df_ins.index).fillna(method='ffill')
df_ins = pd.concat([df_header, df_ins], axis=1)
return df_ins
def get_fact(self, ticker, start_date, end_date=None, item=None, form_type='10-K',
has_dimension=False):
list_filings = self.ed.filings_between(ticker, start_date, end_date, form_type=form_type)
list_df_ins = [self._parse_ins_from_txt(filing, has_dimension=has_dimension) for filing in list_filings]
        df_ins = pd.concat(list_df_ins)
"""HW1 - DATA PROCESSING
<NAME>
MAE 298 AEROACOUSTICS
HOMEWORK 1 - SIGNAL PROCESSING
CREATED: 04 OCT 2016
MODIFIY: 17 OCT 2016
DESCRIPTION: Read sound file of sonic boom and convert signal to
Narrow-band in Pa.
Compute Single-side power spectral density (FFT).
1/3 octave and octave band
NOTE: use 'soundfile' module to read audio data. This normalizes data from
-1 to 1, like Matlab's 'audioread'. 'scipy.io.wavfile' does not normalize
"""
#IMPORT GLOBAL VARIABLES
from hw1_98_globalVars import *
import numpy as np
import pandas as pd
def ReadWavNorm(filename):
"""Read a .wav file and return sampling frequency.
Use 'soundfile' module, which normalizes audio data between -1 and 1,
identically to MATLAB's 'audioread' function
"""
import soundfile as sf
#Returns sampled data and sampling frequency
data, samplerate = sf.read(filename)
return samplerate, data
def ReadWav(filename):
"""NOTE: NOT USED IN THIS CODE, DOES NOT NORMALIZE LIKE MATLAB
Read a .wav file and return sampling frequency
Use 'scipy.io.wavfile' which doesn't normalize data.
"""
from scipy.io import wavfile
#Returns sample frequency and sampled data
sampFreq, snd = wavfile.read(filename)
#snd = Normalize(snd)
return sampFreq, snd
def Normalize(data):
"""NOTE: NOT USED IN THIS CODE, TRIED BUT FAILED TO NORMALIZE LIKE MATLAB
Trying to normalize data between -1 and 1 like matlab audioread
"""
data = np.array(data)
return ( 2*(data - min(data)) / (max(data) - min(data)) - 1)
def SPLt(P, Pref=20e-6):
"""Sound Pressure Level (SPL) in dB as a function of time.
P --> pressure signal (Pa)
Pref --> reference pressure
"""
PrmsSq = 0.5 * P ** 2 #RMS pressure squared
return 10 * np.log10(PrmsSq / Pref ** 2)
def SPLf(Gxx, T, Pref=20e-6):
"""Sound Pressure Level (SPL) in dB as a function of frequency
Gxx --> Power spectral density of a pressure signal (after FFT)
T --> Total time interval of pressure signal
Pref --> reference pressure
"""
return 10 * np.log10( (Gxx / T) / Pref ** 2 )
def OctaveCenterFreqsGen(dx=3, n=39):
"""NOTE: NOT USED IN THIS CODE. INSTEAD, OCTAVECENTERFREQS
Produce general center frequencies for octave-band spectra
dx --> frequency interval spacing (3 for octave, 1 for 1/3 octave)
    n --> number of center freqs to produce (starting at dx)
"""
fc30 = 1000 #Preferred center freq for m=30 is 1000Hz
m = np.arange(1, n+1) * dx #for n center freqs, multiply 1-->n by dx
    freqs = fc30 * 2 ** (-10 + m/3) #Formula for center freqs
    return freqs
def OctaveBounds(fc, octv=1):
"""Get upper/lower frequency bounds for given octave band.
fc --> current center frequency
octv --> octave-band (octave-->1, 1/3 octave-->1/3)
"""
upper = 2 ** ( octv / 2) * fc
lower = 2 ** (-octv / 2) * fc
return upper, lower
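# Worked example (illustrative only) of the band-edge formula above: for a
# 1 kHz center frequency the full octave band spans roughly 707-1414 Hz and
# the 1/3-octave band roughly 891-1122 Hz.
def _example_octave_bounds():
    fcu, fcl = OctaveBounds(1000, octv=1)       # ~ (1414.2, 707.1)
    fcu3, fcl3 = OctaveBounds(1000, octv=1/3)   # ~ (1122.5, 890.9)
    return (fcu, fcl), (fcu3, fcl3)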
def OctaveCenterFreqs(narrow, octv=1):
"""Calculate center frequencies (fc) for octave or 1/3 octave bands.
Provide original narrow-band frequency vector to bound octave-band.
Only return center frequencies who's lowest lower band limit or highest
upper band limit are within the original data set.
narrow --> original narrow-band frequencies (provides bounds for octave)
octv --> frequency interval spacing (1 for octave, 1/3 for 1/3 octave)
"""
fc30 = 1000 #Preferred center freq for m=30 is 1000Hz
freqs = []
for i in range(len(narrow)):
#current index
m = (3 * octv) * (i + 1) #octave, every 3rd, 1/3 octave, every 1
fc = fc30 * 2 ** (-10 + m/3) #Formula for center freq
fcu, fcl = OctaveBounds(fc, octv) #upper and lower bounds for fc band
if fcu > max(narrow):
break #quit if current fc is greater than original range
if fcl >= min(narrow):
freqs.append(fc) #if current fc is in original range, save
return freqs
def OctaveLp(Lp):
"""Given a range of SPLs that are contained within a given octave band,
perform the appropriate log-sum to determine the octave SPL
Lp --> SPL range in octave-band
"""
#Sum 10^(Lp/10) accross current octave-band, take log
Lp_octv = 10 * np.log10( np.sum( 10 ** (Lp / 10) ) )
return Lp_octv
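# Worked example (illustrative only): log-summing two equal 90 dB levels gives
# 10*log10(10**9 + 10**9) = 93.0 dB, i.e. +3 dB for a doubling of power.
def _example_octave_lp():
    return OctaveLp(np.array([90.0, 90.0]))     # ~ 93.01 dB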
def GetOctaveBand(df, octv=1):
"""Get SPL ( Lp(fc,m) ) for octave-band center frequencies.
Returns octave-band center frequencies and corresponding SPLs
df --> pandas dataframe containing narrow-band frequencies and SPL
octv --> octave-band type (octave-->1, 1/3 octave-->1/3)
"""
#Get Center Frequencies
fcs = OctaveCenterFreqs(df['freq'], octv)
Lp_octv = np.zeros(len(fcs))
for i, fc in enumerate(fcs):
#Get Upper/Lower center freqency band bounds
fcu, fcl = OctaveBounds(fc, octv)
band = df[df['freq'] >= fcl]
band = band[band['freq'] <= fcu]
#SPLs in current octave-band
Lp = np.array(band['SPL'])
#Sum 10^(Lp/10) accross current octave-band, take log
Lp_octv[i] = OctaveLp(Lp)
return fcs, Lp_octv
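# Illustrative sketch of the octave-band workflow above; it assumes a
# narrow-band dataframe with 'freq' and 'SPL' columns, as built later in main().
def _example_octave_bands(powspec):
    fc1, Lp1 = GetOctaveBand(powspec, octv=1)      # full octave bands
    fc3, Lp3 = GetOctaveBand(powspec, octv=1./3)   # 1/3 octave bands
    return pd.DataFrame({'freq': fc3, 'SPL': Lp3})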
def main(source):
"""Perform calculations for frequency data processing
source --> file name of source sound file
"""
####################################################################
### READ SOUND FILE ################################################
####################################################################
df = pd.DataFrame() #Stores signal data
#Read source frequency (fs) and signal in volts normallized between -1&1
fs, df['V'] = ReadWavNorm( '{}/{}'.format(datadir, source) ) #Like matlab
#Convert to pascals
df['Pa'] = df['V'] * volt2pasc
####################################################################
### POWER SPECTRAL DENSITY #########################################
####################################################################
#TIME
#calculate time of each signal, in seconds, from source frequency
N = len(df['Pa']) #Number of data points in signal
dt = 1 / fs #time step
T = N * dt #total time interval of signal (s)
df['time'] = np.arange(N) * dt #individual sample times
idx = range(int(N/2)) #Indices of single-sided power spectrum (first half)
#POWER SPECTRUM
fft = np.fft.fft(df['Pa']) * dt #Fast-Fourier Transform
Sxx = np.abs(fft) ** 2 / T #Two-sided power spectrum
#Gxx = Sxx[idx] #Single-sided power spectrum
Gxx = 2 * Sxx[idx] #Single-sided power spectrum
#FREQUENCY
freqs = np.fft.fftfreq(df['Pa'].size, dt) #Frequencies
#freqs = np.arange(N) / T #Frequencies
freqs = freqs[idx] #single-sided frequencies
#COMBINE POWER SPECTRUM DATA INTO DATAFRAME
powspec = pd.DataFrame({'freq' : freqs, 'Gxx' : Gxx})
maxima = powspec[powspec['Gxx'] == max(powspec['Gxx'])]
    print('\nMaximum Power Spectrum, frequency:\t', float(maxima['freq']))
print( 'Maximum Power Spectrum, power:', float(maxima['Gxx' ]))
####################################################################
### FIND SOUND PRESSURE LEVEL IN dB ################################
####################################################################
#SPL VS TIME
df['SPL'] = SPLt(df['Pa'])
#SPL VS FREQUENCY
powspec['SPL'] = SPLf(Gxx, T)
####################################################################
### SONIC BOOM N-WAVE PEAK AND DURATION ############################
####################################################################
#SONIC BOOM PRESSURE PEAK
Pmax = max(abs(df['Pa']))
#SONIC BOOM N-WAVE DURATION
#Get shock starting and ending times and pressures
shocki = df[df['Pa'] == max(df['Pa'])] #Shock start
ti = float(shocki['time']) #start time
Pi = float(shocki['Pa']) #start (max) pressure
shockf = df[df['Pa'] == min(df['Pa'])] #Shock end
    tf = float(shockf['time']) #end time
    Pf = float(shockf['Pa']) #end (min) pressure
#Shockwave time duration
dt_Nwave = tf - ti
####################################################################
### OCTAVE-BAND CONVERSION #########################################
####################################################################
#1/3 OCTAVE-BAND
    octv3rd = pd.DataFrame()
import itertools
import pandas as pd
from pandas.testing import assert_series_equal
import pytest
from solarforecastarbiter.reference_forecasts import forecast
def assert_none_or_series(out, expected):
assert len(out) == len(expected)
for o, e in zip(out, expected):
if e is None:
assert o is None
else:
assert_series_equal(o, e)
def test_resample():
index = pd.date_range(start='20190101', freq='15min', periods=5)
arg = pd.Series([1, 0, 0, 0, 2], index=index)
idx_exp = pd.date_range(start='20190101', freq='1h', periods=2)
expected = pd.Series([0.25, 2.], index=idx_exp)
out = forecast.resample(arg)
assert_series_equal(out, expected)
assert forecast.resample(None) is None
@pytest.fixture
def rfs_series():
return pd.Series([1, 2],
index=pd.DatetimeIndex(['20190101 01', '20190101 02']))
@pytest.mark.parametrize(
'start,end,start_slice,end_slice,fill_method,exp_val,exp_idx', [
(None, None, None, None, 'interpolate', [1, 1.5, 2],
['20190101 01', '20190101 0130', '20190101 02']),
('20190101', '20190101 0230', None, None, 'interpolate',
[1, 1, 1, 1.5, 2, 2],
['20190101', '20190101 0030', '20190101 01', '20190101 0130',
'20190101 02', '20190101 0230']),
('20190101', '20190101 02', '20190101 0030', '20190101 0130', 'bfill',
[1., 1, 2], ['20190101 0030', '20190101 01', '20190101 0130'])
]
)
def test_reindex_fill_slice(rfs_series, start, end, start_slice, end_slice,
fill_method, exp_val, exp_idx):
exp = pd.Series(exp_val, index=pd.DatetimeIndex(exp_idx))
out = forecast.reindex_fill_slice(
rfs_series, freq='30min', start=start, end=end,
start_slice=start_slice, end_slice=end_slice, fill_method=fill_method)
assert_series_equal(out, exp)
def test_reindex_fill_slice_some_nan():
rfs_series = pd.Series([1, 2, None, 4], index=pd.DatetimeIndex([
'20190101 01', '20190101 02', '20190101 03', '20190101 04',
]))
start, end, start_slice, end_slice, fill_method = \
None, None, None, None, 'interpolate'
exp_val = [1, 1.5, 2, 2.5, 3, 3.5, 4]
exp_idx = [
'20190101 01', '20190101 0130', '20190101 02', '20190101 0230',
'20190101 03', '20190101 0330', '20190101 04']
exp = pd.Series(exp_val, index=pd.DatetimeIndex(exp_idx))
out = forecast.reindex_fill_slice(
rfs_series, freq='30min', start=start, end=end,
start_slice=start_slice, end_slice=end_slice, fill_method=fill_method)
assert_series_equal(out, exp)
def test_reindex_fill_slice_all_nan():
arg = pd.Series([None]*3, index=pd.DatetimeIndex(
['20190101 01', '20190101 02', '20190101 03']))
out = forecast.reindex_fill_slice(arg, freq='30min')
exp = pd.Series([None]*5, index=pd.DatetimeIndex(
['20190101 01', '20190101 0130', '20190101 02', '20190101 0230',
'20190101 03']))
assert_series_equal(out, exp)
def test_reindex_fill_slice_empty():
    out = forecast.reindex_fill_slice(pd.Series(dtype=float), freq='30min')
import pandas as pd
import joblib
import os
import sys
import argparse
from tqdm import tqdm
def setup_argparser():
argparser = argparse.ArgumentParser(sys.argv[0], conflict_handler='resolve')
argparser.add_argument("--path_to_data", type=str, help="path_to_output")
return argparser
def main(args):
path_to_data = os.path.join(args.path_to_data, "out")
metric_dfs = []
options_dfs = []
folders = os.listdir(path_to_data)
for folder in tqdm(folders, total=len(folders)):
if not os.path.isdir(os.path.join(path_to_data, folder)):
continue
if not os.path.isfile(os.path.join(path_to_data, folder, "metrics.p.gz")):
print("missing file for {0}".format(folder))
continue
metric_df = joblib.load(
os.path.join(path_to_data, folder, "metrics.p.gz")
)
metric_df['experiment_id'] = folder.split('_')[0]
metric_dfs.append(metric_df)
options = joblib.load(
os.path.join(path_to_data, folder, "options.p")
)
options_dfs.append(options)
all_metrics_df = pd.concat(metric_dfs, ignore_index=True)
all_metrics_df.to_csv(os.path.join(path_to_data, "metrics.csv.gz"),
compression="gzip")
    all_options_df = pd.concat(options_dfs, ignore_index=True, sort=True)
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
ImportExtinctionRecallTaskLog.py
Import, tabulate, and plot Extinction Recall 3 log data.
Created 1/3/19 by DJ.
Updated 1/10/19 by DJ - adjusted to new VAS logging format, added GetVasTypes.
Updated 1/11/19 by DJ - bug fixes, comments.
Updated 2/25/19 by DJ - renamed PostDummyRun to PostRun3, stopped assuming sound check response was a float.
Updated 4/12/19 by DJ - updated to work from command line and with new task version (adding Sound VAS).
Updated 5/2/19 by DJ - added function to write BIDs-formatted events files,
added --makeBids flag to argparser, added run & tEnd columns to dfBlock.
Updated 9/24/19 by DJ - accommodate ratingscales with no locked-in response, removed old/redundant mood VAS categorization.
"""
# Import packages
import time # for timing analyses
import numpy as np # for math
import pandas as pd # for tables
from matplotlib import pyplot as plt # for plotting
import ast # for parameter parsing
import re # for splitting strings
import argparse # for command-line arguments
from glob import glob # for finding files
import os # for handling paths
# Import full log (including keypresses)
def ImportExtinctionRecallTaskLog(logFile):
# === Read in PsychoPy log
# Log start
print('Reading file %s...'%logFile)
t = time.time()
# Load file
with open(logFile) as f:
allLines = f.read().splitlines(True)
# Set up outputs
dfKey = pd.DataFrame(columns=['t','key'])
dfDisp = pd.DataFrame(columns=['t','stim','CS'])
dfSync = pd.DataFrame(columns=['t','value'])
dfBlock = pd.DataFrame(columns=['tStart','tEnd','type','run'])
dfVas = pd.DataFrame(columns=['imageFile','CSplusPercent','type','name','rating','timeToFirstPress','RT','run','group','block','trial','tImage','tStart','tEnd'])
params = {}
iKey = 0;
iDisp = 0;
iSync = 0;
iVas = 0;
iBlock = -1;
run = 0; # 1-based numbering
group = 0
block = 0
trial = 0
isParams = False;
# Read each line
for line in allLines:
# split into parts
data = line.split()
# Find params
if 'START PARAMETERS' in line:
isParams = True;
elif 'END PARAMETERS' in line:
isParams = False;
# Parse params
elif isParams: # parse parameter
key = data[2][:-1] # name of parameter
if len(data)==4:
try:
params[key] = float(data[3]) # if it's a number, convert to a float
except ValueError:
params[key] = data[3] # otherwise, record the string
elif data[3].startswith("["):
params[key] = ast.literal_eval(''.join(data[3:])) # if the parameter is a list, make it a list variable
else:
params[key] = ' '.join(data[3:])
# Parse data
elif len(data)>2:
if data[2]=='Keypress:': # time and key pressed
dfKey.loc[iKey,'t'] = float(data[0])
dfKey.loc[iKey,'key'] = data[3]
iKey +=1;
elif data[2]=='Display': # time and stim presented
dfDisp.loc[iDisp,'t'] = float(data[0])
dfDisp.loc[iDisp,'stim'] = data[3]
if len(data)>4: # if a CS level is specified...
trial +=1
dfDisp.loc[iDisp,'CS'] = data[4] # log it
# set VAS stimulus and type
dfVas.loc[iVas,'tImage'] = dfDisp.loc[iDisp,'t']
dfVas.loc[iVas,'imageFile'] = dfDisp.loc[iDisp,'stim']
dfVas.loc[iVas,'CSplusPercent'] = int(dfDisp.loc[iDisp,'CS'][6:])
dfVas.loc[iVas,'type'] = dfBlock.loc[iBlock,'type']
iDisp +=1;
elif data[2]=='set': # message time and text
dfSync.loc[iSync,'t'] = float(data[0])
dfSync.loc[iSync,'value'] = float(data[-1])
iSync +=1;
elif data[2]=='=====' and data[3]=='START' and data[4]=='RUN':
run +=1
elif data[2]=='====' and data[3]=='START' and data[4]=='GROUP':
group = int(data[5][0])
elif data[2]=='===' and data[3]=='START' and data[4]=='BLOCK': # block start time
block = int(data[5][0])
trial = 0
iBlock +=1;
dfBlock.loc[iBlock,'tStart'] = float(data[0])
dfBlock.loc[iBlock,'run'] = run
elif data[2]=='===' and data[3]=='END' and data[4]=='BLOCK': # block end time
dfBlock.loc[iBlock,'tEnd'] = float(data[0])
elif data[2]=='bottomMsg:':
if 'AFRAID' in line:
dfBlock.loc[iBlock,'type'] = 'afraid'
elif 'SCREAM' in line:
dfBlock.loc[iBlock,'type'] = 'scream'
elif data[2]=='RatingScale': # VAS time, rating, RT
if "rating=" in line:
dfVas.loc[iVas,'tStart'] = dfDisp.loc[iDisp-1,'t']
dfVas.loc[iVas,'tEnd'] = float(data[0])
dfVas.loc[iVas,'name'] = data[3][:-1]
value = float(data[-1].split("=")[-1])
dfVas.loc[iVas,'rating'] = value
# if it's an image vas, set indices
if dfVas.loc[iVas,'type'] in ['afraid','scream']:
dfVas.loc[iVas,'run'] = run
dfVas.loc[iVas,'group'] = group
dfVas.loc[iVas,'block'] = block
dfVas.loc[iVas,'trial'] = trial
# if the response timed out, advance without RT/history
if "timed out" in line:
dfVas.loc[iVas,'RT'] = np.nan;
# infer time to first keypress from
iKeys = np.where((dfKey.t>dfVas.loc[iVas,'tStart']) & (dfKey.key!=str(params['triggerKey'])[0]))[0]
if len(iKeys)>0:
dfVas.loc[iVas,'timeToFirstPress'] = dfKey.loc[iKeys[0],'t'] - dfVas.loc[iVas,'tStart'];
else:
dfVas.loc[iVas,'timeToFirstPress'] = np.nan;
if dfVas.loc[iVas,'type'] in ['afraid','scream']:
print('WARNING: image rating scale at t=%g (run %d group %d block %d trial %d) timed out! RT will be set to NaN, timeToFirstPress inferred from key-display interval.'%(dfVas.loc[iVas,'tStart'],run,group,block,trial))
else:
print('WARNING: mood rating scale at t=%g timed out! RT will be set to NaN, timeToFirstPress inferred from key-display interval.'%(dfVas.loc[iVas,'tStart']))
# increment VAS index
iVas +=1;
elif "RT=" in line:
value = float(data[-1].split("=")[-1])
dfVas.loc[iVas,'RT'] = value
elif "history=" in line:
# get time to first button presss
if len(re.split('\), |, |\)]',line))>3:
timeToPress = float(re.split('\), |, |\)]',line)[3])
else:
timeToPress = dfVas.loc[iVas,'RT'] # if no press, default to RT
dfVas.loc[iVas,'timeToFirstPress'] = timeToPress
# increment VAS index
iVas +=1;
print('Done! Took %.1f seconds.'%(time.time()-t))
print('Extracting VAS data...')
t = time.time()
# Parse out mood and sound VAS results
dfMoodVas = dfVas.loc[pd.isnull(dfVas['imageFile']),:]
dfMoodVas = dfMoodVas.drop(['imageFile','CSplusPercent','run','group','block','trial','tImage'],1)
# split into mood & sound
dfSoundVas = dfMoodVas.loc[dfMoodVas.name.str.startswith('SoundCheck'),:]
dfMoodVas = dfMoodVas.loc[~dfMoodVas.name.str.startswith('SoundCheck'),:]
# reset indices
dfSoundVas = dfSoundVas.reset_index(drop=True)
dfMoodVas = dfMoodVas.reset_index(drop=True)
# Parse out image VAS results
dfImageVas = dfVas.loc[pd.notnull(dfVas['imageFile']),:]
dfImageVas = dfImageVas.drop('name',1)
# add Mood VAS types
isTraining = 'Training' in logFile
dfMoodVas = GetVasTypes(params,dfMoodVas,isTraining)
# add Sound VAS types (assuming only one question per sound!!!)
dfSoundVas['group']=np.arange(dfSoundVas.shape[0])
dfSoundVas['groupName']=[x.split('-')[0] for x in dfSoundVas.name]
dfSoundVas['type']='loud'
print('Done! Took %.1f seconds.'%(time.time()-t))
# Return results
return params, dfMoodVas, dfSoundVas, dfImageVas, dfKey, dfDisp, dfSync, dfBlock
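# Illustrative usage sketch; the log file name and output CSV path are
# assumptions made only for this example.
def _example_import_full_log():
    params, dfMood, dfSound, dfImage, dfKey, dfDisp, dfSync, dfBlock = \
        ImportExtinctionRecallTaskLog('ER3-1-1.log')
    dfImage.to_csv('ER3-1-1_ImageVas.csv', index=False)
    return params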
# Import VAS parts of log (excluding keypresses)
def ImportExtinctionRecallTaskLog_VasOnly(logFile):
# === Read in PsychoPy log
# Log start
print('Reading file %s...'%logFile)
t = time.time()
# Load file
with open(logFile) as f:
allLines = f.read().splitlines(True)
# Set up outputs
dfDisp = pd.DataFrame(columns=['t','stim','CS'])
dfBlock = pd.DataFrame(columns=['tStart','tEnd','type'])
dfVas = pd.DataFrame(columns=['imageFile','CSplusPercent','type','name','rating','timeToFirstPress','RT','run','group','block','trial','tImage','tStart','tEnd'])
params = {}
iDisp = 0;
iVas = 0;
iBlock = -1;
run = 0; # 1-based numbering
group = 0
block = 0
trial = 0
isParams = False;
# Read each line
for line in allLines:
# split into parts
data = line.split()
# Find params
if 'START PARAMETERS' in line:
isParams = True;
elif 'END PARAMETERS' in line:
isParams = False;
# Parse params
elif isParams: # parse parameter
key = data[2][:-1] # name of parameter
if len(data)==4:
try:
params[key] = float(data[3]) # if it's a number, convert to a float
except ValueError:
params[key] = data[3] # otherwise, record the string
elif data[3].startswith("["):
params[key] = ast.literal_eval(''.join(data[3:])) # if the parameter is a list, make it a list variable
else:
params[key] = ' '.join(data[3:])
# Parse data
elif len(data)>2:
if data[2]=='Display': # time and stim presented
dfDisp.loc[iDisp,'t'] = float(data[0])
dfDisp.loc[iDisp,'stim'] = data[3]
if len(data)>4: # if a CS level is specified...
trial +=1
dfDisp.loc[iDisp,'CS'] = data[4] # log it
# set VAS stimulus and type
dfVas.loc[iVas,'tImage'] = dfDisp.loc[iDisp,'t']
dfVas.loc[iVas,'imageFile'] = dfDisp.loc[iDisp,'stim']
dfVas.loc[iVas,'CSplusPercent'] = int(dfDisp.loc[iDisp,'CS'][6:])
dfVas.loc[iVas,'type'] = dfBlock.loc[iBlock,'type']
iDisp +=1;
elif data[2]=='=====' and data[3]=='START' and data[4]=='RUN':
run +=1
elif data[2]=='====' and data[3]=='START' and data[4]=='GROUP':
group = int(data[5][0])
elif data[2]=='===' and data[3]=='START' and data[4]=='BLOCK': # block start time
block = int(data[5][0])
trial = 0
iBlock +=1;
dfBlock.loc[iBlock,'tStart'] = float(data[0])
dfBlock.loc[iBlock,'run'] = run
elif data[2]=='===' and data[3]=='END' and data[4]=='BLOCK': # block end time
dfBlock.loc[iBlock,'tEnd'] = float(data[0])
elif data[2]=='bottomMsg:':
if 'AFRAID' in line:
dfBlock.loc[iBlock,'type'] = 'afraid'
elif 'SCREAM' in line:
dfBlock.loc[iBlock,'type'] = 'scream'
elif data[2]=='RatingScale': # VAS time, rating, RT
if "rating=" in line:
dfVas.loc[iVas,'tStart'] = dfDisp.loc[iDisp-1,'t']
dfVas.loc[iVas,'tEnd'] = float(data[0])
dfVas.loc[iVas,'name'] = data[3][:-1]
value = float(data[-1].split("=")[-1])
dfVas.loc[iVas,'rating'] = value
# if it's an image vas, set indices
if dfVas.loc[iVas,'type'] in ['afraid','scream']:
dfVas.loc[iVas,'run'] = run
dfVas.loc[iVas,'group'] = group
dfVas.loc[iVas,'block'] = block
dfVas.loc[iVas,'trial'] = trial
# if the response timed out, advance without RT/history
if "timed out" in line:
dfVas.loc[iVas,'RT'] = np.nan;
# NOTE: nan indicates unknown, not lack of keypress!
dfVas.loc[iVas,'timeToFirstPress'] = np.nan;
if dfVas.loc[iVas,'type'] in ['afraid','scream']:
print('WARNING: image rating scale at t=%g (run %d group %d block %d trial %d) timed out! RT and timeToFirstPress will be set to NaN.'%(dfVas.loc[iVas,'tStart'],run,group,block,trial))
else:
print('WARNING: mood rating scale at t=%g timed out! RT and timeToFirstPress will be set to NaN.'%(dfVas.loc[iVas,'tStart']))
# increment VAS index
iVas +=1;
elif "RT=" in line:
value = float(data[-1].split("=")[-1])
dfVas.loc[iVas,'RT'] = value
elif "history=" in line:
# get time to first button presss
if len(re.split('\), |, |\)]',line))>3:
timeToPress = float(re.split('\), |, |\)]',line)[3])
else:
timeToPress = dfVas.loc[iVas,'RT'] # if no press, default to RT
dfVas.loc[iVas,'timeToFirstPress'] = timeToPress
# increment VAS index
iVas +=1;
print('Done! Took %.1f seconds.'%(time.time()-t))
print('Extracting VAS data...')
t = time.time()
# Parse out mood and sound VAS results
dfMoodVas = dfVas.loc[pd.isnull(dfVas['imageFile']),:]
dfMoodVas = dfMoodVas.drop(['imageFile','CSplusPercent','run','group','block','trial','tImage'],1)
# split into mood & sound
dfSoundVas = dfMoodVas.loc[dfMoodVas.name.str.startswith('SoundCheck'),:]
dfMoodVas = dfMoodVas.loc[~dfMoodVas.name.str.startswith('SoundCheck'),:]
# reset indices
dfSoundVas = dfSoundVas.reset_index(drop=True)
dfMoodVas = dfMoodVas.reset_index(drop=True)
# Parse out image VAS results
dfImageVas = dfVas.loc[pd.notnull(dfVas['imageFile']),:]
dfImageVas = dfImageVas.drop('name',1)
# add Mood VAS types
isTraining = 'Training' in logFile
dfMoodVas = GetVasTypes(params,dfMoodVas,isTraining)
# add Sound VAS types (assuming only one question per sound!!!)
dfSoundVas['group']=np.arange(dfSoundVas.shape[0])
dfSoundVas['groupName']=[x.split('-')[0] for x in dfSoundVas.name]
dfSoundVas['type']='loud'
print('Done! Took %.1f seconds.'%(time.time()-t))
# Return results
return params, dfMoodVas, dfSoundVas, dfImageVas
# Add accurate group, groupName, and type columns to the dfMoodVas dataframe
def GetVasTypes(params,dfMoodVas,isTraining=False):
# declare constants
if isTraining:
vasGroups = ['PreRun1']
else:
vasGroups = ['PreSoundCheck','PostRun1','PostRun2','PostRun3']
magicWords = ['anxious','tired','worried are','mood','doing','feared']
# check each group file for magic words
for i,groupName in enumerate(vasGroups):
try:
vasFile = params['moodQuestionFile%d'%(i+1)]
print('reading %s...'%vasFile)
# read file to get list of questions
with open(vasFile,"r") as fi:
questions = []
for ln in fi:
if ln.startswith("?"):
questions.append(ln[1:])
for j,question in enumerate(questions):
isThis = dfMoodVas.name=='%s-%d'%(groupName,j)
dfMoodVas.loc[isThis,'group'] = i
dfMoodVas.loc[isThis,'groupName'] = groupName
for k,word in enumerate(magicWords):
if word in question:
dfMoodVas.loc[isThis,'type'] = word.split()[0]
except:
print('group %s not found.'%groupName)
return dfMoodVas # return modified dataframe
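# Illustrative mood-question file format assumed by the parser above (an assumption, not taken
# from the original logs): only lines starting with '?' are read as questions, and the magic
# words then map each question to its type, e.g.:
#     ?How anxious do you feel right now?
#     ?How tired do you feel right now?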
# Save figures of the image and mood VAS responses and RTs.
def SaveVasFigures(params,dfMoodVas,dfSoundVas,dfImageVas,outPrefix='ER3_'):
# Set up
outBase = os.path.basename(outPrefix) # filename without the folder
print('Plotting VAS data...')
t = time.time()
# === MOOD VAS === #
# Set up figure
moodFig = plt.figure(figsize=(8, 4), dpi=120, facecolor='w', edgecolor='k')
# Plot Ratings
plt.subplot(121)
# declare constants
if 'Training' in outPrefix:
vasGroups=['PreRun1']
else:
vasGroups = ['PreSoundCheck','PostRun1','PostRun2','PostRun3']
vasTypes = ['anxious','tired','worried','mood','doing','feared']
# vasTypes = dfMoodVas.type.unique()
for vasType in vasTypes:
isInType = dfMoodVas.type==vasType
plt.plot(dfMoodVas.loc[isInType,'group'],dfMoodVas.loc[isInType,'rating'],'.-',label=vasType)
plt.legend()
plt.xticks(range(len(vasGroups)),vasGroups)
plt.ylim([0,100])
plt.xticks(rotation=15)
plt.xlabel('group')
plt.ylabel('rating (0-100)')
plt.title('%s%d-%d\n Mood VAS Ratings'%(outBase,params['subject'],params['session']))
# Plot Reaction Times
plt.subplot(122)
for vasType in vasTypes:
isInType = dfMoodVas.type==vasType
plt.plot(dfMoodVas.loc[isInType,'group'],dfMoodVas.loc[isInType,'RT'],'.-',label=vasType)
plt.legend()
plt.xticks(range(len(vasGroups)),vasGroups)
plt.xticks(rotation=15)
plt.xlabel('group')
plt.ylabel('reaction time (s)')
plt.title('%s%d-%d\n Mood VAS RTs'%(outBase,params['subject'],params['session']))
# Save figure
outFile = '%s%d-%d_MoodVasFigure.png'%(outPrefix,params['subject'],params['session'])
print("Saving Mood VAS figure as %s..."%outFile)
moodFig.savefig(outFile)
# === SOUND CHECK VAS === #
# No sound checks in training task
if not 'Training' in outPrefix:
# declare constants
vasGroups = ['SoundCheck1','SoundCheck2','SoundCheck3']
vasTypes = ['loud']
# Set up figure
soundFig = plt.figure(figsize=(8, 4), dpi=120, facecolor='w', edgecolor='k')
# Plot Ratings
plt.subplot(121)
for vasType in vasTypes:
isInType = dfSoundVas.type==vasType
plt.plot(dfSoundVas.loc[isInType,'group'],dfSoundVas.loc[isInType,'rating'],'.-',label=vasType)
plt.legend()
plt.xticks(range(len(vasGroups)),vasGroups)
plt.ylim([0,100])
plt.xticks(rotation=15)
plt.xlabel('group')
plt.ylabel('rating (0-100)')
plt.title('%s subject %d session %d\n Sound VAS Ratings'%(outBase,params['subject'],params['session']))
# Plot Reaction Times
plt.subplot(122)
for vasType in vasTypes:
isInType = dfSoundVas.type==vasType
plt.plot(dfSoundVas.loc[isInType,'group'],dfSoundVas.loc[isInType,'RT'],'.-',label=vasType)
plt.legend()
plt.xticks(range(len(vasGroups)),vasGroups)
plt.xticks(rotation=15)
plt.xlabel('group')
plt.ylabel('reaction time (s)')
plt.title('%s subject %d session %d\n Sound VAS RTs'%(outBase,params['subject'],params['session']))
# Save figure
outFile = '%s%d-%d_SoundVasFigure.png'%(outPrefix,params['subject'],params['session'])
print("Saving Sound VAS figure as %s..."%outFile)
soundFig.savefig(outFile)
# === IMAGE VAS === #
# Plot image VAS results
imgFig = plt.figure(figsize=(8, 4), dpi=120, facecolor='w', edgecolor='k')
# Plot Ratings
plt.subplot(121)
vasTypes = dfImageVas.type.unique()
for vasType in vasTypes:
isInType = dfImageVas.type==vasType
plt.plot(dfImageVas.loc[isInType,'CSplusPercent'],dfImageVas.loc[isInType,'rating'],'.',label=vasType)
plt.legend()
plt.ylim([0,100])
plt.xticks(rotation=15)
plt.xlabel('CS plus level (%)')
plt.ylabel('rating (0-100)')
plt.title('%s%d-%d\n Image VAS Ratings'%(outBase,params['subject'],params['session']))
# Plot Reaction Times
plt.subplot(122)
for vasType in vasTypes:
isInType = dfImageVas.type==vasType
plt.plot(dfImageVas.loc[isInType,'CSplusPercent'],dfImageVas.loc[isInType,'RT'],'.',label=vasType)
plt.legend()
plt.xticks(rotation=15)
plt.xlabel('CS plus level (%)')
plt.ylabel('reaction time (s)')
plt.title('%s%d-%d\n Image VAS RTs'%(outBase,params['subject'],params['session']))
# Save figure
outFile = '%s%d-%d_ImageVasFigure.png'%(outPrefix,params['subject'],params['session'])
print("Saving Image VAS figure as %s..."%outFile)
imgFig.savefig(outFile)
print('Done! Took %.1f seconds.'%(time.time()-t))
# Convert mood VAS to a single line for logging to multi-subject spreadsheet
def GetSingleVasLine(params,dfVas,isTraining=False,isSoundVas=False):
# === Convert table to single line
# Declare names of VAS groups/types-within-groups (to be used in legends/tables)
if isTraining:
vasGroups = ['PreRun1']
else:
if isSoundVas:
vasGroups = ['SoundCheck1','SoundCheck2','SoundCheck3']
else:
vasGroups = ['PreSoundCheck','PostRun1','PostRun2','PostRun3'] # shorthand for each VAS group based on their position in the task
if isSoundVas:
vasTypes = ['loud']
else:
vasTypes = ['anxious','tired','worried','mood','doing','feared'] # shorthand for VAS0, VAS1, VAS2, etc.
# Convert
cols = ['subject','session','date']
for vasGroup in vasGroups:
for vasType in vasTypes:
cols = cols + ['%s_%s_rating'%(vasGroup,vasType)]
for vasGroup in vasGroups:
for vasType in vasTypes:
cols = cols + ['%s_%s_RT'%(vasGroup,vasType)]
# create dataframe
dfVas_singleRow = pd.DataFrame(columns=cols)
"""Baseado no MediaPipe, disponibilizado em: https://google.github.io/mediapipe/solutions/hands.html"""
import mediapipe as mp
import cv2
import pandas as pd
class DetectaMao:
def __init__(self):
self.mp_drawing = mp.solutions.drawing_utils
self.mp_hands = mp.solutions.hands
self.mp_drawing_styles = mp.solutions.drawing_styles
self.hands = self.mp_hands.Hands(
max_num_hands=1,
min_detection_confidence=0.5,
min_tracking_confidence=0.5)
def detectar(self, img):
"""Detecta mao e retorna coordenadas dedos. Recebe imagem BGR"""
results = self.hands.process(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))
return results.multi_hand_landmarks
def desenhar(self, img, marcas):
"""Desenha os ligamentos da mao"""
# Work on a copy, otherwise the image passed as a parameter is modified, affecting the caller
imagem = img.copy()
for marca in marcas:
self.mp_drawing.draw_landmarks(
image=imagem,
landmark_list=marca,
connections=self.mp_hands.HAND_CONNECTIONS,
landmark_drawing_spec=self.mp_drawing_styles.get_default_hand_landmarks_style(),
connection_drawing_spec=self.mp_drawing_styles.get_default_hand_connections_style())
return imagem
def salvar_marcas_imagem(self, img, marcas, path_frame='frames', path_marca='marcas.csv'):
"""Salva todas marcas das maos em um arquivo csv"""
for marca in marcas:
file = pd.read_csv(path_marca, header=0, sep=';')
#!/usr/bin/env python
# coding: utf-8
# ## Overview
# It is a follow-up notebook to "Fine-tuning ResNet34 on ship detection" (https://www.kaggle.com/iafoss/fine-tuning-resnet34-on-ship-detection/notebook) and "Unet34 (dice 0.87+)" (https://www.kaggle.com/iafoss/unet34-dice-0-87/notebook) that shows how to evaluate the solution and submit predictions. Please check these notebooks for additional details.
# In[ ]:
from fastai.conv_learner import *
from fastai.dataset import *
import pandas as pd
import numpy as np
import os
from PIL import Image
from sklearn.model_selection import train_test_split
from tqdm import tnrange, tqdm_notebook
from scipy import ndimage
# In[ ]:
PATH = './'
TRAIN = '../input/airbus-ship-detection/train_v2/'
TEST = '../input/airbus-ship-detection/test_v2/'
SEGMENTATION = '../input/airbus-ship-detection/train_ship_segmentations_v2.csv'
PRETRAINED_DETECTION_PATH = '../input/fine-tuning-resnet34-on-ship-detection/models/'
PRETRAINED_SEGMENTATION_PATH = '../input/unet34-dice-0-87/models/'
DETECTION_TEST_PRED = '../input/fine-tuning-resnet34-on-ship-detection-new-data/ship_detection.csv'
# In[ ]:
nw = 2 #number of workers for data loader
arch = resnet34 #specify target architecture
# ### Data
# In[ ]:
train_names = [f for f in os.listdir(TRAIN)]
test_names = [f for f in os.listdir(TEST)]
#5% of data in the validation set is sufficient for model evaluation
tr_n, val_n = train_test_split(train_names, test_size=0.05, random_state=42)
segmentation_df = pd.read_csv(os.path.join(PATH, SEGMENTATION)).set_index('ImageId')
# As explained in https://www.kaggle.com/iafoss/unet34-dice-0-87/notebook, I drop all images without ships. The model responsible for ship detection will take care of them.
# In[ ]:
def cut_empty(names):
return [name for name in names
if(type(segmentation_df.loc[name]['EncodedPixels']) != float)]
tr_n_cut = cut_empty(tr_n)
val_n_cut = cut_empty(val_n)
# In[ ]:
def get_mask(img_id, df):
shape = (768,768)
img = np.zeros(shape[0]*shape[1], dtype=np.uint8)
masks = df.loc[img_id]['EncodedPixels']
if(type(masks) == float): return img.reshape(shape)
if(type(masks) == str): masks = [masks]
for mask in masks:
s = mask.split()
for i in range(len(s)//2):
start = int(s[2*i]) - 1
length = int(s[2*i+1])
img[start:start+length] = 1
return img.reshape(shape).T
# In[ ]:
class pdFilesDataset(FilesDataset):
def __init__(self, fnames, path, transform):
self.segmentation_df = pd.read_csv(SEGMENTATION).set_index('ImageId')
super().__init__(fnames, transform, path)
def get_x(self, i):
img = open_image(os.path.join(self.path, self.fnames[i]))
if self.sz == 768: return img
else: return cv2.resize(img, (self.sz, self.sz))
def get_y(self, i):
mask = np.zeros((768,768), dtype=np.uint8) if (self.path == TEST) else get_mask(self.fnames[i], self.segmentation_df)
img = Image.fromarray(mask).resize((self.sz, self.sz)).convert('RGB')
return np.array(img).astype(np.float32)
def get_c(self): return 0
# In[ ]:
def get_data(sz,bs):
tfms = tfms_from_model(arch, sz, crop_type=CropType.NO, tfm_y=TfmType.CLASS)
tr_names = tr_n_cut if (len(tr_n_cut)%bs == 0) else tr_n_cut[:-(len(tr_n_cut)%bs)] #cut incomplete batch from the ship-only training list
ds = ImageData.get_ds(pdFilesDataset, (tr_names,TRAIN),
(val_n_cut,TRAIN), tfms, test=(test_names,TEST))
md = ImageData(PATH, ds, bs, num_workers=nw, classes=None)
return md
# ### Model
# In[ ]:
cut,lr_cut = model_meta[arch]
# In[ ]:
def get_base(pre=True): #load ResNet34 model
layers = cut_model(arch(pre), cut)
return nn.Sequential(*layers)
# In[ ]:
class UnetBlock(nn.Module):
def __init__(self, up_in, x_in, n_out):
super().__init__()
up_out = x_out = n_out//2
self.x_conv = nn.Conv2d(x_in, x_out, 1)
self.tr_conv = nn.ConvTranspose2d(up_in, up_out, 2, stride=2)
self.bn = nn.BatchNorm2d(n_out)
def forward(self, up_p, x_p):
up_p = self.tr_conv(up_p)
x_p = self.x_conv(x_p)
cat_p = torch.cat([up_p,x_p], dim=1)
return self.bn(F.relu(cat_p))
class SaveFeatures():
features=None
def __init__(self, m): self.hook = m.register_forward_hook(self.hook_fn)
def hook_fn(self, module, input, output): self.features = output
def remove(self): self.hook.remove()
class Unet34(nn.Module):
def __init__(self, rn):
super().__init__()
self.rn = rn
self.sfs = [SaveFeatures(rn[i]) for i in [2,4,5,6]]
self.up1 = UnetBlock(512,256,256)
self.up2 = UnetBlock(256,128,256)
self.up3 = UnetBlock(256,64,256)
self.up4 = UnetBlock(256,64,256)
self.up5 = nn.ConvTranspose2d(256, 1, 2, stride=2)
def forward(self,x):
x = F.relu(self.rn(x))
x = self.up1(x, self.sfs[3].features)
x = self.up2(x, self.sfs[2].features)
x = self.up3(x, self.sfs[1].features)
x = self.up4(x, self.sfs[0].features)
x = self.up5(x)
return x[:,0]
def close(self):
for sf in self.sfs: sf.remove()
class UnetModel():
def __init__(self,model,name='Unet'):
self.model,self.name = model,name
def get_layer_groups(self, precompute):
lgs = list(split_by_idxs(children(self.model.rn), [lr_cut]))
return lgs + [children(self.model)[1:]]
# ### Score evaluation
# In[ ]:
def IoU(pred, targs):
pred = (pred > 0.5).astype(float)
intersection = (pred*targs).sum()
return intersection / ((pred+targs).sum() - intersection + 1.0)
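# Note (illustrative, not from the original kernel): the +1.0 term smooths the denominator,
# so two identical all-ones 2x2 masks score IoU = 4 / (4 + 4 - 4 + 1) = 0.8 rather than 1.0.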
# In[ ]:
def get_score(pred, true):
n_th = 10
b = 4
thresholds = [0.5 + 0.05*i for i in range(n_th)]
n_masks = len(true)
n_pred = len(pred)
ious = []
score = 0
for mask in true:
buf = []
for p in pred: buf.append(IoU(p,mask))
ious.append(buf)
for t in thresholds:
tp, fp, fn = 0, 0, 0
for i in range(n_masks):
match = False
for j in range(n_pred):
if ious[i][j] > t: match = True
if not match: fn += 1
for j in range(n_pred):
match = False
for i in range(n_masks):
if ious[i][j] > t: match = True
if match: tp += 1
else: fp += 1
score += ((b+1)*tp)/((b+1)*tp + b*fn + fp)
return score/n_th
# In this competition we should submit an individual mask for each identified ship. The simplest way to do it is splitting the total mask into individual ones based on the connectivity of detected objects.
# In[ ]:
def split_mask(mask):
threshold = 0.5
threshold_obj = 30 # ignore predictions composed of "threshold_obj" pixels or less
labled,n_objs = ndimage.label(mask > threshold)
result = []
for i in range(n_objs):
obj = (labled == i + 1).astype(int)
if(obj.sum() > threshold_obj): result.append(obj)
return result
# In[ ]:
def get_mask_ind(img_id, df, shape = (768,768)): #return mask for each ship
masks = df.loc[img_id]['EncodedPixels']
if(type(masks) == float): return []
if(type(masks) == str): masks = [masks]
result = []
for mask in masks:
img = np.zeros(shape[0]*shape[1], dtype=np.uint8)
s = mask.split()
for i in range(len(s)//2):
start = int(s[2*i]) - 1
length = int(s[2*i+1])
img[start:start+length] = 1
result.append(img.reshape(shape).T)
return result
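# Illustrative glue code (a sketch, not part of the original kernel): scores a single image by
# splitting the combined predicted mask into instances and comparing them with the per-ship
# ground-truth masks, using split_mask, get_mask_ind, and get_score defined above.
def score_single_image(pred_mask_total, img_id, df):
    pred_masks = split_mask(pred_mask_total)  # connected components above the pixel threshold
    true_masks = get_mask_ind(img_id, df)     # one binary mask per annotated ship
    if len(true_masks) == 0 and len(pred_masks) == 0:
        return 1.0  # assumed convention: an empty image predicted empty is fully correct (also avoids 0/0 in get_score)
    return get_score(pred_masks, true_masks)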
# In[ ]:
class Score_eval():
def __init__(self):
self.segmentation_df = pd.read_csv(SEGMENTATION)
import pandas
from cutlass.parse import (
load_codes_and_clean,
Country,
parse_phonenumber,
parse_workbook,
)
def test_load_codes_and_clean():
codes = load_codes_and_clean()
for country, code in codes.items():
assert country != ""
assert code != ""
def test_dataclasses():
uk = Country("United Kingdom", "UK", "+44")
assert str(uk) == "<Country United Kingdom extension=+44>"
def test_phonenumber_parse():
phonenumber = pandas.Series(["+41 7555 04310"])
parsed = parse_phonenumber(phonenumber)
assert parsed[0] == "41755504310"
assert parsed[1] == "Switzerland"
assert parsed[2] == "CH"
phonenumber = pandas.Series(["+357 94 861838"])
parsed = parse_phonenumber(phonenumber)
assert parsed[0] == "35794861838"
assert parsed[1] == "Cyprus"
assert parsed[2] == "CY"
def test_phonenumber_parse_malformed():
phonenumber = pandas.Series(["(41)-7555-04310"])
parsed = parse_phonenumber(phonenumber)
assert parsed[0] == "41755504310"
assert parsed[1] == "Switzerland"
assert parsed[2] == "CH"
phonenumber = pandas.Series(["(0+357)-94-861838"])
parsed = parse_phonenumber(phonenumber)
assert parsed[0] == "35794861838"
assert parsed[1] == "Cyprus"
assert parsed[2] == "CY"
phonenumber = pandas.Series(["+0357 (94)861838"])
import argparse
from netZooPy.panda import Panda
import pandas as pd
import sys
DEFAULT_MOTIF_FILE = '/opt/software/resources/tissues_motif.tsv'
DEFAULT_PPI_FILE = '/opt/software/resources/tissues_ppi.tsv'
# maximum number of rows to keep
NMAX = 15000
def handle_dummy_args(args):
'''
Since this script is called by WDL, we need to handle
dummy args that WDL provides.
Since WDL command blocks can't handle optional inputs
(e.g. we can't conditionally include or exclude a flag
commandline arg to this script), we pass 'dummy' values.
This function substitutes the dummy values for actual, valid
paths to the optional PPI and motif files.
Modifies the args namespace directly.
'''
if args.motif == '__motif__':
args.motif = DEFAULT_MOTIF_FILE
if args.ppi == '__ppi__':
args.ppi = DEFAULT_PPI_FILE
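# Hypothetical invocation sketch (the real argparse wiring lives elsewhere in this script and
# may differ; the flag names below are only inferred from the attributes run_panda uses):
#     parser = argparse.ArgumentParser()
#     parser.add_argument('--exprs')
#     parser.add_argument('--motif', default='__motif__')
#     parser.add_argument('--ppi', default='__ppi__')
#     args = parser.parse_args()
#     handle_dummy_args(args)  # swaps the WDL dummy values for the bundled resource files
#     run_panda(args)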
def run_panda(args):
'''
Runs PANDA object creation and exports output to file.
'''
# Load the data as a pandas dataframes
exprs_df = pd.read_csv(args.exprs, index_col = 0, header = 0, sep = "\t")
motif_df = pd.read_csv(args.motif, header = None, sep = "\t")
ppi_df = pd.read_csv(args.ppi, header = None, sep = "\t")
# Adding headers for the PANDAs obj to read
motif_df.columns =['source','target','weight']
# subset the expression dataframe to retain only the top NMAX
# by mean expression. Otherwise, memory consumption is too much.
# covering a very fringe case here where this column might
# already be in the matrix. Just keep adding underscores to
# create a unique column name for the row-mean values.
mean_col_name = '__mean__'
while mean_col_name in exprs_df.columns:
mean_col_name = '_' + mean_col_name + '_'
exprs_df[mean_col_name] = exprs_df.apply(lambda x: x.mean(), axis=1)
# retain only the top NMAX and drop that mean value column since we're done with it.
exprs_df = exprs_df.nlargest(NMAX, mean_col_name)
exprs_df.drop(mean_col_name, axis=1, inplace=True)
# Running PANDA with the default expected parameters
# save_memory = False results in outputting the PANDA network in edge format
# save_memory = True results in a matrix format
# Edge format results in massive memory usage after PANDA finishes
# LIONESS requires keep_expression_matrix
# Default modeProcess blows memory stack, so have to use "legacy".
# Pass the pandas dataframes directly rather than the PATHs.
panda_obj = Panda(
exprs_df,
motif_df,
ppi_df,
remove_missing = False,
keep_expression_matrix = True,
save_memory = False,
modeProcess = "legacy"
)
# Pull PANDA network out of object
out_mtx = pd.DataFrame(panda_obj.panda_network)
#!/usr/bin/env python
# coding: utf-8
# In[1]:
#Prompt:Directory
chsavedir=''
noch=''
while True:
try:
ck3dirfile= open("ck3dir.txt", 'r',encoding="utf-8")
ck3dirfile.close()
prev=True
except (NameError,IndexError,FileNotFoundError):
print('No existing directories')
prev=False
break
try:
if prev is True:
print('Would you like to use your previous directories? y/n:')
usepre=input()
if usepre=='y':
ck3dirfile= open("ck3dir.txt", 'r',encoding="utf-8")
ck3dir= ck3dirfile.read().splitlines()
ck3dirfile.close()
textfile = open(fr'{ck3dir[0]}', 'r',encoding="utf-8")
cksave = textfile.read()
textfile.close()
namefile = open(fr'{ck3dir[1]}', 'r',encoding="utf-8")
locnames = namefile.read()
namefile.close()
dynfile=open(fr'{ck3dir[2]}', 'r',encoding="utf-8")
dynloc=dynfile.read()
dynfile.close()
noch=True
break
elif usepre!='y':
print('Would like to change only your savefile directory? y/n:')
chsave=input()
if chsave=='y':
ck3dirfile= open("ck3dir.txt", 'r',encoding="utf-8")
ck3dir= ck3dirfile.read().splitlines()
ck3dirfile.close()
namefile = open(fr'{ck3dir[1]}', 'r',encoding="utf-8")
locnames = namefile.read()
namefile.close()
dynfile=open(fr'{ck3dir[2]}', 'r',encoding="utf-8")
dynloc=dynfile.read()
dynfile.close()
chsavedir=True
break
elif chsave!='y':
chsavedir=False
break
except (NameError,IndexError,FileNotFoundError):
print('Invalid directories')
prev=False
break
while True:
try:
if chsavedir is True:
print(r'Enter your extracted ck3 savefile directory (eg. C:\Users\User PC\Documents\Paradox Interactive\Crusader Kings III\save games\mysave\gamestate.txt):')
save = input()
textfile = open(save, 'r',encoding="utf-8")
cksave = textfile.read()
textfile.close()
else:
break
except FileNotFoundError:
print("One or more file directory is incorrectly inputted. Please try again.")
continue
if bool(cksave) is True and bool(locnames) is True and bool(dynloc) is True:
with open('ck3dir.txt','+w',encoding="utf-8") as file:
file.writelines([f'{save}\n{ck3dir[1]}\n{ck3dir[2]}'])
file.close()
print('Changed savefile directory in ck3dir.txt.')
break
while True:
try:
if prev is True and noch is True:
break
elif prev is True and chsavedir is True:
break
elif prev is True and chsavedir is False:
pass
elif prev is False:
pass
print(r'Enter your extracted ck3 savefile directory (eg. C:\Users\User PC\Documents\Paradox Interactive\Crusader Kings III\save games\mysave\gamestate.txt):')
save = input()
print(r'Enter your localized name file directory (eg. C:\Program Files (x86)\Steam\steamapps\common\Crusader Kings III\game\localization\english\names\character_names_l_english.yml):')
loc= input()
print(r'Enter your localized dynasty name file directory (eg. C:\Program Files (x86)\Steam\steamapps\common\Crusader Kings III\game\localization\english\dynasties\dynasty_names_l_english.yml):')
locdyn=input()
textfile = open(save, 'r',encoding="utf-8")
cksave = textfile.read()
textfile.close()
namefile = open(loc, 'r',encoding="utf-8")
locnames = namefile.read()
namefile.close()
dynfile=open(locdyn, 'r',encoding="utf-8")
dynloc=dynfile.read()
dynfile.close()
except FileNotFoundError:
print("One or more file directory is incorrectly inputted. Please try again.")
continue
if bool(cksave) is True and bool(locnames) is True and bool(dynloc) is True:
with open('ck3dir.txt','+w',encoding="utf-8") as file:
file.writelines([f'{save}\n{loc}\n{locdyn}'])
file.close()
print('Saved directories as ck3dir.txt for future use')
break
# In[2]:
while True:
print("Type your house or dynasty name:")
dynastyname=input()
print("Type your house or dynasty head character id (Use debug mode to get the id):")
a=input()
print("Do you want to include cadet branches? y/n:")
branch=input()
if branch=="y":
branch=True
elif branch!="y":
branch=False
print("Proceed with these inputs? y/n:")
confirm=input()
if confirm=="y":
break
elif confirm!="y":
continue
# In[3]:
import re
import pandas as pd
import unidecode
b="={\s*first_name"
c=a+b
pat=re.compile(fr"\b{c}.*?(?=\d=)",re.DOTALL)
pat1=re.compile("(\d.*)(?=\={)")
pat2=re.compile("(?<=dynasty_house=)(.*)")
pat3=re.compile("(?<=first_name=\")(.*)(?<!\")")
pat4=re.compile("(?<=birth=)(.*)")
pat5=re.compile(r"(?<=\bdate=)(.*)")
pat6=re.compile("(?<=sexuality=)(.*)")
pat7=re.compile("(?<=child=)(.*)")
#pat8 in surname
#pat 9 in surname
pat10=re.compile('landed_titles={\n0=.*?(?=dynasties={)',re.DOTALL)
#pat11 in gettitle
# In[4]:
#Generate Family ID. Gramps needs a family id created for the parent corresponding to both the role in marriage (Mother/Father) as well as Parent/Child. Eg.:F10000
famidlist=[]
def excel_format(num):
res = ""
while num:
mod = (num - 1) % 26
res = chr(65 + mod) + res
num = (num - mod) // 26
return res
def full_format(num, d=3):
chars = num // (10**d-1) + 6 # this becomes A..ZZZ
digit = num % (10**d-1) + 1 # this becomes 001..999
return excel_format(chars) + "{:0{}d}".format(digit, d)
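# Illustrative check (not from the original script): with d=4 the generator above yields
# full_format(0, d=4) -> 'F0001', full_format(1, d=4) -> 'F0002', ..., so famidlist ends up
# holding sequential Gramps-style family IDs.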
for i in range(9999):
fid1=full_format(i, d=4)
fid2=famidlist.append(fid1)
# In[5]:
#Truncate Person data for easier manipulation
patex=re.compile(r"\bliving={.*?(?=prune_queue={)",re.DOTALL)
exall=patex.finditer(cksave)
for ex1 in exall:
ex1.group()
ex1=ex1.group()
#Capture Head Person/Dynasty
allmatch=pat.finditer(ex1,re.DOTALL)
for match in allmatch:
match.group()
match=match.group()
dynmatch=re.finditer(r'(\bdynasties={)(.*)(?<=\bdynasties={)',cksave,re.DOTALL)
for dyn in dynmatch:
dyn.group()
dyn=dyn.group()
testweird=re.search('ONCLICK:CHARACTER',dyn)
if testweird is not None:
dyn=re.sub(r"\WONCLICK:CHARACTER,\d*\W\WTOOLTIP:CHARACTER,\d*\W\WL\W","",dyn)
#capture title data
alltitle=pat10.finditer(cksave)
for title1 in alltitle:
title1.group()
titleloc=title1.group()
allsurname=pat2.finditer(match)
for surname in allsurname:
surname.group()
surnamemain=surname.group()
houseid=surnamemain
surname=dynastyname
surnameb=houseid
if bool(branch) is True:
bshort='\d*=\{(?:(?!\d*=\{)[\s\S\r])*?parent_dynasty_house='
branchid=re.findall(fr'{bshort}{houseid}',dyn)
branchid=[i for i in branchid if re.search(fr'{surname}',i) is None]
branchid=[re.sub('=[\s\S\r]+','',i,flags=re.DOTALL) for i in branchid]  # pass DOTALL as flags, not as the positional count argument
else:
pass
branchid=''
# In[6]:
#Grab Character ID
def getid():
global idnum
allidnum=pat1.finditer(match)
for idnum in allidnum:
idnum.group()
idnum=idnum.group()
getid()
# In[7]:
#Grab Surname
def lastname():
global surname
global surnameb
global dyname
surname=[]
surnameb=[]
allsurname=pat2.finditer(match)
for surname in allsurname:
surname.group()
surname=surname.group()
surnameb=surname
else:
pass
if surname==houseid:
surname=dynastyname
elif bool(surname) is True and surname!=houseid:
e=surname
f="={\s\s"
g=e+f
h="={"
i=e+h
testkey=re.search(fr"\b{i}\s*key",dyn)
if testkey is not None:
surname=re.findall(fr'\b{i}\s*key=\"(.*)"',dyn)
surname=surname[0]
surname=re.findall('(?<=_)[^_]+$',surname)
surname=surname[0]
surname=surname.title()
surname= unidecode.unidecode(surname)
surname=surname.title()
elif re.search(fr'(?<=\b{g})localized_name="(.*)"',dyn) is not None:
dyname=re.findall(fr'(?<=\b{g}).*name="(.*)"',dyn)
dyname=dyname[0]
surname=dyname
surname= unidecode.unidecode(surname)
surname=surname.title()
else:
dyname=re.findall(fr'(?<=\b{g}).*name="(.*)"',dyn)
dyname=dyname[0]
surname=re.findall(fr'\b{dyname}\b:.*"(.*)"',dynloc)
surname=surname[0]
surname=surname
surname= unidecode.unidecode(surname)
surname=surname.title()
else:
surname="None"
# In[8]:
#Grab Given Name
def given():
global firstname
firstname=re.findall('first_name="(.*)"',match)
firstname=firstname[0]
if re.search(fr'\b{firstname}\b',locnames) is not None:
firstname=re.findall(fr'(?<=\s){firstname}\b:.*"(.*)"',locnames)
firstname=firstname[0]
firstname= unidecode.unidecode(firstname)
firstname=firstname.title()
else:
firstname= unidecode.unidecode(firstname)
firstname=firstname.title()
given()
# In[9]:
#Grab Birthday
def birthday():
global birth
allbirth=pat4.finditer(match)
for birth in allbirth:
birth.group()
birth=birth.group()
birth=birth.replace(".", "-")
birthday()
# In[10]:
#Grab Death day
def deathdate():
global death
testdeath=re.search(r"\bdead_data={",match)
if testdeath is not None:
alldeath=pat5.finditer(match)
for death in alldeath:
death.group()
death=death.group()
death=death.replace(".","-")
else:
death=""
pass
deathdate()
# In[11]:
#Grab Gender
def mkgender():
global gender
testgender=re.search("female=yes",match)
if testgender is not None:
gender="female"
else:
gender="male"
mkgender()
# In[12]:
# Grab Title
def gettitle():
global titled
global rank
if re.search(fr'domain=',match) is None:
titled=""
elif re.search(fr'domain=',match) is not None:
liegeall=re.finditer(fr'(?<=domain=).*',match)
for liege in liegeall:
liege.group()
liege=liege.group()
liegeall2=re.finditer(r'(?<={\s)\b\d*\b',liege)
for liege in liegeall2:
liege.group()
liege=liege.group()
f='={'
liege1=liege+f
pat11=re.compile(fr'\b{liege1}.*?(?=date)',re.DOTALL)
rankall=pat11.finditer(titleloc)
for rank in rankall:
rank.group()
rank=rank.group()
if re.search('key="b',rank) is not None and gender!='female':
rankname="Baron of "
elif re.search('key="b',rank) is not None and gender=='female':
rankname="Baroness of "
elif re.search('key="c',rank) is not None and gender!='female':
rankname="Count of "
elif re.search('key="c',rank) is not None and gender=='female':
rankname="Countess of "
elif re.search('key="d',rank) is not None and gender!='female':
rankname="Duke of "
elif re.search('key="d',rank) is not None and gender=='female':
rankname="Duchess of "
elif re.search('key="k',rank) is not None and gender!='female':
rankname="King of "
elif re.search('key="k',rank) is not None and gender=='female':
rankname="Queen of "
elif re.search('key="e',rank) is not None and gender!='female':
rankname="Emperor of "
elif re.search('key="e',rank) is not None and gender=='female':
rankname="Empress of "
elif re.search('key="x',rank) is not None:
rankname="Leader of the "
else:
rankname="Titled"
rankall=re.finditer('(?<=name=).*',rank)
for rank in rankall:
rank.group()
rank=rank.group()
rank=rank.strip('""')
titled=rankname+rank
titled= unidecode.unidecode(titled)
titled=titled.title()
else:
pass
gettitle()
# In[13]:
#Grab Child
def tmpchild():
global childlist
testchild=re.search("(?<=child=).*",match)
if testchild is not None and surname==dynastyname:
allchild=pat7.finditer(match)
for child in allchild:
child.group()
child=child.group()
child=child.replace('{',"").replace('}',"")
childlist= list(child.split(" "))
childlist.pop(0)
childlist.pop(-1)
elif testchild is not None and re.search(dynastyname,surname) is not None and bool(branch) is True:
allchild=pat7.finditer(match)
for child in allchild:
child.group()
child=child.group()
child=child.replace('{',"").replace('}',"")
childlist= list(child.split(" "))
childlist.pop(0)
childlist.pop(-1)
else:
pass
for i in branchid:
if testchild is not None and i==surnameb and bool(branch) is True:
allchild=pat7.finditer(match)
for child in allchild:
child.group()
child=child.group()
child=child.replace('{',"").replace('}',"")
childlist= list(child.split(" "))
childlist.pop(0)
childlist.pop(-1)
break
else:
continue
tmpchild()
# In[14]:
#Create Family relationship with children and role in marriage. Nonbinary genders default to Father role but will be displayed as unknown on the chart.
def family():
global famid
testchild=re.search("(?<=child=).*",match)
if testchild is not None and surname==dynastyname:
famid=famidlist[0]
famidlist.pop(0)
elif testchild is not None and re.search(dynastyname,surname) is not None and bool(branch) is True:
famid=famidlist[0]
famidlist.pop(0)
else:
pass
for i in branchid:
if testchild is not None and i==surnameb and bool(branch) is True:
famid=famidlist[0]
famidlist.pop(0)
break
else:
continue
family()
def marry():
global husband
global wife
if bool(famid) is True and gender=="male":
husband=idnum
wife=""
elif bool(famid) is True and gender=="female":
wife=idnum
husband=""
elif bool(famid) is True and gender!="male" and gender!="female":
husband=idnum
wife=""
else:
pass
marry()
# In[15]:
#Creating Head Person Table
if bool(death) is True and bool(titled) is True:
d1={'person':idnum,'surname':surname,'given':firstname,'gender':gender,'birth date':birth,'death date':death,'title':titled}
persondata=pd.DataFrame(data=d1,index=[0])
elif bool(death) is False and bool(titled) is True:
d1={'person':idnum,'surname':surname,'given':firstname,'gender':gender,'birth date':birth,'death date':'','title':titled}
persondata=pd.DataFrame(data=d1,index=[0])
elif bool(death) is True and bool(titled) is False:
d1={'person':idnum,'surname':surname,'given':firstname,'gender':gender,'birth date':birth,'death date':death,'title':''}
persondata=pd.DataFrame(data=d1,index=[0])
elif bool(death) is False and bool(titled) is False:
d1={'person':idnum,'surname':surname,'given':firstname,'gender':gender,'birth date':birth,'death date':'','title':''}
persondata=pd.DataFrame(data=d1,index=[0])
#Creating Head Marriage Table
if bool(husband) is True:
d2={'marriage':famid,'husband':husband,'wife':''}
marrydata=pd.DataFrame(data=d2,index=[0])
elif bool(wife) is True:
d2={'marriage':famid,'husband':'','wife':wife}
marrydata=pd.DataFrame(data=d2,index=[0])
from sklearn.datasets import load_breast_cancer
from sklearn import __version__ as sklearn_version
from sklearn.linear_model import RidgeClassifier, LogisticRegression
from sklearn.ensemble import RandomForestClassifier, ExtraTreesClassifier, GradientBoostingClassifier
from sklearn.metrics import log_loss, accuracy_score, roc_auc_score, f1_score
import pandas as pd
import numpy as np
from linear_stacker import BinaryClassificationLinearPredictorStacker
try:
from sklearn.model_selection import KFold
except ImportError:
from sklearn.cross_validation import KFold
def sigmoid(x):
return 1 / (1 + np.exp(- x))
def get_folds(data):
"""returns correct folding generator for different versions of sklearn"""
if sklearn_version.split('.')[1] == '18':
# Module model_selection is in the distribution
kf = KFold(n_splits=5, shuffle=True, random_state=1)
return kf.split(data)
else:
# Module model_selection is not in the distribution
kf = KFold(n=len(data), n_folds=5, shuffle=True, random_state=1)
return kf
def main():
pd.options.display.max_rows = 600
# Load breast cancer dataset
dataset = load_breast_cancer()
# Split in data and target
classifiers = [
('ridge', RidgeClassifier(alpha=0.0001, normalize=True, random_state=0)),
('logit', LogisticRegression(C=0.01, random_state=1)),
('xtr', ExtraTreesClassifier(n_estimators=50, max_features=.4, max_depth=10, random_state=2, n_jobs=-1)),
('rfr', RandomForestClassifier(n_estimators=50, max_features=.2, max_depth=10, random_state=3, n_jobs=-1)),
('gbr', GradientBoostingClassifier(n_estimators=100, max_depth=2,learning_rate=.1,random_state=4))
]
# Go through classifiers
oof_labels = np.zeros((len(dataset.data), len(classifiers)))
oof_probas = np.zeros((len(dataset.data), len(classifiers)))
for reg_i, (name, reg) in enumerate(classifiers):
# compute out of fold (OOF) predictions
for trn_idx, val_idx in get_folds(dataset.data):
# Split data in training and validation sets
trn_X, trn_Y = dataset.data[trn_idx], dataset.target[trn_idx]
val_X = dataset.data[val_idx]
# Fit the classifier
reg.fit(trn_X, trn_Y)
# Predict OOF data
if hasattr(reg, 'predict_proba'):
oof_probas[val_idx, reg_i] = reg.predict_proba(val_X)[:, 1]
else:
oof_probas[val_idx, reg_i] = sigmoid(reg.predict(val_X))
oof_labels[val_idx, reg_i] = reg.predict(val_X)
# Display OOF score
print("Accuracy for classifier %6s : %.5f" % (name, accuracy_score(dataset.target, oof_labels[:, reg_i])))
print("Log_loss for classifier %6s : %.5f" % (name, log_loss(dataset.target, oof_probas[:, reg_i])))
print("Roc_auc for classifier %6s : %.5f" % (name, roc_auc_score(dataset.target, oof_probas[:, reg_i])))
# Stacking using labels
print('Stacking using labels \n'
'=====================')
print("\tLog loss Benchmark using labels' average : %.5f" % (log_loss(dataset.target, np.mean(oof_labels, axis=1))))
stackers = [
# Linear Stacker with labels, normed weights
('Standard Linear Stacker (normed weights)',
BinaryClassificationLinearPredictorStacker(metric=log_loss,
algo='standard',
max_iter=1000,
verbose=0,
normed_weights=True,
# step=0.01
)),
# Linear Stacker with labels, no weight constraint
('Standard Linear Stacker (no constraint)',
BinaryClassificationLinearPredictorStacker(metric=log_loss,
algo='standard',
max_iter=1000,
verbose=0,
normed_weights=False,
# step=0.01
)),
# Linear Stacker with labels normed weights swapping algo
('Swapping Linear Stacker (normed weights)',
BinaryClassificationLinearPredictorStacker(metric=log_loss,
algo='swapping',
max_iter=1000,
verbose=0,
normed_weights=True,
# step=0.01
)),
# Linear Stacker with labels no weights constraints swapping algo
('Swapping Linear Stacker (no constraint)',
BinaryClassificationLinearPredictorStacker(metric=log_loss,
algo='swapping',
max_iter=1000,
verbose=0,
normed_weights=False,
# step=0.01
))
]
for description, stacker in stackers:
# Fit stacker
stacker.fit(pd.DataFrame(oof_labels, columns=[name for (name, _) in classifiers]),
pd.Series(dataset.target, name='target'))
# display results
print("\tAccuracy %s: %.5f"
% (description, accuracy_score(dataset.target, stacker.predict(oof_labels))))
print("\tF1_score %s: %.5f"
% (description, f1_score(dataset.target, stacker.predict(oof_labels))))
print("\tLog loss %s: %.5f"
% (description, log_loss(dataset.target, stacker.predict_proba(oof_labels))))
print("\tAUC score %s: %.5f"
% (description, roc_auc_score(dataset.target, stacker.predict_proba(oof_labels))))
# Stacking using probabilities
print('Stacking using probabilities \n'
'============================')
print("\tLog loss Benchmark using probas' average : %.5f" % (log_loss(dataset.target, np.mean(oof_probas, axis=1))))
for description, stacker in stackers:
# Fit stacker
stacker.fit(pd.DataFrame(oof_probas, columns=[name for (name, _) in classifiers]),
pd.Series(dataset.target, name='target'))
import pandas as pd
from shapely.geometry import LineString, Point
from syspy.spatial import spatial, zoning
from syspy.transitfeed import feed_links
# seconds
def to_seconds(time_string):
return pd.to_timedelta(time_string).total_seconds()
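# Illustrative example (assuming a typical GTFS HH:MM:SS string): to_seconds('08:30:00')
# returns 30600.0, since pandas parses the string as a timedelta.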
def point_geometry(row):
return Point(row['stop_lon'], row['stop_lat'])
def linestring_geometry(dataframe, point_dict, from_point, to_point):
df = dataframe.copy()
def geometry(row):
return LineString(
(point_dict[row[from_point]], point_dict[row[to_point]]))
return df.apply(geometry, axis=1)
class BaseGtfsImporter():
"""
importer = BaseGtfsImporter(gtfs_path)
importer.read()
importer.build()
sm = stepmodel.StepModel()
sm.links = importer.links
sm.nodes = importer.stops
"""
def __init__(self, gtfs_path):
self.gtfs_path = gtfs_path
def read(self, encoding=None):
self.stop_times = pd.read_csv(
self.gtfs_path + 'stop_times.txt',
encoding=encoding,
)
self.trips = pd.read_csv(
self.gtfs_path + 'trips.txt',
encoding=encoding,
low_memory=False # mixed types
)
self.routes = pd.read_csv(
self.gtfs_path + 'routes.txt',
encoding=encoding
)
self.stops = pd.read_csv(self.gtfs_path + 'stops.txt', encoding=encoding)
def pick_trips(self):
# one trip by direction
self.trips = pd.merge(self.trips, self.routes[['route_id']])
"""
MIT License
Copyright (c) 2016 <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import os
import pandas as pd
import logging
from functools import partial
from zeex.core.compat import QtGui, QtCore
from zeex.core.models.actions import FileViewModel
from zeex.core.ctrls.dataframe import DataFrameModelManager
from zeex.core.ui.actions.merge_purge_ui import Ui_MergePurgeDialog
from zeex.core.utility.collection import DictConfig, SettingsINI
from zeex.core.utility.pandatools import gather_frame_fields
from zeex.core.utility.widgets import create_standard_item_model
from zeex.core.views.basic.map_grid import MapGridDialog
from zeex.core.views.basic.push_grid import PushGridHandler
from zeex.core.ctrls.dataframe import DataFrameModel
class MergePurgeDialog(QtGui.QDialog, Ui_MergePurgeDialog):
"""
This dialog allows a user to do large updates on a given source DataFrameModel.
- Merging other file(s) with the source based on common keys/fields
- Purging records from the source using other file(s) based on common keys/fields
- Sorting the DataFrame by multiple columns/ascending/descending
- Deduplicating the DataFrame based on common keys/fields
Settings can be exported to a config.ini file and re-imported at a later time.
"""
signalMergeFileOpened = QtCore.Signal(str) # file path
signalSFileOpened = QtCore.Signal(str) # file path
signalSourcePathSet = QtCore.Signal(str) #file path
signalExecuted = QtCore.Signal(str, str, str) # source_path, dest_path, report_path
def __init__(self, df_manager: DataFrameModelManager, parent=None, source_model=None):
"""
:param df_manager: (DataFrameModelManager)
This will be used to handle reading/updating of DataFrameModels used
in the operation.
:param parent: (QMainWindow)
:param source_model: (DataFrameModel)
An optional source DataFrameModel
"""
self.df_manager = df_manager
QtGui.QDialog.__init__(self, parent)
self.setupUi(self)
self.source_model = source_model
self._merge_view_model = FileViewModel()
self._suppress_view_model = FileViewModel()
self._purge_files = {}
self._merge_files = {}
self._field_map_grids = {}
self._field_map_data = {}
self.sortAscHandler = None
self.sortOnHandler = None
self.dedupeOnHandler = None
self.uniqueFieldsHandler = None
self.gatherFieldsHandler = None
self.configure()
if self.source_model is not None:
self.set_source_model(source_model, configure=True)
def configure(self, source_path=None, dest_path=None):
"""
Connects main buttons and actions.
:param source_path: (str, default None)
If this is None, there must be a valid path already in the sourcePathLineEdit or an AssertionError is raised.
:param dest_path: (str, default None)
Optional custom destination path to be added to the destPathLineEdit.
:return: None
"""
if source_path is None:
source_path = self.sourcePathLineEdit.text()
if os.path.isfile(source_path):
self.set_line_edit_paths(source_path, dest_path=dest_path)
if self.sortAscHandler is None:
self.set_handler_sort_asc()
source_func = partial(self.open_file, model_signal=self.signalSourcePathSet)
self.signalSourcePathSet.connect(self.set_source_model_from_browse)
self.btnBrowseSourcePath.clicked.connect(source_func)
self.btnBrowseDestPath.clicked.connect(self.set_dest_path_from_browse)
self.signalMergeFileOpened.connect(self.add_merge_file)
merge_file_func = partial(self.open_file, model_signal=self.signalMergeFileOpened)
self.btnAddMergeFile.clicked.connect(merge_file_func)
self.btnBrowseMergeFile.clicked.connect(merge_file_func)
self.btnDeleteMergeFile.clicked.connect(partial(self.remove_file, self.mergeFileTable))
self.btnEditMergeFile.clicked.connect(partial(self.open_edit_file_window, self.mergeFileTable, self._merge_files))
self.mergeFileTable.setModel(self._merge_view_model)
self.signalSFileOpened.connect(self.add_purge_file)
sfile_func = partial(self.open_file, model_signal=self.signalSFileOpened)
self.btnEditSFile.clicked.connect(partial(self.open_edit_file_window, self.sFileTable, self._purge_files))
self.btnDeleteSFile.clicked.connect(partial(self.remove_file, self.sFileTable))
self.btnAddSFile.clicked.connect(sfile_func)
self.btnBrowseSFile.clicked.connect(sfile_func)
self.sFileTable.setModel(self._suppress_view_model)
self.btnMapSFields.clicked.connect(partial(self.open_field_map, self.sFileTable, self._purge_files))
self.btnMapMergeFields.clicked.connect(partial(self.open_field_map, self.mergeFileTable, self._merge_files))
self.btnExecute.clicked.connect(self.execute)
self.btnExportTemplate.clicked.connect(self.export_settings)
self.btnImportTemplate.clicked.connect(self.import_settings)
self.btnReset.clicked.connect(self.reset)
def set_source_model_from_browse(self, filepath):
self.set_line_edit_paths(filepath, dest_path=False)
self.set_source_model(configure=True)
def set_dest_path_from_browse(self, filepath=None):
if filepath is None:
try:
dirname = os.path.dirname(self.df_manager.last_path_read)
except:
dirname = ''
filepath = QtGui.QFileDialog.getOpenFileName(self, dir=dirname)[0]
self.destPathLineEdit.setText(filepath)
def set_source_model(self, model=None, configure=True):
"""
Sets the source DataFrameModel for the Dialog.
:param model: (DataFrameModel)
The DataFrameModel to be set.
:param configure:
True re-configures file path line edits and the listviews.
:return:
"""
if not hasattr(model, 'dataFrame'):
if model is None:
model = self.sourcePathLineEdit.text()
if isinstance(model, str) and os.path.exists(model):
model = self.df_manager.read_file(model)
else:
raise Exception("model parameter must be a filepath or a qtpandas.models.DataFrameModel")
if self.source_model is not None:
models_different = model.filePath != self.source_model.filePath
if models_different:
try:
self.source_model.dataFrameChanged.disconnect(self.sync)
except RuntimeError:
pass
else:
models_different = True
if models_different:
self.source_model = model
self.source_model.dataFrameChanged.connect(self.sync)
if configure:
self.sync()
def sync(self):
df = self.source_model.dataFrame()
cols = df.columns.tolist()
if self.dedupeOnHandler is None or self.uniqueFieldsHandler is None:
self.set_push_grid_handlers()
else:
self.dedupeOnHandler.set_model_from_list(cols)
self.gatherFieldsHandler.set_model_from_list(cols)
self.sortOnHandler.set_model_from_list(cols)
self.uniqueFieldsHandler.set_model_from_list(cols)
self.set_primary_key_combo_box()
self.set_line_edit_paths(source_path=self.source_model.filePath)
def set_line_edit_paths(self, source_path=None, dest_path=None):
"""
Sets the source/destination line edits in the Dialog.
:param source_path: (str, default None)
An optional valid filepath for the source DataFrameModel.
If None, :param dest_path cannot be None.
:param dest_path: (str, default None)
An optional destination path. One will be created automatically
if None is given.
False will prevent the destination path from being set at all.
:return: None
"""
assert any([dest_path, source_path]), "source_path or dest_path must be set."
if dest_path is None:
dirname = os.path.dirname(source_path)
base, ext = os.path.splitext(os.path.basename(source_path))
dest_path = os.path.join(dirname, base + "_merged" + ext)
if source_path:
self.sourcePathLineEdit.setText(source_path)
if dest_path:
self.destPathLineEdit.setText(dest_path)
def set_push_grid_handlers(self, column_model=None, sorton_model=None, sortasc_model=None,
dedupe_model=None, gather_model=None, unique_model=None):
"""
Sets all default push grid handlers for the dialog.
:param column_model: (QStandardItemModel, default None)
:param sorton_model: ((QStandardItemModel,list) default None)
:param sortasc_model: ((QStandardItemModel,list) default None)
:param dedupe_model: ((QStandardItemModel,list) default None)
:return:
"""
if column_model is None:
column_model = self.get_source_columns_model()
self.set_handler_sort_on(column_model=None, default_model=sorton_model)
self.set_handler_sort_asc(default_model=sortasc_model)
self.set_handler_dedupe_on(column_model=None, default_model=dedupe_model)
self.set_handler_gather_fields(column_model=None, default_model=gather_model)
self.set_handler_unique_fields(column_model=None, default_model=unique_model)
def set_handler_sort_on(self, column_model=None, default_model=None):
if column_model is None:
column_model = self.get_source_columns_model()
self.sortOnHandler = PushGridHandler(left_model=column_model, left_view=self.sortOnLeftView,
left_button=self.sortOnLeftButton,
left_delete=True, right_model=default_model,
right_view=self.sortOnRightView,
right_button=self.sortOnRightButton)
def set_handler_sort_asc(self, default_model=None, overwrite=False):
if self.sortAscHandler is None or default_model is not None or overwrite:
sort_asc = QtGui.QStandardItemModel()
sort_asc.appendRow(QtGui.QStandardItem('True'))
sort_asc.appendRow(QtGui.QStandardItem('False'))
self.sortAscHandler = PushGridHandler(left_model=sort_asc, left_view=self.sortAscLeftView,
left_button=self.sortAscLeftButton,
left_delete=False, right_model=default_model,
right_view=self.sortAscRightView,
right_button=self.sortAscRightButton)
def set_handler_dedupe_on(self, column_model=None, default_model=None):
if column_model is None:
column_model = self.get_source_columns_model()
self.dedupeOnHandler = PushGridHandler(left_model=column_model, left_view=self.dedupeOnLeftView,
left_button=self.dedupeOnLeftButton,
left_delete=True, right_model=default_model,
right_view=self.dedupeOnRightView,
right_button=self.dedupeOnRightButton)
def set_handler_gather_fields(self, column_model=None, default_model=None):
if column_model is None:
column_model = self.get_source_columns_model()
self.gatherFieldsHandler = PushGridHandler(left_model=column_model,
left_view=self.gatherFieldsListViewLeft,
left_button=self.gatherFieldsButtonLeft,
left_delete=True, right_model=default_model,
right_view=self.gatherFieldsListViewRight,
right_button=self.gatherFieldsButtonRight)
def set_handler_unique_fields(self, column_model=None, default_model=None):
if column_model is None:
column_model = self.get_source_columns_model()
self.uniqueFieldsHandler = PushGridHandler(left_model=column_model,
left_view=self.uniqueFieldsListViewLeft,
left_button=self.uniqueFieldsPushButtonLeft,
left_delete=True, right_model=default_model,
right_view=self.uniqueFieldsListViewRight,
right_button=self.uniqueFieldsPushButtonRight)
def get_source_columns_model(self, raise_on_error=True) -> QtGui.QStandardItemModel:
"""
Quick way to get a QStandardItemModel from the DataFrameModel's columns.
:param raise_on_error: (bool, default True)
Raises an error if the source_model has not yet been set.
:return: (QtGui.QStandardItemModel)
"""
if self.source_model is None:
if raise_on_error:
raise Exception("Cannot get source_columns as source_model is None!")
else:
columns = []
else:
columns = self.source_model.dataFrame().columns.tolist()
return create_standard_item_model(columns)
def open_file(self, file_names: list=None, model_signal=None, allow_multi=True):
"""
Opens a Merge or Purge file (or really any file) and calls the
given model signal after registering the DataFrameModel with the DataFrameModelManager.
:param file_names: (list, default None)
An optional list of filenames to open.
The user must select filenames otherwise.
:param model_signal: (QtCore.Signal)
A signal to be called after successfully reading the DataFrameModel.
:param allow_multi: (bool, default True)
True allows multiple files to be read (and the signal called each time).
False allows only the first file to be read.
:return: None
You can call MergePurgeDialog.df_manager.get_frame(filename) to
retrieve a DataFrameModel.
"""
if file_names is None:
dirname = os.path.dirname(self.sourcePathLineEdit.text())
file_names = QtGui.QFileDialog.getOpenFileNames(parent=self,
dir=dirname)[0]
if isinstance(file_names, str):
    file_names = [file_names]
assert not isinstance(file_names, str) and hasattr(file_names, "__iter__"), "file_names is not list-like!"
if allow_multi is False:
    file_names = [file_names[0]]
for f in file_names:
try:
if not isinstance(f, str) and hasattr(f, '__iter__'):
f = f[0]
if os.path.exists(f):
self.df_manager.read_file(f)
if model_signal is not None:
model_signal.emit(f)
logging.info("Emitted signal: {}".format(f))
except Exception as e:
logging.error(e)
@QtCore.Slot(str)
def add_merge_file(self, file_path):
"""
Adds a merge file to the merge view and
also updates the internal dictionary storing the filepath/model.
:param file_path: (str)
The file path to add.
:return: None
"""
model = self.df_manager.get_model(file_path)
model.enableEditing(True)
self._merge_files.update({file_path:model})
self._merge_view_model.append_df_model(model)
self.mergeFileTable.setColumnWidth(0, 500)
self._merge_view_model.setHorizontalHeaderLabels(['filepath', 'count'])
@QtCore.Slot(str)
def add_purge_file(self, file_path):
"""
Adds a purge file to the purge view and
also updates the internal dictionary storing the filepath/model.
:param file_path: (str)
The file path to add.
:return: None
"""
model = self.df_manager.get_model(file_path)
model.enableEditing(True)
self._purge_files.update({file_path:model})
self._suppress_view_model.append_df_model(model)
self.sFileTable.setColumnWidth(0, 500)
self._suppress_view_model.setHorizontalHeaderLabels(['filepath', 'count'])
def remove_file(self, view, indexes=None):
"""
Removes selected file(s) from the given view.
:param view: (QListView)
The view to drop the selected indexes on.
:param indexes: (list, default None)
A list of given indexes to drop.
Otherwise relies on selected indexes in the view.
:return: None
"""
if indexes is None:
indexes = [x.row() for x in view.selectedIndexes()]
model = view.model()
for idx in sorted(indexes, reverse=True):  # remove from the bottom up so row positions stay valid
    model.takeRow(idx)
def open_field_map(self, view, models):
"""
Connects a MapGridDialog to help the user map field names that
are different between the source DataFrameModel and the
selected merge or suppression DataFrameModel.
:param view: (QtGui.QTableView)
The view that has a selected filepath
:param models: (dict)
The dictionary of {file_path:DataFrameModel} where
dataframe columns can be gathered from.
:return: None
"""
idx = view.selectedIndexes()[0]
view_model = view.model()
view_item = view_model.item(idx.row())
view_item_text = view_item.text()
try:
self._field_map_grids[view_item_text].show()
except KeyError:
dfmodel = models[view_item_text]
colmodel = dfmodel._dataFrame.columns.tolist()
if self.source_model is None:
self.set_source_model()
source_colmodel = self.source_model._dataFrame.columns.tolist()
fmap = MapGridDialog(parent=self)
fmap.load_combo_box(source_colmodel, left=True)
fmap.load_combo_box(colmodel, left=False)
fmap.setWindowTitle("Map Fields")
fmap.labelLeft.setText(os.path.basename(self.source_model.filePath))
fmap.labelRight.setText(os.path.basename(dfmodel.filePath))
fmap.signalNewMapping.connect(lambda x: self._field_map_data.update({dfmodel.filePath: x}))
self._field_map_grids[view_item_text] = fmap
self._field_map_grids[view_item_text].show()
def get_map_grid(self, file_path):
"""
Accessor to the MergePurgeDialog._field_map_grids dictionary.
Contains map grid dialogs.
:param file_path: (str)
The filepath related to the desired MapGridDialog.
:return: (MapGridDialog, None)
"""
return self._field_map_grids.get(file_path, None)
def open_edit_file_window(self, view, models):
"""
Connects a DataFrameModel selected in the view
to a FileTableWindow where the model can be edited.
:param view: (QtGui.QTableView)
The view that has a selected filepath
:param models: (dict)
The dictionary of {file_path:DataFrameModel}
to supply the FileTableWindow
:return: None
"""
try:
idx = view.selectedIndexes()[0]
except IndexError:
raise IndexError("No file selected to open.")
vmodel = view.model()
vitem = vmodel.item(idx.row())
model = models.get(vitem.text())
fp = model.filePath
wdw = self.df_manager.get_fileview_window(fp)
# Prevent weirdos from doing an endless loop of MergePurge windows.
# That would be pretty funny, though..
wdw.actionMergePurge.setVisible(False)
wdw.show()
def execute(self):
"""
Executes the merge_purge based upon the given settings.
:return: None
"""
if self.source_model is None:
self.set_source_model()
suppressed_results = {}
merged_results = {}
source_path = self.sourcePathLineEdit.text()
dest_path = self.destPathLineEdit.text()
source_df = self.source_model.dataFrame().copy()
source_df.loc[:, 'ORIG_IDXER'] = source_df.index
source_size = source_df.index.size
index_label = self.primaryKeyComboBox.currentText()
sort_on = self.sortOnHandler.get_model_list(left=False)
ascending = self.sortAscHandler.get_model_list(left=False)
dedupe_on = self.dedupeOnHandler.get_model_list(left=False)
gather_fields = self.gatherFieldsHandler.get_model_list(left=False)
overwrite_existing = self.gatherFieldsOverWriteCheckBox.isChecked()
# Make sure ascending/sort_on lists are equal.
while len(sort_on) < len(ascending):
ascending.append(False)
while len(sort_on) > len(ascending):
ascending.pop()
# Get all merge models and merge.
# Absorb all rows and columns
for file_path, merge_model in self._merge_files.items():
pre_size = source_df.index.size
other_df = merge_model.dataFrame()
if gather_fields:
assert index_label in other_df.columns, "DataFrameModel for {} missing column {}".format(
merge_model.filePath, index_label)
source_df = gather_frame_fields(source_df, other_df, index_label=index_label,
fields=gather_fields, copy_frames=True,
append_missing=True, overwrite=overwrite_existing)
else:
source_df = pd.concat([source_df, other_df])
from into.convert import convert, list_to_numpy, iterator_to_numpy_chunks
from into.chunks import chunks
from datashape import discover
from toolz import first
from collections import Iterator
import datetime
import datashape
import numpy as np
import pandas as pd
def test_basic():
assert convert(tuple, [1, 2, 3]) == (1, 2, 3)
def test_array_to_set():
assert convert(set, np.array([1, 2, 3])) == set([1, 2, 3])
def eq(a, b):
c = a == b
if isinstance(c, (np.ndarray, pd.Series)):
c = c.all()
return c
def test_set_to_Series():
assert eq(convert(pd.Series, set([1, 2, 3])),
pd.Series([1, 2, 3]))
# AUTOGENERATED! DO NOT EDIT! File to edit: DataPipelineNotebooks/3.PrepMLData.ipynb (unless otherwise specified).
__all__ = ['PrepML']
# Cell
import xarray as xr
import numpy as np
import pandas as pd
from joblib import Parallel, delayed
import time
from functools import partial
import datetime
import os
import pickle
# Cell
class PrepML:
def __init__(self, data_root, interpolate=1, date_start='2015-11-01', date_end='2020-04-30', date_train_test_cutoff='2019-11-01'):
"""
Initialize the class
Keyword Arguments
data_root: the root path of the data folders which contains the 4.GFSFiltered1xInterpolationZarr
interpolate: the amount of interpolation applied in in the previous ParseGFS notebook (used for finding the correct input/output paths)
date_start: Earlist date to include in label set (default: '2015-11-01')
date_end: Latest date to include in label set (default: '2020-04-30')
date_train_test_cutoff: Date to use as a cutoff between the train and test labels (default: '2019-11-01')
"""
self.data_root = data_root
self.interpolation = interpolate
self.date_start = date_start
self.date_end = date_end
self.date_train_test_cutoff = date_train_test_cutoff
self.nc_path = data_root + '/3.GFSFiltered'+ str(self.interpolation) + 'xInterpolation/'
self.processed_path = data_root + '/4.GFSFiltered'+ str(self.interpolation) + 'xInterpolationZarr/'
self.path_to_labels = data_root + 'CleanedForecastsNWAC_CAIC_UAC.V1.2013-2020.csv'
self.ml_path = data_root + '/5.MLData'
self.date_col = 'Day1Date'
self.region_col = 'UnifiedRegion'
self.parsed_date_col = 'parsed_date'
if not os.path.exists(self.ml_path):
os.makedirs(self.ml_path)
#map states to regions for purposes of data lookup
self.regions = {
'Utah': ['Abajos', 'Logan', 'Moab', 'Ogden', 'Provo',
'Salt Lake', 'Skyline', 'Uintas'],
'Colorado': ['Grand Mesa Zone', 'Sangre de Cristo Range', 'Steamboat Zone', 'Front Range Zone',
'Vail Summit Zone', 'Sawatch Zone', 'Aspen Zone',
'North San Juan Mountains', 'South San Juan Mountains', 'Gunnison Zone'],
'Washington': ['Mt Hood', 'Olympics', 'Snoqualmie Pass', 'Stevens Pass',
'WA Cascades East, Central', 'WA Cascades East, North', 'WA Cascades East, South',
'WA Cascades West, Central', 'WA Cascades West, Mt Baker', 'WA Cascades West, South'
]
}
@staticmethod
def lookup_forecast_region(label_region):
"""
        mapping between region names, as the labels and the forecasts use slightly different naming standards
TODO: could add a unified mapping upstream in parseGFS files or in the label generation
Keyword Arguments:
label_region: region as defined in the labels file
returns the region as defined in the features
"""
if label_region == 'Mt Hood':
return 'Mt Hood'
elif label_region == 'Olympics':
return 'Olympics'
elif label_region == 'Cascade Pass - Snoq. Pass':
return 'Snoqualmie Pass'
elif label_region == 'Cascade Pass - Stevens Pass':
return 'Stevens Pass'
elif label_region == 'Cascade East - Central':
return 'WA Cascades East, Central'
elif label_region == 'Cascade East - North':
return 'WA Cascades East, North'
elif label_region == 'Cascade East - South':
return 'WA Cascades East, South'
elif label_region == 'Cascade West - Central':
return 'WA Cascades West, Central'
elif label_region == 'Cascade West - North':
return 'WA Cascades West, Mt Baker'
elif label_region == 'Cascade West - South':
return 'WA Cascades West, South'
elif label_region == 'Abajo':
return 'Abajos'
elif label_region == 'Logan':
return 'Logan'
elif label_region == 'Moab':
return 'Moab'
elif label_region == 'Ogden':
return 'Ogden'
elif label_region == 'Provo':
return 'Provo'
elif label_region == 'Salt Lake':
return 'Salt Lake'
elif label_region == 'Skyline':
return 'Skyline'
elif label_region == 'Uintas':
return 'Uintas'
elif label_region == 'Grand Mesa':
return 'Grand Mesa Zone'
elif label_region == 'Sangre de Cristo':
return 'Sangre de Cristo Range'
elif label_region == 'Steamboat & Flat Tops':
return 'Steamboat Zone'
elif label_region == 'Front Range':
return 'Front Range Zone'
elif label_region == 'Vail & Summit County':
return 'Vail Summit Zone'
elif label_region == 'Sawatch Range':
return 'Sawatch Zone'
elif label_region == 'Aspen':
return 'Aspen Zone'
elif label_region == 'Northern San Juan':
return 'North San Juan Mountains'
elif label_region == 'Southern San Juan':
return 'South San Juan Mountains'
elif label_region == 'Gunnison':
return 'Gunnison Zone'
else:
            return 'Got region ' + label_region + ' but it is an unknown region'
@staticmethod
def date_to_season(d):
"""
mapping of date to season
Keyword Arguments
d: datetime64
returns season indicator
"""
if d >= np.datetime64('2014-11-01') and d <= np.datetime64('2015-04-30'):
return (np.datetime64('2014-11-01'), '14-15')
elif d >= np.datetime64('2015-11-01') and d <= np.datetime64('2016-04-30'):
return (np.datetime64('2015-11-01'), '15-16')
elif d >= np.datetime64('2016-11-01') and d <= np.datetime64('2017-04-30'):
return (np.datetime64('2016-11-01'), '16-17')
elif d >= np.datetime64('2017-11-01') and d <= np.datetime64('2018-04-30'):
return (np.datetime64('2017-11-01'), '17-18')
elif d >= np.datetime64('2018-11-01') and d <= np.datetime64('2019-04-30'):
return (np.datetime64('2018-11-01'), '18-19')
elif d >= np.datetime64('2019-11-01') and d <= np.datetime64('2020-04-30'):
return (np.datetime64('2019-11-01'), '19-20')
else:
#print('Unknown season ' + str(d))
return (None,'Unknown')
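    # For example: a forecast date of 2019-01-15 falls in the '18-19' season, so
    # PrepML.date_to_season(np.datetime64('2019-01-15')) returns (np.datetime64('2018-11-01'), '18-19');
    # dates outside all of the defined seasons return (None, 'Unknown').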
def get_state_for_region(self, region):
"""
Returns the state for a given region
        Keyword Arguments
region: region we want to lookup the state for
"""
for k in self.regions.keys():
if region in self.regions[k]:
return k
raise Exception('No region with name ' + region)
def prep_labels(self, overwrite_cache=True):
"""
        Preps the data and label sets into two sets, train & test
Keyword Arguments
        overwrite_cache: True indicates we want to recalculate the lat/lon combos, False indicates use the values if they exist in the cache file (otherwise calculate and cache them)
returns the train & test sets
"""
#find the season
nc_date = np.datetime64(self.date_start)
nc_season = PrepML.date_to_season(nc_date)[1]
        #maintaining this as a dict since the arrays are ragged and it's more efficient this way
#storing one sample for each region to get the lat/lon layout
region_zones = []
region_data = {}
for region in self.regions.keys():
for r in self.regions[region]:
region_zones.append(r)
region_data[r] = xr.open_dataset(self.nc_path + nc_season + '/Region_' + r + '_' + pd.to_datetime(nc_date).strftime('%Y%m%d') + '.nc')
#Read in all the label data
self.labels = pd.read_csv(self.path_to_labels, low_memory=False,
dtype={'Day1Danger_OctagonAboveTreelineEast': 'object',
'Day1Danger_OctagonAboveTreelineNorth': 'object',
'Day1Danger_OctagonAboveTreelineNorthEast': 'object',
'Day1Danger_OctagonAboveTreelineNorthWest': 'object',
'Day1Danger_OctagonAboveTreelineSouth': 'object',
'Day1Danger_OctagonAboveTreelineSouthEast': 'object',
'Day1Danger_OctagonAboveTreelineSouthWest': 'object',
'Day1Danger_OctagonAboveTreelineWest': 'object',
'Day1Danger_OctagonBelowTreelineEast': 'object',
'Day1Danger_OctagonBelowTreelineNorth': 'object',
'Day1Danger_OctagonBelowTreelineNorthEast': 'object',
'Day1Danger_OctagonBelowTreelineNorthWest': 'object',
'Day1Danger_OctagonBelowTreelineSouth': 'object',
'Day1Danger_OctagonBelowTreelineSouthEast': 'object',
'Day1Danger_OctagonBelowTreelineSouthWest': 'object',
'Day1Danger_OctagonBelowTreelineWest': 'object',
'Day1Danger_OctagonNearTreelineEast': 'object',
'Day1Danger_OctagonNearTreelineNorth': 'object',
'Day1Danger_OctagonNearTreelineNorthEast': 'object',
'Day1Danger_OctagonNearTreelineNorthWest': 'object',
'Day1Danger_OctagonNearTreelineSouth': 'object',
'Day1Danger_OctagonNearTreelineSouthEast': 'object',
'Day1Danger_OctagonNearTreelineSouthWest': 'object',
'Day1Danger_OctagonNearTreelineWest': 'object',
'SpecialStatement': 'object',
'image_paths': 'object',
'image_types': 'object',
'image_urls': 'object'})
self.labels['parsed_date'] = pd.to_datetime(self.labels[self.date_col], format='%Y%m%d')
metadata_cols = [self.date_col, self.region_col]
#ensure we are only using label data for regions we are looking at
#return region_zones
self.labels[self.region_col] = self.labels.apply(lambda x : PrepML.lookup_forecast_region(x[self.region_col]), axis=1)
self.labels = self.labels[self.labels[self.region_col].isin(region_zones)]
self.labels = self.labels[self.labels[self.region_col]!='Unknown region']
#add a season column
tmp = pd.DataFrame.from_records(self.labels[self.parsed_date_col].apply(PrepML.date_to_season).reset_index(drop=True))
self.labels.reset_index(drop=True, inplace=True)
self.labels['season'] = tmp[1]
#some region/seasons have excessive errors in the data, remove those
self.labels = self.labels[self.labels['season'].isin(['15-16', '16-17', '17-18', '18-19', '19-20'])]
self.labels = self.labels[~self.labels.index.isin(self.labels[(self.labels['season']=='15-16') & (self.labels[self.region_col]=='Steamboat Zone')].index)]
self.labels = self.labels[~self.labels.index.isin(self.labels[(self.labels['season']=='16-17') & (self.labels[self.region_col]=='Front Range Zone')].index)]
lat_lon_union = pd.DataFrame()
lat_lon_path = self.processed_path + 'lat_lon_union.csv'
if overwrite_cache or not os.path.exists(lat_lon_path):
            #find the union of all lat/lon/region combinations, keeping just the grid cells with values
            #the process to filter the lat/lon is expensive but we need to do it here (1-5 seconds per region)
            #as this helps the batch process select relevant data
for r in region_data.keys():
print(r)
region_df = region_data[r].stack(lat_lon = ('latitude', 'longitude')).lat_lon.to_dataframe()
tmp_df = pd.DataFrame.from_records(region_df['lat_lon'], columns=['latitude', 'longitude'])
indexes_to_drop = []
for index, row in tmp_df.iterrows():
#TODO: there might be a more efficient way than doing this one by one?
if 0 == np.count_nonzero(region_data[r].to_array().sel(latitude=row['latitude'], longitude=row['longitude']).stack(time_var = ('time', 'variable')).dropna(dim='time_var', how='all').values):
indexes_to_drop.append(index)
tmp_df.drop(indexes_to_drop, axis=0, inplace=True)
tmp_df[self.region_col] = r
lat_lon_union = pd.concat([lat_lon_union, tmp_df])
#cache the data
lat_lon_union.to_csv(lat_lon_path)
else:
#load the cached data
lat_lon_union = pd.read_csv(lat_lon_path,float_precision='round_trip')
#join in with the labels so we have a label per lat/lon pair
lat_lon_union = lat_lon_union.set_index(self.region_col, drop=False).join(self.labels.set_index(self.region_col, drop=False), how='left', lsuffix='left', rsuffix='right')
#define the split between train and test
date_min = np.datetime64(self.date_start)
date_max = np.datetime64(self.date_end)
train_date_cutoff = np.datetime64(self.date_train_test_cutoff)
#split the train/test data
labels_data_union = lat_lon_union[lat_lon_union[self.parsed_date_col] >= date_min]
labels_data_union = labels_data_union[labels_data_union[self.parsed_date_col] <= date_max]
#copy so we can delete the overall data and only keep the filtered
labels_data_train = labels_data_union[labels_data_union[self.parsed_date_col] <= train_date_cutoff].copy()
labels_data_test = labels_data_union[labels_data_union[self.parsed_date_col] > train_date_cutoff].copy()
labels_data_train.reset_index(inplace=True)
labels_data_test.reset_index(inplace=True)
return labels_data_train, labels_data_test
def augment_labels_with_trends(self, label_to_add_trend_info='Day1DangerAboveTreelineValue'):
raise NotImplementedError('Method is not fully implemented or tested')
        #add extra labels which also indicate the trend (direction of change) in the avalanche danger
        #the thought here is that predicting a rising or flat danger is usually easier than predicting when
        #to lower the danger, so separate these into separate classes
#TODO: this should be dynamic based on label passed in, not hard coded to above treeline
labels_trends = pd.DataFrame()
for r in self.labels[self.region_col].unique():
for s in self.labels['season'].unique():
region_season_df = self.labels[self.labels['season']==s]
region_season_df = region_season_df[region_season_df[self.region_col]==r]
if(len(region_season_df) == 0):
continue
region_season_df.sort_values(by='parsed_date', inplace=True)
region_season_df.reset_index(inplace=True, drop=True)
region_season_df[label_to_add_trend_info] = region_season_df['Day1DangerAboveTreeline'].map({'Low':0, 'Moderate':1, 'Considerable':2, 'High':3})
region_season_df.loc[0,'Day1DangerAboveTreelineWithTrend'] = region_season_df.iloc[0]['Day1DangerAboveTreeline'] + '_Initial'
for i in range(1,len(region_season_df)):
prev = region_season_df.iloc[i-1]['Day1DangerAboveTreelineValue']
cur = region_season_df.loc[i,'Day1DangerAboveTreelineValue']
trend = '_Unknown'
if prev == cur:
trend = '_Flat'
elif prev < cur:
trend = '_Rising'
elif prev > cur:
trend = '_Falling'
region_season_df.loc[i,'Day1DangerAboveTreelineWithTrend'] = region_season_df.iloc[i]['Day1DangerAboveTreeline'] + trend
labels_trends = pd.concat([labels_trends,region_season_df])
assert(len(labels_trends)==len(self.labels))
self.labels = labels_trends
def get_data_zarr(self, region, lat, lon, lookback_days, date, variables=None):
"""
utility to get data for a specific point
Keyword Arguments
region: the region the point exists in
lat: the latitude of the point to lookup
lon: the longitude of the point to lookup
lookback_days: the number of days prior to the date to also return
date: the date which marks the end of the dataset (same date as the desired label)
variables: filter to just these variables (default: None indicates return all variables)
"""
#print(region + ' ' + str(lat) + ', ' + str(lon) + ' ' + str(date))
state = self.get_state_for_region(region)
earliest_data, season = PrepML.date_to_season(date)
path = self.processed_path + '/' + season + '/' + state + '/Region_' + region + '.zarr'
#print('*Opening file ' + path)
tmp_ds = xr.open_zarr(path, consolidated=True)
#filter to just the variables we want
#TODO: this may be more efficient if we use the open_zarr drop to not even read the variables
if variables is not None:
tmp_ds = tmp_ds.sel(variable=tmp_ds.variable.isin(variables))
start_day = date - np.timedelta64(lookback_days-1, 'D')
#print('start day ' + str(start_day))
tmp_ds = tmp_ds.sel(latitude=lat, longitude=lon, method='nearest').sel(time=slice(start_day, date))
date_values_pd = pd.date_range(start_day, periods=lookback_days, freq='D')
#reindex should fill missing values with NA
tmp_ds = tmp_ds.reindex({'time': date_values_pd})
tmp_ds = tmp_ds.reset_index(dims_or_levels='time', drop=True).load()
return tmp_ds
def get_data_zarr_batch(self, region, season, df, lookback_days, variables=None):
"""
utility to get data for a set of points
Keyword Arguments
region: the region the point exists in
season: the season the data is in
df: DataFrame of label rows to pull the data for (should all be from the same region and season)
lookback_days: the number of days prior to the date to also return
variables: filter to just these variables (default: None indicates return all variables)
"""
state = self.get_state_for_region(region)
path = self.processed_path + '/' + season + '/' + state + '/Region_' + region + '.zarr'
#print('*Opening file ' + path)
tmp_ds = xr.open_zarr(path, consolidated=True)
#finds the minimal set of values for the single zarr collection and then appends
        #the individual data to results
results = []
lats = df['latitude'].unique()
lons = df['longitude'].unique()
tmp_ds = xr.open_zarr(path, consolidated=True)
min_ds = tmp_ds.sel(latitude=lats, longitude=lons)
#filter to just the variables we want
#TODO: this may be more efficient if we use the open_zarr drop to not even read the variables
if variables is not None:
min_ds = min_ds.sel(variable=min_ds.variable.isin(variables))
for d in df.iterrows():
d = d[1]
date = d['parsed_date']
start_day = date - np.timedelta64(lookback_days-1, 'D')
result_df = min_ds.sel(latitude=d['latitude'], longitude=d['longitude']).sel(time=slice(start_day, date))
#print(str(d['latitude']) + ' ' + str(d['longitude']) + ' ' + str(start_day) + ' ' + str(date))
#return result_df
            date_values_pd = pd.date_range(start_day, periods=lookback_days, freq='D')
# coding: utf-8
# ## General information
#
# This kernel is dedicated to EDA of PetFinder.my Adoption Prediction challenge as well as feature engineering and modelling.
#
# 
# (a screenshot of the PetFinder.my site)
#
# In this dataset we have lots of information: tabular data, texts and even images! This gives a lot of possibilities for feature engineering and modelling. The only limiting factor is the fact that the competition is kernel-only. On the other hand this will ensure everyone has the same computational resources.
#
# In this kernel I want to pay attention to several things:
# * comparing distribution of features in train and test data;
# * exploring features and their interactions;
# * trying various types of feature engineering;
# * trying various models without neural nets (for now);
#
# It is important to remember that this competition has stage 2, so our models will run against unseen data.
#
# *Work still in progress*
# In[1]:
#libraries
import numpy as np
import pandas as pd
import os
import json
import seaborn as sns
import matplotlib.pyplot as plt
get_ipython().run_line_magic('matplotlib', 'inline')
plt.style.use('ggplot')
import lightgbm as lgb
import xgboost as xgb
import time
import datetime
from PIL import Image
from wordcloud import WordCloud
from sklearn.preprocessing import LabelEncoder
from sklearn.model_selection import StratifiedKFold, KFold
from sklearn.metrics import mean_squared_error, roc_auc_score
from sklearn.linear_model import LogisticRegression, LogisticRegressionCV
import gc
from catboost import CatBoostClassifier
from tqdm import tqdm_notebook
import plotly.offline as py
py.init_notebook_mode(connected=True)
import plotly.graph_objs as go
import plotly.tools as tls
import random
import warnings
warnings.filterwarnings("ignore")
from functools import partial
pd.set_option('max_colwidth', 500)
pd.set_option('max_columns', 500)
pd.set_option('max_rows', 100)
import os
import scipy as sp
from math import sqrt
from collections import Counter
from sklearn.metrics import confusion_matrix as sk_cmatrix
from sklearn.feature_extraction.text import TfidfVectorizer
from nltk.tokenize import TweetTokenizer
from sklearn.ensemble import RandomForestClassifier
import langdetect
import eli5
from IPython.display import display
from sklearn.metrics import cohen_kappa_score
def kappa(y_true, y_pred):
return cohen_kappa_score(y_true, y_pred, weights='quadratic')
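# Quick illustrative check of the metric on made-up labels (not competition data): the quadratic
# weighting penalizes predictions that land further from the true class more heavily, so a perfect
# prediction scores 1.0 while systematically shifted predictions score noticeably lower.
print('kappa, perfect agreement:', kappa([0, 1, 2, 3, 4], [0, 1, 2, 3, 4]))
print('kappa, shifted by one   :', kappa([0, 1, 2, 3, 4], [1, 2, 3, 4, 4]))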
# In[2]:
breeds = pd.read_csv('../input/breed_labels.csv')
colors = pd.read_csv('../input/color_labels.csv')
states = pd.read_csv('../input/state_labels.csv')
train = pd.read_csv('../input/train/train.csv')
test = pd.read_csv('../input/test/test.csv')
sub = pd.read_csv('../input/test/sample_submission.csv')
train['dataset_type'] = 'train'
test['dataset_type'] = 'test'
all_data = pd.concat([train, test])
# ## Data overview
#
# Let's have a quick look at the data first!
# In[3]:
print(os.listdir("../input"))
# In[4]:
train.drop('Description', axis=1).head()
# In[5]:
train.info()
# * We have almost 15 thousand dogs and cats in the dataset;
# * The main dataset contains all important information about the pets: age, breed, color, some characteristics and other things;
# * Descriptions were analyzed using Google's Natural Language API, providing sentiments and entities. I suppose we could do a similar thing ourselves;
# * There are photos of some pets;
# * Some meta-information was extracted from images and we can use it;
# * There are separate files with labels for breeds, colors and states;
#
# Let's start with the main dataset.
#
# I have also created a full dataset by combining train and test data. This is done purely for more convenient visualization. Column "dataset_type" shows which dataset the data belongs to.
# ## Main data exploration
# ### Target: Adoption speed
#
# * 0 - Pet was adopted on the same day as it was listed.
# * 1 - Pet was adopted between 1 and 7 days (1st week) after being listed.
# * 2 - Pet was adopted between 8 and 30 days (1st month) after being listed.
# * 3 - Pet was adopted between 31 and 90 days (2nd & 3rd month) after being listed.
# * 4 - No adoption after 100 days of being listed. (There are no pets in this dataset that waited between 90 and 100 days).
# In[6]:
train['AdoptionSpeed'].value_counts().sort_index().plot('barh', color='teal');
plt.title('Adoption speed classes counts');
# A small note on how annotating works:
# * When I use seaborn countplot, I assign the figure to a variable - this allows changing its attributes and going deeper into its parameters;
# * The figure has `Axes`, on which the bars are drawn;
# * The bars live in the Axes' `patches`, which hold information about color, transparency, position and size;
# * So we can take information from `patches`, for example the width and height of each bar, and plot the correct text in the correct places
#
# https://matplotlib.org/users/artists.html
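# A minimal self-contained sketch of the same mechanism with toy numbers (just for illustration):
fig, ax = plt.subplots()
ax.bar(['a', 'b', 'c'], [3, 5, 2])
for p in ax.patches:
    # each bar is a Rectangle patch, so we can read back its position and height to place a label
    ax.annotate(f'{p.get_height():.0f}', (p.get_x() + p.get_width() / 2., p.get_height()),
                ha='center', va='bottom')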
# In[7]:
plt.figure(figsize=(14, 6));
g = sns.countplot(x='AdoptionSpeed', data=all_data.loc[all_data['dataset_type'] == 'train']);
plt.title('Adoption speed classes rates');
ax=g.axes
# In[8]:
#Axes
ax
# In[9]:
# patches
ax.patches
# In[10]:
# example of info in patches
ax.patches[0].get_x()
# In[11]:
plt.figure(figsize=(14, 6));
g = sns.countplot(x='AdoptionSpeed', data=all_data.loc[all_data['dataset_type'] == 'train'])
plt.title('Adoption speed classes rates');
ax=g.axes
for p in ax.patches:
ax.annotate(f"{p.get_height() * 100 / train.shape[0]:.2f}%", (p.get_x() + p.get_width() / 2., p.get_height()),
ha='center', va='center', fontsize=11, color='gray', rotation=0, xytext=(0, 10),
textcoords='offset points')
# We can see that some pets were adopted immediately, but these are rare cases: maybe someone wanted to adopt any pet, or the pet was lucky to be seen by a person who wanted a similar pet.
# A lot of pets aren't adopted at all, which is quite sad :( I hope our models and analysis will help them to find their home!
#
# It is nice that a lot of pets are adopted within a first week of being listed!
#
# One more interesting thing is that the classes have a linear relationship - the higher the number, the worse the situation is. So it could be possible to build not only a multiclass classification, but also a regression.
# ### Type
# 1 - Dog, 2 - Cat
# In[12]:
all_data['Type'] = all_data['Type'].apply(lambda x: 'Dog' if x == 1 else 'Cat')
plt.figure(figsize=(10, 6));
sns.countplot(x='dataset_type', data=all_data, hue='Type');
plt.title('Number of cats and dogs in train and test data');
# We can see that the rate of dogs in the train dataset is higher than in the test set. But I don't think the difference is serious.
# #### Comparison of rates
#
# From here on I'll compare not only counts of pets in different categories, but also compate adoption speed rates with base ones.
#
# This is how it works:
# * As we saw earlier the base rate of pets with Adoption speed 0 is 410 / 14993 = 0.027;
# * Now look at the next graph: there are 6861 cats in train dataset and 240 of them have Adoption Speed 0. So the rate is 240 / 6861 = 0.035;
# * 0.035/0.027 = 1.28, so by splitting out the data to cat vs dog, we can see that cats have a 28% increased chance of adoption speed class 0 over the base rate of adoption;
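# The same arithmetic in code (illustrative, recomputed from the train set):
base_rate_0 = (train['AdoptionSpeed'] == 0).mean()                           # roughly 410 / 14993 = 0.027
cat_rate_0 = (train.loc[train['Type'] == 2, 'AdoptionSpeed'] == 0).mean()    # roughly 240 / 6861 = 0.035
print(f'Cats vs base rate for class 0: {(cat_rate_0 / base_rate_0 - 1) * 100:+.1f}%')  # around +28%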
# In[13]:
main_count = train['AdoptionSpeed'].value_counts(normalize=True).sort_index()
def prepare_plot_dict(df, col, main_count):
"""
Preparing dictionary with data for plotting.
    I want to show how much higher/lower the rates of Adoption speed are for the current column compared to the base values (as described above).
    First I calculate the base rates, then for each category in the column I calculate the rates of Adoption speed and find the difference from the base rates.
"""
main_count = dict(main_count)
plot_dict = {}
for i in df[col].unique():
val_count = dict(df.loc[df[col] == i, 'AdoptionSpeed'].value_counts().sort_index())
for k, v in main_count.items():
if k in val_count:
plot_dict[val_count[k]] = ((val_count[k] / sum(val_count.values())) / main_count[k]) * 100 - 100
else:
plot_dict[0] = 0
return plot_dict
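# Illustrative peek at the helper's output for the Type column: the keys are the bar heights (counts)
# and the values are the percentage deviations from the base Adoption speed rates.
print(prepare_plot_dict(train, 'Type', main_count))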
def make_count_plot(df, x, hue='AdoptionSpeed', title='', main_count=main_count):
"""
Plotting countplot with correct annotations.
"""
g = sns.countplot(x=x, data=df, hue=hue);
plt.title(f'AdoptionSpeed {title}');
ax = g.axes
plot_dict = prepare_plot_dict(df, x, main_count)
for p in ax.patches:
h = p.get_height() if str(p.get_height()) != 'nan' else 0
text = f"{plot_dict[h]:.0f}%" if plot_dict[h] < 0 else f"+{plot_dict[h]:.0f}%"
ax.annotate(text, (p.get_x() + p.get_width() / 2., h),
ha='center', va='center', fontsize=11, color='green' if plot_dict[h] > 0 else 'red', rotation=0, xytext=(0, 10),
textcoords='offset points')
# In[14]:
plt.figure(figsize=(18, 8));
make_count_plot(df=all_data.loc[all_data['dataset_type'] == 'train'], x='Type', title='by pet Type')
# We can see that cats are more likely to be adopted early than dogs and overall the percentage of not adopted cats is lower. Does this mean people prefer cats? Or maybe this dataset is small and could contain bias.
# On the other hand more dogs are adopted after several months.
# ### Name
# I don't really think that names are important in adoption, but let's see.
#
# At first let's look at most common names.
# In[15]:
fig, ax = plt.subplots(figsize = (16, 12))
plt.subplot(1, 2, 1)
text_cat = ' '.join(all_data.loc[all_data['Type'] == 'Cat', 'Name'].fillna('').values)
wordcloud = WordCloud(max_font_size=None, background_color='white',
width=1200, height=1000).generate(text_cat)
plt.imshow(wordcloud)
plt.title('Top cat names')
plt.axis("off")
plt.subplot(1, 2, 2)
text_dog = ' '.join(all_data.loc[all_data['Type'] == 'Dog', 'Name'].fillna('').values)
wordcloud = WordCloud(max_font_size=None, background_color='white',
width=1200, height=1000).generate(text_dog)
plt.imshow(wordcloud)
plt.title('Top dog names')
plt.axis("off")
plt.show()
# Cute names! :) I like some of them!
#
# It is worth noticing some things:
# * Often we see normal pet names like "Mimi", "Angel" and so on;
# * Quite often people simply write who is up for adoption: "Kitten", "Puppies";
# * Very often the color of the pet is written, sometimes the gender;
# * And it seems that sometimes names can be strange or there is some info written instead of the name;
#
# One more thing to notice is that some pets don't have names. Let's see whether this is important
# In[16]:
print('Most popular pet names and AdoptionSpeed')
for n in train['Name'].value_counts().index[:5]:
print(n)
print(train.loc[train['Name'] == n, 'AdoptionSpeed'].value_counts().sort_index())
print('')
# #### No name
# In[17]:
train['Name'] = train['Name'].fillna('Unnamed')
test['Name'] = test['Name'].fillna('Unnamed')
all_data['Name'] = all_data['Name'].fillna('Unnamed')
train['No_name'] = 0
train.loc[train['Name'] == 'Unnamed', 'No_name'] = 1
test['No_name'] = 0
test.loc[test['Name'] == 'Unnamed', 'No_name'] = 1
all_data['No_name'] = 0
all_data.loc[all_data['Name'] == 'Unnamed', 'No_name'] = 1
print(f"Rate of unnamed pets in train data: {train['No_name'].sum() * 100 / train['No_name'].shape[0]:.4f}%.")
print(f"Rate of unnamed pets in test data: {test['No_name'].sum() * 100 / test['No_name'].shape[0]:.4f}%.")
# In[18]:
pd.crosstab(train['No_name'], train['AdoptionSpeed'], normalize='index')
# Less than 10% of pets don't have names, but they have a higher possibility of not being adopted.
# In[19]:
plt.figure(figsize=(18, 8));
make_count_plot(df=all_data.loc[all_data['dataset_type'] == 'train'], x='No_name', title='and having a name')
# #### "Bad" names
#
# I have noticed that shorter names tend to be meaningless. Here is an example of some names with 3 characters.
# In[20]:
all_data[all_data['Name'].apply(lambda x: len(str(x))) == 3]['Name'].value_counts().tail()
# And here are names with 1 or 2 characters...
# In[21]:
all_data[all_data['Name'].apply(lambda x: len(str(x))) < 3]['Name'].unique()
# I think that we could create a new feature showing that the name is meaningless - pets with these names could have less success in adoption.
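# A sketch of that feature (illustrative; the 3-character threshold is my assumption):
for df_ in (train, test, all_data):
    df_['Bad_name'] = df_['Name'].apply(lambda x: 1 if len(str(x)) < 3 or x == 'Unnamed' else 0)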
# ### Age
# In[22]:
fig, ax = plt.subplots(figsize = (16, 6))
plt.subplot(1, 2, 1)
plt.title('Distribution of pets age');
train['Age'].plot('hist', label='train');
test['Age'].plot('hist', label='test');
plt.legend();
plt.subplot(1, 2, 2)
plt.title('Distribution of pets age (log)');
np.log1p(train['Age']).plot('hist', label='train');
np.log1p(test['Age']).plot('hist', label='test');
plt.legend();
# In[23]:
train['Age'].value_counts().head(10)
# We can see that most pets are young - maybe just after birth. Also there are a lot of pets with an age equal to a multiple of 12 - I think the owners didn't bother with the exact age.
# In[24]:
plt.figure(figsize=(10, 6));
sns.violinplot(x="AdoptionSpeed", y="Age", hue="Type", data=train);
plt.title('AdoptionSpeed by Type and age');
# In[25]:
data = []
for a in range(5):
df = train.loc[train['AdoptionSpeed'] == a]
data.append(go.Scatter(
x = df['Age'].value_counts().sort_index().index,
y = df['Age'].value_counts().sort_index().values,
name = str(a)
))
layout = go.Layout(dict(title = "AdoptionSpeed trends by Age",
xaxis = dict(title = 'Age (months)'),
yaxis = dict(title = 'Counts'),
)
)
py.iplot(dict(data=data, layout=layout), filename='basic-line')
# * We can see that young pets are adopted quite fast and most of them are adopted;
# * Most pets are less than 4 months old with a huge spike at 2 months;
# * It seems that a lot of people don't input exact age and write age in years (or multiples of 12);
# * It could make sense to create some binary variables based on age;
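# A sketch of such binary variables (illustrative; the exact cut-offs are assumptions):
for df_ in (train, test, all_data):
    df_['Age_in_full_years'] = ((df_['Age'] > 0) & (df_['Age'] % 12 == 0)).astype(int)  # age entered as whole years
    df_['Is_very_young'] = (df_['Age'] <= 3).astype(int)                                # kittens/puppies up to 3 months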
# ### Breeds
# There is a main breed of the pet and secondary if relevant.
#
# At first let's see whether having secondary breed influences adoption speed.
# In[26]:
train['Pure_breed'] = 0
train.loc[train['Breed2'] == 0, 'Pure_breed'] = 1
test['Pure_breed'] = 0
test.loc[test['Breed2'] == 0, 'Pure_breed'] = 1
all_data['Pure_breed'] = 0
all_data.loc[all_data['Breed2'] == 0, 'Pure_breed'] = 1
print(f"Rate of pure breed pets in train data: {train['Pure_breed'].sum() * 100 / train['Pure_breed'].shape[0]:.4f}%.")
print(f"Rate of pure breed pets in test data: {test['Pure_breed'].sum() * 100 / test['Pure_breed'].shape[0]:.4f}%.")
# In[27]:
def plot_four_graphs(col='', main_title='', dataset_title=''):
"""
Plotting four graphs:
- adoption speed by variable;
- counts of categories in the variable in train and test;
- adoption speed by variable for dogs;
- adoption speed by variable for cats;
"""
plt.figure(figsize=(20, 12));
plt.subplot(2, 2, 1)
make_count_plot(df=train, x=col, title=f'and {main_title}')
plt.subplot(2, 2, 2)
sns.countplot(x='dataset_type', data=all_data, hue=col);
plt.title(dataset_title);
plt.subplot(2, 2, 3)
make_count_plot(df=train.loc[train['Type'] == 1], x=col, title=f'and {main_title} for dogs')
plt.subplot(2, 2, 4)
make_count_plot(df=train.loc[train['Type'] == 2], x=col, title=f'and {main_title} for cats')
plot_four_graphs(col='Pure_breed', main_title='having pure breed', dataset_title='Number of pets by pure/not-pure breed in train and test data')
# It seems that non-pure breed pets tend to be adopted more and faster, especially cats.
#
# Let's look at the breeds themselves
# In[28]:
breeds_dict = {k: v for k, v in zip(breeds['BreedID'], breeds['BreedName'])}
# In[29]:
train['Breed1_name'] = train['Breed1'].apply(lambda x: '_'.join(breeds_dict[x].split()) if x in breeds_dict else 'Unknown')
train['Breed2_name'] = train['Breed2'].apply(lambda x: '_'.join(breeds_dict[x].split()) if x in breeds_dict else '-')
test['Breed1_name'] = test['Breed1'].apply(lambda x: '_'.join(breeds_dict[x].split()) if x in breeds_dict else 'Unknown')
test['Breed2_name'] = test['Breed2'].apply(lambda x: '_'.join(breeds_dict[x].split()) if x in breeds_dict else '-')
all_data['Breed1_name'] = all_data['Breed1'].apply(lambda x: '_'.join(breeds_dict[x].split()) if x in breeds_dict else 'Unknown')
all_data['Breed2_name'] = all_data['Breed2'].apply(lambda x: '_'.join(breeds_dict[x].split()) if x in breeds_dict else '-')
# In[30]:
fig, ax = plt.subplots(figsize = (20, 18))
plt.subplot(2, 2, 1)
text_cat1 = ' '.join(all_data.loc[all_data['Type'] == 'Cat', 'Breed1_name'].fillna('').values)
wordcloud = WordCloud(max_font_size=None, background_color='black', collocations=False,
width=1200, height=1000).generate(text_cat1)
plt.imshow(wordcloud)
plt.title('Top cat breed1')
plt.axis("off")
plt.subplot(2, 2, 2)
text_dog1 = ' '.join(all_data.loc[all_data['Type'] == 'Dog', 'Breed1_name'].fillna('').values)
wordcloud = WordCloud(max_font_size=None, background_color='black', collocations=False,
width=1200, height=1000).generate(text_dog1)
plt.imshow(wordcloud)
plt.title('Top dog breed1')
plt.axis("off")
plt.subplot(2, 2, 3)
text_cat2 = ' '.join(all_data.loc[all_data['Type'] == 'Cat', 'Breed2_name'].fillna('').values)
wordcloud = WordCloud(max_font_size=None, background_color='black', collocations=False,
width=1200, height=1000).generate(text_cat2)
plt.imshow(wordcloud)
plt.title('Top cat breed2')
plt.axis("off")
plt.subplot(2, 2, 4)
text_dog2 = ' '.join(all_data.loc[all_data['Type'] == 'Dog', 'Breed2_name'].fillna('').values)
wordcloud = WordCloud(max_font_size=None, background_color='black', collocations=False,
width=1200, height=1000).generate(text_dog2)
plt.imshow(wordcloud)
plt.title('Top dog breed2')
plt.axis("off")
plt.show()
# It seems that not all values of these features are really breeds. Sometimes people simply write that the dog is a mixed breed, and cats are often described as domestic with a certain hair length.
#
# Now let's have a look at the combinations of breed names.
# In[31]:
(all_data['Breed1_name'] + '__' + all_data['Breed2_name']).value_counts().head(15)
# It seems that most dogs aren't pure breeds, but mixed breeds! My first assumption was wrong.
#
# Sometimes people write "mixed breed" in the first field, sometimes in both, and sometimes the main breed is in the first field and is marked as mixed breed in the second field.
#
# I think we can create new features based on this information. And later we can verify the hair length of pets.
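# A sketch of a feature based on that observation (illustrative; the exact 'Mixed_Breed' label is an
# assumption about how the breed name looks after the '_'.join above):
for df_ in (train, test, all_data):
    df_['Mixed_breed_mentioned'] = ((df_['Breed1_name'] == 'Mixed_Breed') |
                                    (df_['Breed2_name'] == 'Mixed_Breed')).astype(int)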
# ### Gender
# 1 = Male, 2 = Female, 3 = Mixed, if profile represents group of pets
# In[32]:
plt.figure(figsize=(18, 6));
plt.subplot(1, 2, 1)
make_count_plot(df=train, x='Gender', title='and gender')
plt.subplot(1, 2, 2)
sns.countplot(x='dataset_type', data=all_data, hue='Gender');
plt.title('Number of pets by gender in train and test data');
# In[33]:
sns.factorplot('Type', col='Gender', data=all_data, kind='count', hue='dataset_type');
plt.subplots_adjust(top=0.8)
plt.suptitle('Count of cats and dogs in train and test set by gender');
# It seems that male pets are adopted faster than female ones. Mixed-gender listings (groups of pets) have noticeably lower chances.
# ### Colors
# In[34]:
colors_dict = {k: v for k, v in zip(colors['ColorID'], colors['ColorName'])}
train['Color1_name'] = train['Color1'].apply(lambda x: colors_dict[x] if x in colors_dict else '')
train['Color2_name'] = train['Color2'].apply(lambda x: colors_dict[x] if x in colors_dict else '')
train['Color3_name'] = train['Color3'].apply(lambda x: colors_dict[x] if x in colors_dict else '')
test['Color1_name'] = test['Color1'].apply(lambda x: colors_dict[x] if x in colors_dict else '')
test['Color2_name'] = test['Color2'].apply(lambda x: colors_dict[x] if x in colors_dict else '')
test['Color3_name'] = test['Color3'].apply(lambda x: colors_dict[x] if x in colors_dict else '')
all_data['Color1_name'] = all_data['Color1'].apply(lambda x: colors_dict[x] if x in colors_dict else '')
all_data['Color2_name'] = all_data['Color2'].apply(lambda x: colors_dict[x] if x in colors_dict else '')
all_data['Color3_name'] = all_data['Color3'].apply(lambda x: colors_dict[x] if x in colors_dict else '')
# In[35]:
def make_factor_plot(df, x, col, title, main_count=main_count, hue=None, ann=True, col_wrap=4):
"""
Plotting countplot.
Making annotations is a bit more complicated, because we need to iterate over axes.
"""
if hue:
g = sns.factorplot(col, col=x, data=df, kind='count', col_wrap=col_wrap, hue=hue);
else:
g = sns.factorplot(col, col=x, data=df, kind='count', col_wrap=col_wrap);
plt.subplots_adjust(top=0.9);
plt.suptitle(title);
ax = g.axes
plot_dict = prepare_plot_dict(df, x, main_count)
if ann:
for a in ax:
for p in a.patches:
text = f"{plot_dict[p.get_height()]:.0f}%" if plot_dict[p.get_height()] < 0 else f"+{plot_dict[p.get_height()]:.0f}%"
a.annotate(text, (p.get_x() + p.get_width() / 2., p.get_height()),
ha='center', va='center', fontsize=11, color='green' if plot_dict[p.get_height()] > 0 else 'red', rotation=0, xytext=(0, 10),
textcoords='offset points')
# In[36]:
sns.factorplot('dataset_type', col='Type', data=all_data, kind='count', hue='Color1_name', palette=['Black', 'Brown', '#FFFDD0', 'Gray', 'Gold', 'White', 'Yellow']);
plt.subplots_adjust(top=0.8)
plt.suptitle('Counts of pets in datasets by main color');
# We can see that most common colors are black and brown. Interesting to notice that there are almost no gray or yellow dogs :)
#
# Now let's see whether colors influence adoption speed
# In[37]:
make_factor_plot(df=train, x='Color1_name', col='AdoptionSpeed', title='Counts of pets by main color and Adoption Speed')
# In[38]:
train['full_color'] = (train['Color1_name'] + '__' + train['Color2_name'] + '__' + train['Color3_name']).str.replace('__', '')
test['full_color'] = (test['Color1_name'] + '__' + test['Color2_name'] + '__' + test['Color3_name']).str.replace('__', '')
all_data['full_color'] = (all_data['Color1_name'] + '__' + all_data['Color2_name'] + '__' + all_data['Color3_name']).str.replace('__', '')
make_factor_plot(df=train.loc[train['full_color'].isin(list(train['full_color'].value_counts().index)[:12])], x='full_color', col='AdoptionSpeed', title='Counts of pets by color and Adoption Speed')
# We can see that there are some differences based on color, but the number of pets in most colors isn't very high, so this could be due to randomness.
# In[39]:
gender_dict = {1: 'Male', 2: 'Female', 3: 'Mixed'}
for i in all_data['Type'].unique():
for j in all_data['Gender'].unique():
df = all_data.loc[(all_data['Type'] == i) & (all_data['Gender'] == j)]
top_colors = list(df['full_color'].value_counts().index)[:5]
j = gender_dict[j]
print(f"Most popular colors of {j} {i}s: {' '.join(top_colors)}")
# ### MaturitySize
# Size at maturity (1 = Small, 2 = Medium, 3 = Large, 4 = Extra Large, 0 = Not Specified)
# In[40]:
plot_four_graphs(col='MaturitySize', main_title='MaturitySize', dataset_title='Number of pets by MaturitySize in train and test data')
# In[41]:
make_factor_plot(df=all_data, x='MaturitySize', col='Type', title='Count of cats and dogs in train and test set by MaturitySize', hue='dataset_type', ann=False)
# In[42]:
images = [i.split('-')[0] for i in os.listdir('../input/train_images/')]
size_dict = {1: 'Small', 2: 'Medium', 3: 'Large', 4: 'Extra Large'}
for t in all_data['Type'].unique():
for m in all_data['MaturitySize'].unique():
df = all_data.loc[(all_data['Type'] == t) & (all_data['MaturitySize'] == m)]
top_breeds = list(df['Breed1_name'].value_counts().index)[:5]
m = size_dict[m]
print(f"Most common Breeds of {m} {t}s:")
fig = plt.figure(figsize=(25, 4))
for i, breed in enumerate(top_breeds):
# excluding pets without pictures
b_df = df.loc[(df['Breed1_name'] == breed) & (df['PetID'].isin(images)), 'PetID']
if len(b_df) > 1:
pet_id = b_df.values[1]
else:
pet_id = b_df.values[0]
ax = fig.add_subplot(1, 5, i+1, xticks=[], yticks=[])
im = Image.open("../input/train_images/" + pet_id + '-1.jpg')
plt.imshow(im)
ax.set_title(f'Breed: {breed}')
plt.show();
# Quite interesting:
# * We can see that maturity size isn't very important. Medium sized pets are most common and they have slightly more chances to be not adopted;
# * There are almost no Extra Large pets. I hope it means that their owners like them and there is no need for them to be adopted :)
# * I wanted to have a look at different pets, so I showed example pictures of the most common breeds for each maturity size of cats and dogs;
# * I think not all data is entirely correct: sometimes short-haired cats have a breed with "medium hair" in its name, so not all breeds may be entirely correct. Some photos have bad quality;
# ### FurLength
#
# (1 = Short, 2 = Medium, 3 = Long, 0 = Not Specified)
# In[43]:
plot_four_graphs(col='FurLength', main_title='FurLength', dataset_title='Number of pets by FurLength in train and test data')
# * We can see that most of the pets have short fur and long fur is the least common;
# * Pets with long hair tend to have a higher chance of being adopted. Though it could be because of randomness due to low count;
#
# As I said earlier, some breed have hair length in the text, let's check these values!
# In[44]:
fig, ax = plt.subplots(figsize = (20, 18))
plt.subplot(2, 2, 1)
text_cat1 = ' '.join(all_data.loc[(all_data['FurLength'] == 1) & (all_data['Type'] == 'Cat'), 'Breed1_name'].fillna('').values)
wordcloud = WordCloud(max_font_size=None, background_color='white', collocations=False,
width=1200, height=1000).generate(text_cat1)
plt.imshow(wordcloud)
plt.title('Top cat breed1 with short fur')
plt.axis("off")
plt.subplot(2, 2, 2)
text_dog1 = ' '.join(all_data.loc[(all_data['FurLength'] == 1) & (all_data['Type'] == 'Dog'), 'Breed1_name'].fillna('').values)
wordcloud = WordCloud(max_font_size=None, background_color='white', collocations=False,
width=1200, height=1000).generate(text_dog1)
plt.imshow(wordcloud)
plt.title('Top dog breed1 with short fur')
plt.axis("off")
plt.subplot(2, 2, 3)
text_cat2 = ' '.join(all_data.loc[(all_data['FurLength'] == 2) & (all_data['Type'] == 'Cat'), 'Breed1_name'].fillna('').values)
wordcloud = WordCloud(max_font_size=None, background_color='white', collocations=False,
width=1200, height=1000).generate(text_cat2)
plt.imshow(wordcloud)
plt.title('Top cat breed1 with medium fur')
plt.axis("off")
plt.subplot(2, 2, 4)
text_dog2 = ' '.join(all_data.loc[(all_data['FurLength'] == 2) & (all_data['Type'] == 'Dog'), 'Breed1_name'].fillna('').values)
wordcloud = WordCloud(max_font_size=None, background_color='white', collocations=False,
width=1200, height=1000).generate(text_dog2)
plt.imshow(wordcloud)
plt.title('Top dog breed1 with medium fur')
plt.axis("off")
plt.show()
# In[45]:
c = 0
strange_pets = []
for i, row in all_data[all_data['Breed1_name'].str.contains('air')].iterrows():
if 'Short' in row['Breed1_name'] and row['FurLength'] == 1:
pass
elif 'Medium' in row['Breed1_name'] and row['FurLength'] == 2:
pass
elif 'Long' in row['Breed1_name'] and row['FurLength'] == 3:
pass
else:
c += 1
strange_pets.append((row['PetID'], row['Breed1_name'], row['FurLength']))
print(f"There are {c} pets whose breed and fur length don't match")
# It seems that almost one thousand pets have a mismatch between breed and fur length. Let's see!
# In[46]:
strange_pets = [p for p in strange_pets if p[0] in images]
fig = plt.figure(figsize=(25, 12))
fur_dict = {1: 'Short', 2: 'Medium', 3: 'Long'}
for i, s in enumerate(random.sample(strange_pets, 12)):
ax = fig.add_subplot(3, 4, i+1, xticks=[], yticks=[])
im = Image.open("../input/train_images/" + s[0] + '-1.jpg')
plt.imshow(im)
ax.set_title(f'Breed: {s[1]} \n Fur length: {fur_dict[s[2]]}')
plt.show();
# Everybody lies!
#
# Sometimes breed is more correct, sometimes fur length... I suppose we could create a feature showing whether breed and fur length match.
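# A sketch of that feature (illustrative), using a check similar to the loop above:
# 1 means the breed name mentions a hair length that contradicts the FurLength field.
def breed_fur_mismatch(row):
    name, fur = str(row['Breed1_name']), row['FurLength']
    if ('Short' in name and fur != 1) or ('Medium' in name and fur != 2) or ('Long' in name and fur != 3):
        return 1
    return 0
all_data['Breed_fur_mismatch'] = all_data.apply(breed_fur_mismatch, axis=1)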
# ### Health
#
# There are four features showing health of the pets:
#
# * Vaccinated - Pet has been vaccinated (1 = Yes, 2 = No, 3 = Not Sure)
# * Dewormed - Pet has been dewormed (1 = Yes, 2 = No, 3 = Not Sure)
# * Sterilized - Pet has been spayed / neutered (1 = Yes, 2 = No, 3 = Not Sure)
# * Health - Health Condition (1 = Healthy, 2 = Minor Injury, 3 = Serious Injury, 0 = Not Specified)
#
# I think that these features are very important - most people would prefer a healthy pet. While sterilization isn't the main concern, having a healthy and dewormed pet should be of great importance. Let's see whether I'm right!
# In[47]:
plt.figure(figsize=(20, 12));
plt.subplot(2, 2, 1)
make_count_plot(df=train, x='Vaccinated', title='Vaccinated')
plt.xticks([0, 1, 2], ['Yes', 'No', 'Not sure']);
plt.title('AdoptionSpeed and Vaccinated');
plt.subplot(2, 2, 2)
make_count_plot(df=train, x='Dewormed', title='Dewormed')
plt.xticks([0, 1, 2], ['Yes', 'No', 'Not sure']);
plt.title('AdoptionSpeed and Dewormed');
plt.subplot(2, 2, 3)
make_count_plot(df=train, x='Sterilized', title='Sterilized')
plt.xticks([0, 1, 2], ['Yes', 'No', 'Not sure']);
plt.title('AdoptionSpeed and Sterilized');
plt.subplot(2, 2, 4)
make_count_plot(df=train, x='Health', title='Health')
plt.xticks([0, 1, 2], ['Healthy', 'Minor Injury', 'Serious Injury']);
plt.title('AdoptionSpeed and Health');
plt.suptitle('Adoption Speed and health conditions');
# * Almost all pets are healthy! Pets with minor injuries are rare and sadly they aren't adopted well. Number of pets with serious injuries is negligible.
# * It is interesting that people prefer non-vaccinated pets. Maybe they want to bring pets to vets themselves...
# * People also prefer non-sterilized pets! Maybe they want puppies/kittens :)
# * Quite important is the fact that when there is no information about health condition, the probability of not being adopted is much higher;
#
# Let's have a look at most popular health conditions.
# In[48]:
train['health'] = train['Vaccinated'].astype(str) + '_' + train['Dewormed'].astype(str) + '_' + train['Sterilized'].astype(str) + '_' + train['Health'].astype(str)
test['health'] = test['Vaccinated'].astype(str) + '_' + test['Dewormed'].astype(str) + '_' + test['Sterilized'].astype(str) + '_' + test['Health'].astype(str)
make_factor_plot(df=train.loc[train['health'].isin(list(train.health.value_counts().index[:5]))], x='health', col='AdoptionSpeed', title='Counts of pets by main health conditions and Adoption Speed')
# * Healthy, dewormed and non-sterilized pets tend to be adopted faster!
# * Completely healthy pets are... more likely to be not adopted! I suppose that means that a lot of people pay attention to other characteristics;
# * And healthy pets with no information (not sure value) also tend to be adopted less frequently. Maybe people prefer having information, even if it is negative;
# In[49]:
plt.figure(figsize=(20, 16))
plt.subplot(3, 2, 1)
sns.violinplot(x="AdoptionSpeed", y="Age", data=train);
plt.title('Age distribution by AdoptionSpeed');
plt.subplot(3, 2, 3)
sns.violinplot(x="AdoptionSpeed", y="Age", hue="Vaccinated", data=train);
plt.title('Age distribution by AdoptionSpeed and Vaccinated');
plt.subplot(3, 2, 4)
sns.violinplot(x="AdoptionSpeed", y="Age", hue="Dewormed", data=train);
plt.title('Age distribution by AdoptionSpeed and Dewormed');
plt.subplot(3, 2, 5)
sns.violinplot(x="AdoptionSpeed", y="Age", hue="Sterilized", data=train);
plt.title('Age distribution by AdoptionSpeed and Sterilized');
plt.subplot(3, 2, 6)
sns.violinplot(x="AdoptionSpeed", y="Age", hue="Health", data=train);
plt.title('Age distribution by AdoptionSpeed and Health');
# ### Quantity
# Sometimes there are several pets in one advertisement.
# In[50]:
train.loc[train['Quantity'] > 11][['Name', 'Description', 'Quantity', 'AdoptionSpeed']].head(10)
# In[51]:
train['Quantity'].value_counts().head(10)
# Sometimes there is a huge amount of pets in some advertisements! But at the same time sometimes text and the quantity don't match. For example:
#
# Pancho and Tita are 2 adorable, playful kittens. They can be shy at first but once they get to know you they are the sweetest pets anyone could ask for. Available for adoption now. They are very, very close so we are looking for someone who can take them both.
#
# Obviously there are only two kittens, but the quantity is 12 for some reason.
#
# One thing worth noticing is that sometimes all these pets are adopted, which is great!
#
# For the sake of plotting I'll create a new variable, where 6 pets in one advertisement will be the maximum amount.
# In[52]:
train['Quantity_short'] = train['Quantity'].apply(lambda x: x if x <= 5 else 6)
test['Quantity_short'] = test['Quantity'].apply(lambda x: x if x <= 5 else 6)
all_data['Quantity_short'] = all_data['Quantity'].apply(lambda x: x if x <= 5 else 6)
plot_four_graphs(col='Quantity_short', main_title='Quantity_short', dataset_title='Number of pets by Quantity_short in train and test data')
# It seems that quantity has little to do with adoption speed. This is good, it means that abandoned cats/dogs with kittens/puppies have chances of being adopted! Though it seems that single cats have somewhat higher chances than single dogs.
# ### Fee
# One of the interesting features is the adoption fee. Some pets can be adopted for free, while adopting others requires paying a certain amount.
# In[53]:
train['Free'] = train['Fee'].apply(lambda x: 'Free' if x == 0 else 'Not Free')
test['Free'] = test['Fee'].apply(lambda x: 'Free' if x == 0 else 'Not Free')
all_data['Free'] = all_data['Fee'].apply(lambda x: 'Free' if x == 0 else 'Not Free')
plot_four_graphs(col='Free', main_title='Free', dataset_title='Number of pets by Free in train and test data')
# Most pets are free and it seems that asking for a fee slightly decreases the chance of adoption. Also free cats are adopted faster than free dogs.
# In[54]:
all_data.sort_values('Fee', ascending=False)[['Name', 'Description', 'Fee', 'AdoptionSpeed', 'dataset_type']].head(10)
# In[55]:
plt.figure(figsize=(16, 6));
plt.subplot(1, 2, 1)
plt.hist(train.loc[train['Fee'] < 400, 'Fee']);
plt.title('Distribution of fees lower than 400');
plt.subplot(1, 2, 2)
sns.violinplot(x="AdoptionSpeed", y="Fee", hue="Type", data=train);
plt.title('AdoptionSpeed by Type and Fee');
# * It is interesting that pets with a high fee tend to be adopted quite fast! Maybe people prefer to pay for "better" pets: healthy, trained and so on;
# * Most pets are given away for free and fees are usually lower than $100;
# * Fees for dogs tend to be higher, though these are rare cases anyway.
# In[56]:
plt.figure(figsize=(16, 10));
sns.scatterplot(x="Fee", y="Quantity", hue="Type",data=all_data);
plt.title('Quantity of pets and Fee');
# It seems that fees and pet quantity have an inversely proportional relationship. The fewer the pets, the higher the fee. I suppose these single pets are better trained and prepared than most others.
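# One simple way to encode that interaction (illustrative sketch):
for df_ in (train, test, all_data):
    df_['Fee_per_pet'] = df_['Fee'] / df_['Quantity'].clip(lower=1)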
# ### State
# In[57]:
states_dict = {k: v for k, v in zip(states['StateID'], states['StateName'])}
train['State_name'] = train['State'].apply(lambda x: '_'.join(states_dict[x].split()) if x in states_dict else 'Unknown')
test['State_name'] = test['State'].apply(lambda x: '_'.join(states_dict[x].split()) if x in states_dict else 'Unknown')
all_data['State_name'] = all_data['State'].apply(lambda x: '_'.join(states_dict[x].split()) if x in states_dict else 'Unknown')
# In[58]:
all_data['State_name'].value_counts(normalize=True).head()
# Sadly I don't know anything about Malaysia’s states, so I can only say that top three states account for ~90% of ads. Let's have a look at them.
# In[59]:
make_factor_plot(df=train.loc[train['State_name'].isin(list(train.State_name.value_counts().index[:3]))], x='State_name', col='AdoptionSpeed', title='Counts of pets by states and Adoption Speed')
# Interestingly, the second and third most common states have lower rates of adoption.
# ### Rescuer
# We have unique hashes for rescuers.
# In[60]:
all_data['RescuerID'].value_counts().head()
# The top 5 rescuers managed a lot of pets!
# I wonder whether these are individual people or organizations. Let's have a look at them.
# In[61]:
make_factor_plot(df=train.loc[train['RescuerID'].isin(list(train.RescuerID.value_counts().index[:5]))], x='RescuerID', col='AdoptionSpeed', title='Counts of pets by rescuers and Adoption Speed', col_wrap=5)
# Wow! The rescuer with the highest number of rescued pets has the best adoption rate! On the other hand the third one has the worst rate :(
# ### VideoAmt
# In[62]:
train['VideoAmt'].value_counts()
# Hm. In most cases there are no videos at all. Sometimes there is one video; more than one video is quite rare. We don't have the videos themselves, and considering the huge imbalance in values I'm not sure this variable will be useful.
# ### PhotoAmt
# In[63]:
print(F'Maximum amount of photos is {train["PhotoAmt"].max()}')
train['PhotoAmt'].value_counts().head()
# In[64]:
make_factor_plot(df=train.loc[train['PhotoAmt'].isin(list(train.PhotoAmt.value_counts().index[:5]))], x='PhotoAmt', col='AdoptionSpeed', title='Counts of pets by PhotoAmt and Adoption Speed', col_wrap=5)
# In[65]:
plt.figure(figsize=(16, 6));
plt.subplot(1, 2, 1)
plt.hist(train['PhotoAmt']);
plt.title('Distribution of PhotoAmt');
plt.subplot(1, 2, 2)
sns.violinplot(x="AdoptionSpeed", y="PhotoAmt", hue="Type", data=train);
plt.title('AdoptionSpeed by Type and PhotoAmt');
# Pets can have up to 30 photos! That's a lot! But I'm not convinced that the amount of photos has any real influence.
# ### Description
#
# Description contains a lot of important information, let's analyze it!
# In[66]:
fig, ax = plt.subplots(figsize = (12, 8))
text_cat = ' '.join(all_data['Description'].fillna('').values)
wordcloud = WordCloud(max_font_size=None, background_color='white',
width=1200, height=1000).generate(text_cat)
plt.imshow(wordcloud)
plt.title('Top words in description');
plt.axis("off");
# There are too many similar general words like "cat". We need to go deeper.
#
# Let's use ELI5 library for prediction explanation. I'll fit a basic vectorizer on desctriptions and build a simple Random Forest model. Then we will look at words which caused certain labels to be predicted.
# In[67]:
tokenizer = TweetTokenizer()
vectorizer = TfidfVectorizer(ngram_range=(1, 2), tokenizer=tokenizer.tokenize)
vectorizer.fit(all_data['Description'].fillna('').values)
X_train = vectorizer.transform(train['Description'].fillna(''))
rf = RandomForestClassifier(n_estimators=20)
rf.fit(X_train, train['AdoptionSpeed'])
# In[68]:
for i in range(5):
print(f'Example of Adoption speed {i}')
text = train.loc[train['AdoptionSpeed'] == i, 'Description'].values[0]
print(text)
display(eli5.show_prediction(rf, doc=text, vec=vectorizer, top=10))
# Some words/phrases seem to be useful, but it seems that different adoption speed classes could have similar important words...
# In[69]:
train['Description'] = train['Description'].fillna('')
test['Description'] = test['Description'].fillna('')
all_data['Description'] = all_data['Description'].fillna('')
train['desc_length'] = train['Description'].apply(lambda x: len(x))
train['desc_words'] = train['Description'].apply(lambda x: len(x.split()))
test['desc_length'] = test['Description'].apply(lambda x: len(x))
test['desc_words'] = test['Description'].apply(lambda x: len(x.split()))
all_data['desc_length'] = all_data['Description'].apply(lambda x: len(x))
all_data['desc_words'] = all_data['Description'].apply(lambda x: len(x.split()))
train['averate_word_length'] = train['desc_length'] / train['desc_words']
test['averate_word_length'] = test['desc_length'] / test['desc_words']
all_data['averate_word_length'] = all_data['desc_length'] / all_data['desc_words']
# In[70]:
plt.figure(figsize=(16, 6));
plt.subplot(1, 2, 1)
sns.violinplot(x="AdoptionSpeed", y="desc_length", hue="Type", data=train);
plt.title('AdoptionSpeed by Type and description length');
plt.subplot(1, 2, 2)
sns.violinplot(x="AdoptionSpeed", y="desc_words", hue="Type", data=train);
plt.title('AdoptionSpeed by Type and count of words in description');
# Interestingly, pets with a short text in the ad are adopted quickly. Or maybe longer descriptions mean more problems with the pets, and therefore the adoption speed is lower?
# ### Sentiment
# We have run each pet profile's description through Google's Natural Language API, providing analysis on sentiment and key entities. You may optionally utilize this supplementary information for your pet description analysis. There are some descriptions that the API could not analyze. As such, there are fewer sentiment files than there are rows in the dataset.
# In[71]:
sentiment_dict = {}
for filename in os.listdir('../input/train_sentiment/'):
with open('../input/train_sentiment/' + filename, 'r') as f:
sentiment = json.load(f)
pet_id = filename.split('.')[0]
sentiment_dict[pet_id] = {}
sentiment_dict[pet_id]['magnitude'] = sentiment['documentSentiment']['magnitude']
sentiment_dict[pet_id]['score'] = sentiment['documentSentiment']['score']
sentiment_dict[pet_id]['language'] = sentiment['language']
for filename in os.listdir('../input/test_sentiment/'):
with open('../input/test_sentiment/' + filename, 'r') as f:
sentiment = json.load(f)
pet_id = filename.split('.')[0]
sentiment_dict[pet_id] = {}
sentiment_dict[pet_id]['magnitude'] = sentiment['documentSentiment']['magnitude']
sentiment_dict[pet_id]['score'] = sentiment['documentSentiment']['score']
sentiment_dict[pet_id]['language'] = sentiment['language']
# In[72]:
train['lang'] = train['PetID'].apply(lambda x: sentiment_dict[x]['language'] if x in sentiment_dict else 'no')
train['magnitude'] = train['PetID'].apply(lambda x: sentiment_dict[x]['magnitude'] if x in sentiment_dict else 0)
train['score'] = train['PetID'].apply(lambda x: sentiment_dict[x]['score'] if x in sentiment_dict else 0)
test['lang'] = test['PetID'].apply(lambda x: sentiment_dict[x]['language'] if x in sentiment_dict else 'no')
test['magnitude'] = test['PetID'].apply(lambda x: sentiment_dict[x]['magnitude'] if x in sentiment_dict else 0)
test['score'] = test['PetID'].apply(lambda x: sentiment_dict[x]['score'] if x in sentiment_dict else 0)
all_data['lang'] = all_data['PetID'].apply(lambda x: sentiment_dict[x]['language'] if x in sentiment_dict else 'no')
all_data['magnitude'] = all_data['PetID'].apply(lambda x: sentiment_dict[x]['magnitude'] if x in sentiment_dict else 0)
all_data['score'] = all_data['PetID'].apply(lambda x: sentiment_dict[x]['score'] if x in sentiment_dict else 0)
# In[73]:
plot_four_graphs(col='lang', main_title='lang', dataset_title='Number of pets by lang in train and test data')
# Well, English is by far the most common language, so the language feature will hardly help.
# In[74]:
plt.figure(figsize=(16, 6));
plt.subplot(1, 2, 1)
sns.violinplot(x="AdoptionSpeed", y="score", hue="Type", data=train);
plt.title('AdoptionSpeed by Type and score');
plt.subplot(1, 2, 2)
sns.violinplot(x="AdoptionSpeed", y="magnitude", hue="Type", data=train);
plt.title('AdoptionSpeed by Type and magnitude of sentiment');
# It seems that the lower the sentiment magnitude, the faster pets are adopted.
# ### Basic model
#
# There are many more interesting things in the dataset and I'm going to explore them, but for now let's build a simple model as a baseline.
# In[75]:
cols_to_use = ['Type', 'Age', 'Breed1', 'Breed2', 'Gender', 'Color1', 'Color2',
'Color3', 'MaturitySize', 'FurLength', 'Vaccinated', 'Dewormed',
'Sterilized', 'Health', 'Quantity', 'Fee', 'State', 'RescuerID', 'health', 'Free', 'score',
'VideoAmt', 'PhotoAmt', 'AdoptionSpeed', 'No_name', 'Pure_breed', 'desc_length', 'desc_words', 'average_word_length', 'magnitude']
train = train[[col for col in cols_to_use if col in train.columns]]
test = test[[col for col in cols_to_use if col in test.columns]]
# In[76]:
cat_cols = ['Type', 'Breed1', 'Breed2', 'Gender', 'Color1', 'Color2',
'Color3', 'MaturitySize', 'FurLength', 'Vaccinated', 'Dewormed',
'Sterilized', 'Health', 'State', 'RescuerID',
'No_name', 'Pure_breed', 'health', 'Free']
# In[77]:
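# Create pairwise interaction features: for every ordered pair of categorical columns
# (excluding the high-cardinality RescuerID and State), concatenate their string values
# into a new combined categorical column.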
more_cols = []
for col1 in cat_cols:
for col2 in cat_cols:
if col1 != col2 and col1 not in ['RescuerID', 'State'] and col2 not in ['RescuerID', 'State']:
train[col1 + '_' + col2] = train[col1].astype(str) + '_' + train[col2].astype(str)
test[col1 + '_' + col2] = test[col1].astype(str) + '_' + test[col2].astype(str)
more_cols.append(col1 + '_' + col2)
cat_cols = cat_cols + more_cols
# In[78]:
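# Label-encode every categorical column: pd.factorize on the train values builds an index
# per column, and get_indexer maps both train and test onto that index (categories unseen
# in train become -1).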
get_ipython().run_cell_magic('time', '', 'indexer = {}\nfor col in cat_cols:\n # print(col)\n _, indexer[col] = pd.factorize(train[col].astype(str))\n \nfor col in tqdm_notebook(cat_cols):\n # print(col)\n train[col] = indexer[col].get_indexer(train[col].astype(str))\n test[col] = indexer[col].get_indexer(test[col].astype(str))')
# In[79]:
y = train['AdoptionSpeed']
train = train.drop(['AdoptionSpeed'], axis=1)
# ## Naive multiclass LGB
# In[80]:
n_fold = 5
folds = StratifiedKFold(n_splits=n_fold, shuffle=True, random_state=15)
# In[81]:
def train_model(X=train, X_test=test, y=y, params=None, folds=folds, model_type='lgb', plot_feature_importance=False, averaging='usual', make_oof=False):
result_dict = {}
if make_oof:
oof = np.zeros((len(X), 5))
prediction = np.zeros((len(X_test), 5))
scores = []
feature_importance = pd.DataFrame()
for fold_n, (train_index, valid_index) in enumerate(folds.split(X, y)):
gc.collect()
print('Fold', fold_n + 1, 'started at', time.ctime())
X_train, X_valid = X.iloc[train_index], X.iloc[valid_index]
y_train, y_valid = y.iloc[train_index], y.iloc[valid_index]
if model_type == 'lgb':
train_data = lgb.Dataset(X_train, label=y_train, categorical_feature = cat_cols)
valid_data = lgb.Dataset(X_valid, label=y_valid, categorical_feature = cat_cols)
model = lgb.train(params,
train_data,
num_boost_round=20000,
valid_sets = [train_data, valid_data],
verbose_eval=500,
early_stopping_rounds = 200)
del train_data, valid_data
y_pred_valid = model.predict(X_valid, num_iteration=model.best_iteration)
del X_valid
gc.collect()
y_pred = model.predict(X_test, num_iteration=model.best_iteration)
if model_type == 'xgb':
train_data = xgb.DMatrix(data=X_train, label=y_train)
valid_data = xgb.DMatrix(data=X_valid, label=y_valid)
watchlist = [(train_data, 'train'), (valid_data, 'valid_data')]
model = xgb.train(dtrain=train_data, num_boost_round=20000, evals=watchlist, early_stopping_rounds=200, verbose_eval=500, params=params)
y_pred_valid = model.predict(xgb.DMatrix(X_valid), ntree_limit=model.best_ntree_limit)
y_pred = model.predict(xgb.DMatrix(X_test), ntree_limit=model.best_ntree_limit)
if model_type == 'lcv':
model = LogisticRegressionCV(scoring='neg_log_loss', cv=3, multi_class='multinomial')
model.fit(X_train, y_train)
y_pred_valid = model.predict(X_valid)
y_pred = model.predict(X_test)
if model_type == 'cat':
model = CatBoostClassifier(iterations=20000, loss_function='MultiClass', **params)
model.fit(X_train, y_train, eval_set=(X_valid, y_valid), cat_features=[], use_best_model=True, verbose=False)
y_pred_valid = model.predict(X_valid)
y_pred = model.predict(X_test).reshape(-1,)
if make_oof:
oof[valid_index] = y_pred_valid
scores.append(kappa(y_valid, y_pred_valid.argmax(1)))
print('Fold kappa:', kappa(y_valid, y_pred_valid.argmax(1)))
print('')
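# Test predictions are accumulated across folds: 'usual' sums the raw class probabilities,
# while 'rank' sums rank-transformed predictions.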
if averaging == 'usual':
prediction += y_pred
elif averaging == 'rank':
prediction += pd.Series(y_pred).rank().values
if model_type == 'lgb':
# feature importance
fold_importance = pd.DataFrame()
import urllib
import pytest
import pandas as pd
from pandas import testing as pdt
from anonympy import __version__
from anonympy.pandas import dfAnonymizer
from anonympy.pandas.utils_pandas import load_dataset
@pytest.fixture(scope="module")
def anonym_small():
df = load_dataset('small')
anonym = dfAnonymizer(df)
return anonym
@pytest.fixture(scope="module")
def anonym_big():
try:
df = load_dataset('big')
anonym = dfAnonymizer(df)
except urllib.error.HTTPError:
anonym = None
return anonym
def test_anonym_obj(anonym_small, anonym_big):
assert isinstance(anonym_small, dfAnonymizer), "should have\
returned `dfAnonymizer` object"
if anonym_big is None:
assert False, "Failed to fetch the DataFrame"
assert isinstance(anonym_big, dfAnonymizer), "should have returned\
`dfAnonymizer` object"
def test_numeric_noise(anonym_small):
output = anonym_small.numeric_noise('age', seed=42, inplace=False)
expected = pd.Series([38, 47], dtype='int64')
pdt.assert_series_equal(expected, output, check_names=False)
output = anonym_small.numeric_noise(['age', 'salary'],
seed=42,
inplace=False)
expected = pd.DataFrame({'age': [38, 47],
'salary': [59239.79912097112, 49323.30756879504]})
pdt.assert_frame_equal(expected, output)
def test_numeric_binning(anonym_small):
output = anonym_small.numeric_binning('salary', bins=2, inplace=False)
dtype = pd.CategoricalDtype([
pd.Interval(49315.0, 54279.0, closed='right'),
pd.Interval(54279.0, 59234.0, closed='right')],
ordered=True)
expected = pd.Series([
pd.Interval(54279.0, 59234.0, closed='right'),
pd.Interval(49315.0, 54279.0, closed='right')],
dtype=dtype)
pdt.assert_series_equal(expected, output, check_names=False)
output = anonym_small.numeric_binning(['age', 'salary'],
bins=2,
inplace=False)
dtype2 = pd.CategoricalDtype([
pd.Interval(33.0, 40.0, closed='right'),
pd.Interval(40.0, 48.0, closed='right')],
ordered=True)
ser2 = pd.Series([
pd.Interval(33.0, 40.0, closed='right'),
pd.Interval(40.0, 48.0, closed='right')],
dtype=dtype2)
expected = pd.DataFrame({'age': ser2, 'salary': expected})
pdt.assert_frame_equal(expected, output)
def test_numeric_masking(anonym_small):
output = anonym_small.numeric_masking('age', inplace=False)
expected = pd.Series([7.5, -7.5], dtype='float64')
pdt.assert_series_equal(expected, output, check_names=False)
output = anonym_small.numeric_masking(['age', 'salary'], inplace=False)
expected = pd.DataFrame({'age': [-4954.900676201789, 4954.900676201798],
'salary': [5.840670901327418e-15,
5.840670901327409e-15]})
pdt.assert_frame_equal(expected, output)
def test_numeric_rounding(anonym_small):
output = anonym_small.numeric_rounding('salary', inplace=False)
expected = pd.Series([60000.0, 50000.0], dtype='float64')
pdt.assert_series_equal(expected, output, check_names=False)
output = anonym_small.numeric_rounding(['age', 'salary'], inplace=False)
expected = pd.DataFrame({'age': {0: 30, 1: 50}, 'salary': {0: 60000.0,
1: 50000.0}})
pdt.assert_frame_equal(expected, output)
@pytest.mark.skipif(__version__ == '0.2.4',
reason="Requires anonympy >= 0.2.5")
def test_categorical_fake(anonym_small):
output = anonym_small.categorical_fake('name',
locale=['en_US'],
seed=42,
inplace=False)
expected = pd.Series(['<NAME>', '<NAME>'])
pdt.assert_series_equal(expected, output, check_names=False)
output = anonym_small.categorical_fake(['name', 'email'],
locale=['en_GB'],
seed=42,
inplace=False)
expected = pd.DataFrame({'name': {0: '<NAME>', 1: '<NAME>'},
'email': {0: '<EMAIL>',
1: '<EMAIL>'}})
pdt.assert_frame_equal(expected, output)
output = anonym_small.categorical_fake({'name': 'name_female'},
seed=42,
inplace=False)
expected = pd.Series(['<NAME>', '<NAME>'])
pdt.assert_series_equal(expected, output, check_names=False)
output = anonym_small.categorical_fake({'ssn': 'ssn', 'web': 'url'},
seed=42,
inplace=False)
expected = pd.DataFrame({'ssn': {0: '655-15-0410', 1: '760-36-4013'},
'web': {0: 'http://www.hill.net/',
1: 'http://johnson.com/'}})
pdt.assert_frame_equal(expected, output)
def test_categorical_fake_auto(anonym_small):
output = anonym_small.categorical_fake_auto(seed=42, inplace=False)
expected = pd.DataFrame({'name': {0: '<NAME>', 1: '<NAME>'},
'email': {0: '<EMAIL>',
1: '<EMAIL>'},
'ssn': {0: '655-15-0410', 1: '760-36-4013'}})
pdt.assert_frame_equal(expected, output)
@pytest.mark.skipif(__version__ == '0.2.4',
reason="Requires anonympy >= 0.2.5")
def test_categorical_resampling(anonym_small):
output = anonym_small.categorical_resampling('name',
inplace=False,
seed=42)
expected = pd.Series(['Bruce', 'Tony'])
pdt.assert_series_equal(expected, output, check_names=False)
output = anonym_small.categorical_resampling(['web', 'ssn'],
seed=2,
inplace=False)
expected = pd.DataFrame({'web':
{0: 'http://www.alandrosenburgcpapc.co.uk',
1: 'http://www.alandrosenburgcpapc.co.uk'},
'ssn': {0: '656564664', 1: '343554334'}})
pdt.assert_frame_equal(expected, output)
@pytest.mark.skipif(__version__ == '0.2.4',
reason="Requires anonympy >= 0.2.5")
def test_categorical_tokenization(anonym_small):
output = anonym_small.categorical_tokenization('name',
key='test',
inplace=False)
expected = pd.Series(['45fe1a783c', 'bda8a41313'])
pdt.assert_series_equal(expected, output, check_names=False)
output = anonym_small.categorical_tokenization(['web', 'ssn'],
key='test',
inplace=False)
expected = pd.DataFrame({'web': {0: 'e667d84f37', 1: '986a819ea2'},
'ssn': {0: '0f7c17cc6f', 1: 'f42ad34907'}})
pdt.assert_frame_equal(expected, output)
def test_categorical_email_masking(anonym_small):
output = anonym_small.categorical_email_masking('email', inplace=False)
expected = pd.Series(['<EMAIL>', '<EMAIL>'])
pdt.assert_series_equal(expected, output, check_names=False)
output = anonym_small.categorical_email_masking(['email', 'email'],
inplace=False)
expected = pd.DataFrame(
{'email': {0: '<EMAIL>', 1: '<EMAIL>'}})
pdt.assert_frame_equal(expected, output)
def test_datetime_noise(anonym_small):
output = anonym_small.datetime_noise('birthdate', seed=42, inplace=False)
expected = pd.Series([pd.Timestamp('1914-07-22 00:00:00'),
pd.Timestamp('1970-10-25 00:00:00')])
pdt.assert_series_equal(expected, output, check_names=False)
output = anonym_small.datetime_noise(['birthdate', 'birthdate'],
seed=42,
inplace=False)
expected = pd.DataFrame(
{'birthdate': {0: pd.Timestamp('1914-07-22 00:00:00'),
1: pd.Timestamp('1970-10-25 00:00:00')}})
import numpy as np
import pandas as pd
def get_best_param_dist(model_dict):
"""Computes mean and variance of the best fit params accross different runs
Args:
model_dict (dict): Dict containing the predictions dict for all the runs for a given
scenario, config setting
Returns:
dataframe containing mean, std for all the parameters
"""
best_param_vals = [run_dict['best_params'] for _, run_dict in model_dict.items()]
df = pd.DataFrame(best_param_vals).describe()
return df.loc[['mean', 'std']]
def get_ensemble_combined(model_dict, weighting='exp', beta=1):
"""Computes ensemble mean and variance of all the params accross different runs
Args:
model_dict (dict): Dict containing the predictions dict for all the runs for a given
scenario, config setting
weighting (str, optional): The weighting function.
If 'exp', np.exp(-beta*loss) is the weighting function used. (beta is separate param here)
If 'inverse', 1/loss is used. Else, uniform weighting is used. Defaults to 'exp'.
beta (float, optional): beta param for exponential weighting
Returns:
dataframe containing mean, std for all the parameters
"""
params_dict = { k: np.array([]) for k in model_dict[list(model_dict.keys())[0]]['best_params'].keys() }
losses_array = np.array([])
for _, run_dict in model_dict.items():
params_array = run_dict['trials']['params']
loss_array = run_dict['trials']['losses']
losses_array = np.concatenate((losses_array, loss_array), axis=0)
for param in params_dict.keys():
params_vals = np.array([param_dict[param] for param_dict in params_array])
params_dict[param] = np.concatenate((params_dict[param],params_vals),axis=0)
if weighting == 'exp':
weights = np.exp(-beta*np.array(losses_array))
elif weighting == 'inverse':
weights = 1/np.array(losses_array)
else:
weights = np.ones(np.array(losses_array).shape)
if 'beta' not in params_dict and 'lockdown_R0' in params_dict and 'T_inc' in params_dict:
params_dict['beta'] = np.divide(params_dict['lockdown_R0'], params_dict['T_inc'])
param_dist_stats = {}
for param in params_dict.keys():
mean = np.average(params_dict[param], weights=weights)
variance = np.average((params_dict[param] - mean)**2, weights=weights)
param_dist_stats[param] = {'mean':mean, 'std':np.sqrt(variance)}
df = pd.DataFrame(param_dist_stats)
return df.loc[['mean','std']]
def get_param_stats(model_dict, method='best', weighting='exp'):
"""Computes mean and variance for all the params accross different runs based on the method mentioned
Args:
model_dict (dict): Dict containing the predictions dict for all the runs for a given
scenario, config setting
method (str, optional): The method of aggregation of different runs ('best' or 'ensemble')
weighting (str, optional): The weighting function.
If 'exp', np.exp(-beta*loss) is the weighting function used. (beta is separate param here)
If 'inverse', 1/loss is used. Else, uniform weighting is used. Defaults to 'exp'.
Returns:
dataframe containing mean, std for all the parameters
"""
if method == 'best':
return get_best_param_dist(model_dict)
elif method == 'ensemble':
return get_ensemble_combined(model_dict, weighting=weighting)
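# Example (illustrative only; `model_dict` must follow the structure described in the
# docstrings above, i.e. {run_name: {'best_params': ..., 'trials': {'params': [...],
# 'losses': [...]}, 'df_loss': ...}}):
#
#     param_stats = get_param_stats(model_dict, method='ensemble', weighting='exp')
#     print(param_stats.loc['mean'])
#     print(param_stats.loc['std'])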
def get_loss_stats(model_dict, which_loss='train',method='best_loss_nora',weighting='exp',beta=1.0):
"""Computes mean and variance of loss values accross all the compartments for different runs
Args:
model_dict (dict): Dict containing the predictions dict for all the runs for a given
scenario, config setting
which_loss (str): which losses to consider ('train' or 'val')
method (str, optional): The method of aggregation of different runs.
possible values: 'best_loss_nora', 'best_loss_ra', 'ensemble_loss_ra'
weighting (str, optional): The weighting function.
If 'exp', np.exp(-beta*loss) is the weighting function used. (beta is separate param here)
If 'inverse', 1/loss is used. Else, uniform weighting is used. Defaults to 'exp'.
Returns:
dataframe containing mean, std loss values for all the compartments
"""
if method == 'best_loss_nora':
loss_vals = []
for _, run_dict in model_dict.items():
df = run_dict['df_loss'][which_loss]
df['agg'] = df.mean()
loss_vals.append(df)
df = pd.DataFrame(loss_vals).describe()
return df.loc[['mean','std']]
elif method == 'best_loss_ra':
losses_array = np.array([])
for _, run_dict in model_dict.items():
loss_array = run_dict['trials']['losses']
losses_array = np.append(losses_array, min(loss_array))
df = pd.DataFrame(columns=['agg'],index=['mean','std'])
df['agg']['mean'] = np.mean(losses_array)
df['agg']['std'] = np.std(losses_array)
return df
elif method == 'ensemble_loss_ra':
losses_array = np.array([])
for _, run_dict in model_dict.items():
loss_array = run_dict['trials']['losses']
losses_array = np.concatenate((losses_array, loss_array), axis=0)
df = pd.DataFrame(columns=['agg'], index=['mean', 'std'])
import json
import pandas as pd
class Inquinante(object):
def __init__(self, name, limit, method):
"""
name: name of air pollutter
limit: value of pollutter's threshold
method: 'daily', 'hourly', 'yearly'
"""
with open('static/data/default_info/agenti_e_centraline.json') as f:
agenti_centraline = json.load(f)
self.list_inquinanti = agenti_centraline['inquinanti']
self.list_centraline = agenti_centraline['centraline']
self.name = name
self.limit = limit
self.method = method
def pollutant_dataframe(self, df):
"""Returns the df wrt to the pollutant of interest.
df: dataframe to make computations on
"""
try:
assert self.name in self.list_inquinanti
return df[df['inquinante'] == self.name]
except AssertionError:
print ('The pollutant doesn\'t exist')
def average(self, df, anno):
"""Returns the average df wrt the year of interest.
df: dataframe to make computations on
anno: year of interest
"""
try:
assert self.name in self.list_inquinanti
if self.method == 'hourly':
return self._hourly_avg(df, anno)
elif self.method == 'daily':
return self._daily_avg(df, anno)
elif self.method == 'yearly':
return self._yearly_avg(df, anno)
else:
print ('You selected an invalid method!')
except AssertionError:
print ('The inserted inquinante is not in the list of possible inquinanti')
def _hourly_avg(self, df, anno):
"""Returns the hourly average df wrt the year of interest.
df: dataframe to make computations on
anno: year of interest
"""
# Aggregate the df for this pollutant: group by timestamp, take the max per group, keep the last row
df_media = self.pollutant_dataframe(df).groupby('data_ora_time').max().iloc[-1]
#df_media = self.pollutant_dataframe(df).mean()
#df_std_media = pd.DataFrame(df_media/self.limit*100)
# Drop anno and ora columns
df_media.drop(['anno','ora','limite', 'inquinante','data_ora'], inplace=True)
#df_media.drop(['anno','ora','limite'], inplace=True)
#df_std_media.drop(['anno','ora'], inplace=True)
# Give to the remaining columns the name of the year
df_media = pd.DataFrame(df_media)
import pandas as pd
import numpy as np
import matplotlib as mp
import matplotlib.pyplot as plt
from sklearn.cluster import KMeans
from pandas.plotting import scatter_matrix
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
filePath='C:/your folder/'
# number of samples that will be analyzed in detail
sample_n =30
index_n=sample_n+1 # for loop use only
# find outlier samples
def find_outlier(mean_each_sample):
mean_all_smaples = np.mean(mean_each_sample)
std_all_samples =np.std(mean_each_sample)
for idx, val in mean_each_sample.iteritems():
z_score= (val - mean_all_smaples)/std_all_samples
if np.abs(z_score) > 3:
outlier_samples[idx]=val
return outlier_samples
# scale back values obtained from model training
def scale_back(scaler, centroids):
    # reshape to 2-D as the scaler expects, then undo the scaling
    reshaped = centroids.reshape(centroids.shape[0], -1)
    original_scale = scaler.inverse_transform(reshaped)
    return original_scale
# generate column names (probe plus 946 samples)
cols=['probe']
for i in range(946):
x=i+1
colnm='sampl'+str(x)
cols.append(colnm)
# normalized human brain microarray gene expression data H0351.2001 (58692 probes * 946 samples)
# were downloaded from http://human.brain-map.org/static/download
data=pd.read_csv(filePath+"MicroarrayExpression.csv",names=cols,index_col='probe',sep=',')
probes = pd.read_csv(filePath+"Probes.csv", index_col='probe_id', sep=',')
"""Tests for dynamic validator."""
from datetime import date, datetime
import numpy as np
import pandas as pd
from delphi_validator.report import ValidationReport
from delphi_validator.dynamic import DynamicValidator
class TestCheckRapidChange:
params = {"data_source": "", "span_length": 1,
"end_date": "2020-09-02", "expected_lag": {}}
def test_same_df(self):
validator = DynamicValidator(self.params)
report = ValidationReport([])
test_df = pd.DataFrame([date.today()] * 5, columns=["time_value"])
ref_df = pd.DataFrame([date.today()] * 5, columns=["time_value"])
validator.check_rapid_change_num_rows(
test_df, ref_df, date.today(), "geo", "signal", report)
assert len(report.raised_errors) == 0
def test_0_vs_many(self):
validator = DynamicValidator(self.params)
report = ValidationReport([])
time_value = datetime.combine(date.today(), datetime.min.time())
test_df = pd.DataFrame([time_value] * 5, columns=["time_value"])
ref_df = pd.DataFrame([time_value] * 1, columns=["time_value"])
validator.check_rapid_change_num_rows(
test_df, ref_df, time_value, "geo", "signal", report)
assert len(report.raised_errors) == 1
assert "check_rapid_change_num_rows" in [
err.check_data_id[0] for err in report.raised_errors]
class TestCheckAvgValDiffs:
params = {"data_source": "", "span_length": 1,
"end_date": "2020-09-02", "expected_lag": {}}
def test_same_val(self):
validator = DynamicValidator(self.params)
report = ValidationReport([])
data = {"val": [1, 1, 1, 2, 0, 1], "se": [np.nan] * 6,
"sample_size": [np.nan] * 6, "geo_id": ["1"] * 6}
test_df = pd.DataFrame(data)
ref_df = pd.DataFrame(data)
validator.check_avg_val_vs_reference(
test_df, ref_df, date.today(), "geo", "signal", report)
assert len(report.raised_errors) == 0
def test_same_se(self):
validator = DynamicValidator(self.params)
report = ValidationReport([])
data = {"val": [np.nan] * 6, "se": [1, 1, 1, 2, 0, 1],
"sample_size": [np.nan] * 6, "geo_id": ["1"] * 6}
test_df = pd.DataFrame(data)
ref_df = pd.DataFrame(data)
validator.check_avg_val_vs_reference(
test_df, ref_df, date.today(), "geo", "signal", report)
assert len(report.raised_errors) == 0
def test_same_n(self):
validator = DynamicValidator(self.params)
report = ValidationReport([])
data = {"val": [np.nan] * 6, "se": [np.nan] * 6,
"sample_size": [1, 1, 1, 2, 0, 1], "geo_id": ["1"] * 6}
test_df = pd.DataFrame(data)
ref_df = pd.DataFrame(data)
validator.check_avg_val_vs_reference(
test_df, ref_df, date.today(), "geo", "signal", report)
assert len(report.raised_errors) == 0
def test_same_val_se_n(self):
validator = DynamicValidator(self.params)
report = ValidationReport([])
data = {"val": [1, 1, 1, 2, 0, 1], "se": [1, 1, 1, 2, 0, 1],
"sample_size": [1, 1, 1, 2, 0, 1], "geo_id": ["1"] * 6}
test_df = pd.DataFrame(data)
ref_df = pd.DataFrame(data)
validator.check_avg_val_vs_reference(
test_df, ref_df, date.today(), "geo", "signal", report)
assert len(report.raised_errors) == 0
def test_10x_val(self):
validator = DynamicValidator(self.params)
report = ValidationReport([])
test_data = {"val": [1, 1, 1, 20, 0, 1], "se": [np.nan] * 6,
"sample_size": [np.nan] * 6, "geo_id": ["1"] * 6}
ref_data = {"val": [1, 1, 1, 2, 0, 1], "se": [np.nan] * 6,
"sample_size": [np.nan] * 6, "geo_id": ["1"] * 6}
test_df = pd.DataFrame(test_data)
ref_df = pd.DataFrame(ref_data)
validator.check_avg_val_vs_reference(
test_df, ref_df,
datetime.combine(date.today(), datetime.min.time()), "geo", "signal", report)
assert len(report.raised_errors) == 0
def test_100x_val(self):
validator = DynamicValidator(self.params)
report = ValidationReport([])
test_data = {"val": [1, 1, 1, 200, 0, 1], "se": [np.nan] * 6,
"sample_size": [np.nan] * 6, "geo_id": ["1"] * 6}
ref_data = {"val": [1, 1, 1, 2, 0, 1], "se": [np.nan] * 6,
"sample_size": [np.nan] * 6, "geo_id": ["1"] * 6}
test_df = pd.DataFrame(test_data)
ref_df = pd.DataFrame(ref_data)
validator.check_avg_val_vs_reference(
test_df, ref_df,
datetime.combine(date.today(), datetime.min.time()), "geo", "signal", report)
assert len(report.raised_errors) == 1
assert "check_test_vs_reference_avg_changed" in [
err.check_data_id[0] for err in report.raised_errors]
def test_1000x_val(self):
validator = DynamicValidator(self.params)
report = ValidationReport([])
test_data = {"val": [1, 1, 1, 2000, 0, 1], "se": [np.nan] * 6,
"sample_size": [np.nan] * 6, "geo_id": ["1"] * 6}
ref_data = {"val": [1, 1, 1, 2, 0, 1], "se": [np.nan] * 6,
"sample_size": [np.nan] * 6, "geo_id": ["1"] * 6}
test_df = pd.DataFrame(test_data)
ref_df = pd.DataFrame(ref_data)
validator.check_avg_val_vs_reference(
test_df, ref_df,
datetime.combine(date.today(), datetime.min.time()), "geo", "signal", report)
assert len(report.raised_errors) == 1
assert "check_test_vs_reference_avg_changed" in [
err.check_data_id[0] for err in report.raised_errors]
class TestDataOutlier:
params = {"data_source": "", "span_length": 1,
"end_date": "2020-09-02", "expected_lag": {}}
pd.set_option("display.max_rows", None, "display.max_columns", None)
# Test to determine outliers based on the row data, has lead and lag outlier
def test_pos_outlier(self):
validator = DynamicValidator(self.params)
report = ValidationReport([])
ref_val = [30, 30.28571429, 30.57142857, 30.85714286, 31.14285714,
31.42857143, 31.71428571, 32, 32, 32.14285714,
32.28571429, 32.42857143, 32.57142857, 32.71428571,
32.85714286, 33, 33, 33, 33, 33, 33, 33, 33,
33, 33, 33, 33.28571429, 33.57142857, 33.85714286, 34.14285714]
test_val = [100, 100, 100]
ref_data = {"val": ref_val , "se": [np.nan] * len(ref_val),
"sample_size": [np.nan] * len(ref_val), "geo_id": ["1"] * len(ref_val),
"time_value": pd.date_range(start="2020-09-24", end="2020-10-23")}
test_data = {"val": test_val , "se": [np.nan] * len(test_val),
"sample_size": [np.nan] * len(test_val), "geo_id": ["1"] * len(test_val),
"time_value": pd.date_range(start="2020-10-24", end="2020-10-26")}
ref_data2 = {"val": ref_val , "se": [np.nan] * len(ref_val),
"sample_size": [np.nan] * len(ref_val), "geo_id": ["2"] * len(ref_val),
"time_value": | pd.date_range(start="2020-09-24", end="2020-10-23") | pandas.date_range |
# import pandas
import pandas as pd
pd.options.display.float_format = '{:.0f}'.format
pd.set_option('display.width', 85)
pd.set_option('display.max_columns', 5)
# import the land temperature data
percapitaGDP = pd.read_excel("data/GDPpercapita.xlsx",
sheet_name="OECD.Stat export",
skiprows=4,
skipfooter=1,
usecols="A,C:T")
percapitaGDP.head()
percapitaGDP.info()
# rename the Year column to metro
percapitaGDP.rename(columns={'Year':'metro'}, inplace=True)
percapitaGDP.metro.str.startswith(' ').any()
percapitaGDP.metro.str.endswith(' ').any()
percapitaGDP.metro = percapitaGDP.metro.str.strip()
# convert the data columns to numeric
for col in percapitaGDP.columns[1:]:
percapitaGDP[col] = pd.to_numeric(percapitaGDP[col], errors='coerce')
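# Quick sanity check (sketch): the year columns should now all be numeric
# percapitaGDP.dtypes
# percapitaGDP.head()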
# -*- coding: utf-8 -*-
"""part3_pricing_model_linear
"""
import numpy as np
import pickle
import pandas as pd
import sklearn
import keras
from keras import Sequential
from keras.layers import LeakyReLU
from keras.layers import Dense, Dropout
from keras.optimizers import RMSprop, Adam
from sklearn.calibration import CalibratedClassifierCV
from sklearn.model_selection import train_test_split
from sklearn import preprocessing
from sklearn.metrics import roc_auc_score
from sklearn.linear_model import LogisticRegression
from sklearn import linear_model
from sklearn.feature_selection import f_classif, mutual_info_classif
from sklearn.metrics import confusion_matrix
from imblearn.over_sampling import SMOTE
import seaborn as sns
class PricingModel():
def __init__(self, calibrate_probabilities=False):
self.calibrate = calibrate_probabilities
self.batch_size = 150
self.epochs = 10
self.X_raw = pd.read_csv('part3_data.csv')
self.claims_raw = np.array(pd.read_csv('part3_data.csv'))
# Copyright 2019 Toyota Research Institute. All rights reserved.
"""
Module and scripts for generating descriptors (quantities listed
in cell_analysis.m) from cycle-level summary statistics.
Usage:
featurize [INPUT_JSON]
Options:
-h --help Show this screen
--version Show version
The `featurize` script will generate features according to the methods
contained in beep.featurize. It places output files corresponding to
features in `/data-share/features/`.
The input json must contain the following fields
* `file_list` - a list of processed cycler runs for which to generate features
The output json file will contain the following:
* `file_list` - a list of filenames corresponding to the locations of the features
Example:
```angular2
$ featurize '{"invalid_file_list": ["/data-share/renamed_cycler_files/FastCharge/FastCharge_0_CH33.csv",
"/data-share/renamed_cycler_files/FastCharge/FastCharge_1_CH44.csv"],
"file_list": ["/data-share/structure/FastCharge_2_CH29_structure.json"]}'
{"file_list": ["/data-share/features/FastCharge_2_CH29_full_model_features.json"]}
```
"""
import os
import json
import numpy as np
import pandas as pd
from docopt import docopt
from monty.json import MSONable
from monty.serialization import loadfn, dumpfn
from scipy.stats import skew, kurtosis
from beep.collate import scrub_underscore_suffix, add_suffix_to_filename
from beep.utils import KinesisEvents
from beep import logger, ENVIRONMENT, __version__
s = {'service': 'DataAnalyzer'}
class DegradationPredictor(MSONable):
"""
Object corresponding to feature matrix. Includes constructors
to initialize the feature vectors.
Attributes:
name (str): predictor object name.
X (pandas.DataFrame): data as records x features.
y (pandas.DataFrame): targets.
feature_labels (list): feature labels.
predict_only (bool): True/False to specify predict/train mode.
prediction_type (str): Type of regression - 'single' vs 'multi'.
predicted_quantity (str): 'cycle' or 'capacity'.
nominal_capacity (float): nominal cell capacity (median discharge capacity of the early cycles).
"""
def __init__(self, name, X, feature_labels=None, y=None, nominal_capacity=1.1,
predict_only=False, predicted_quantity="cycle", prediction_type="multi"):
"""
Args:
name (str): predictor object name
X (pandas.DataFrame): features in DataFrame format.
feature_labels (list): list of feature labels.
y (pandas.Dataframe or float): one or more outcomes.
predict_only (bool): True/False to specify predict/train mode.
predicted_quantity (str): 'cycle' or 'capacity'.
prediction_type (str): Type of regression - 'single' vs 'multi'.
"""
self.name = name
self.X = X
self.feature_labels = feature_labels
self.predict_only = predict_only
self.prediction_type = prediction_type
self.predicted_quantity = predicted_quantity
self.y = y
self.nominal_capacity = nominal_capacity
@classmethod
def from_processed_cycler_run_file(cls, path, features_label='full_model', predict_only=False,
predicted_quantity='cycle', prediction_type='multi',
diagnostic_features=False):
"""
Args:
path (str): string corresponding to file path with ProcessedCyclerRun object.
features_label (str): name of method for featurization.
predict_only (bool): True/False to specify predict/train mode.
predicted_quantity (str): 'cycle' or 'capacity'.
prediction_type (str): Type of regression - 'single' vs 'multi'.
diagnostic_features (bool): whether to compute diagnostic features.
"""
processed_cycler_run = loadfn(path)
if features_label == 'full_model':
return cls.init_full_model(processed_cycler_run, predict_only=predict_only,
predicted_quantity=predicted_quantity,
diagnostic_features=diagnostic_features,
prediction_type=prediction_type)
else:
raise NotImplementedError
@classmethod
def init_full_model(cls, processed_cycler_run, init_pred_cycle=10, mid_pred_cycle=91,
final_pred_cycle=100, predict_only=False, prediction_type='multi',
predicted_quantity="cycle", diagnostic_features=False):
"""
Generate features listed in early prediction manuscript
Args:
processed_cycler_run (beep.structure.ProcessedCyclerRun): information about cycler run
init_pred_cycle (int): index of initial cycle index used for predictions
mid_pred_cycle (int): index of intermediate cycle index used for predictions
final_pred_cycle (int): index of highest cycle index used for predictions
predict_only (bool): whether or not to include cycler life in the object
prediction_type (str): 'single': cycle life to reach 80% capacity.
'multi': remaining capacity at fixed cycles
predicted_quantity (str): quantity being predicted - cycles/capacity
diagnostic_features (bool): whether or not to compute diagnostic features
Returns:
beep.featurize.DegradationPredictor: DegradationPredictor corresponding to the ProcessedCyclerRun file.
"""
assert mid_pred_cycle > 10, 'Insufficient cycles for analysis'
assert final_pred_cycle > mid_pred_cycle, 'Must have final_pred_cycle > mid_pred_cycle'
ifinal = final_pred_cycle - 1 # python indexing
imid = mid_pred_cycle - 1
iini = init_pred_cycle - 1
summary = processed_cycler_run.summary
assert len(processed_cycler_run.summary) > final_pred_cycle, 'cycle count must exceed final_pred_cycle'
cycles_to_average_over = 40 # For nominal capacity, use median discharge capacity of first n cycles
interpolated_df = processed_cycler_run.cycles_interpolated
X = pd.DataFrame(np.zeros((1, 20)))
labels = []
# Discharge capacity, cycle 2 = Q(n=2)
X[0] = summary.discharge_capacity[1]
labels.append("discharge_capacity_cycle_2")
# Max discharge capacity - discharge capacity, cycle 2 = max_n(Q(n)) - Q(n=2)
X[1] = max(summary.discharge_capacity[np.arange(final_pred_cycle)] - summary.discharge_capacity[1])
labels.append("max_discharge_capacity_difference")
# Discharge capacity, cycle 100 = Q(n=100)
X[2] = summary.discharge_capacity[ifinal]
labels.append("discharge_capacity_cycle_100")
# Feature representing time-temperature integral over cycles 2 to 100
X[3] = np.nansum(summary.time_temperature_integrated[np.arange(final_pred_cycle)])
labels.append("integrated_time_temperature_cycles_1:100")
# Mean of charge times of first 5 cycles
X[4] = np.nanmean(summary.charge_duration[1:6])
labels.append("charge_time_cycles_1:5")
# Descriptors based on capacity loss between cycles 10 and 100.
Qd_final = interpolated_df.discharge_capacity[interpolated_df.cycle_index == ifinal]
Qd_10 = interpolated_df.discharge_capacity[interpolated_df.cycle_index == 9]
Vd = interpolated_df.voltage[interpolated_df.cycle_index == iini]
Qd_diff = Qd_final.values - Qd_10.values
X[5] = np.log10(np.abs(np.min(Qd_diff))) # Minimum
labels.append("abs_min_discharge_capacity_difference_cycles_2:100")
X[6] = np.log10(np.abs(np.mean(Qd_diff))) # Mean
labels.append("abs_mean_discharge_capacity_difference_cycles_2:100")
X[7] = np.log10(np.abs(np.var(Qd_diff))) # Variance
labels.append("abs_variance_discharge_capacity_difference_cycles_2:100")
X[8] = np.log10(np.abs(skew(Qd_diff))) # Skewness
labels.append("abs_skew_discharge_capacity_difference_cycles_2:100")
X[9] = np.log10(np.abs(kurtosis(Qd_diff))) # Kurtosis
labels.append("abs_kurtosis_discharge_capacity_difference_cycles_2:100")
X[10] = np.log10(np.abs(Qd_diff[0])) # First difference
labels.append("abs_first_discharge_capacity_difference_cycles_2:100")
X[11] = max(summary.temperature_maximum[list(range(1, final_pred_cycle))]) # Max T
labels.append("max_temperature_cycles_1:100")
X[12] = min(summary.temperature_minimum[list(range(1, final_pred_cycle))]) # Min T
labels.append("min_temperature_cycles_1:100")
# Slope and intercept of linear fit to discharge capacity as a fn of cycle #, cycles 2 to 100
X[13], X[14] = np.polyfit(
list(range(1, final_pred_cycle)),
summary.discharge_capacity[list(range(1, final_pred_cycle))], 1)
labels.append("slope_discharge_capacity_cycle_number_2:100")
labels.append("intercept_discharge_capacity_cycle_number_2:100")
# Slope and intercept of linear fit to discharge capacity as a fn of cycle #, cycles 91 to 100
X[15], X[16] = np.polyfit(
list(range(imid, final_pred_cycle)),
summary.discharge_capacity[list(range(imid, final_pred_cycle))], 1)
labels.append("slope_discharge_capacity_cycle_number_91:100")
labels.append("intercept_discharge_capacity_cycle_number_91:100")
IR_trend = summary.dc_internal_resistance[list(range(1, final_pred_cycle))]
if any(v == 0 for v in IR_trend):
IR_trend[IR_trend == 0] = np.nan
# Internal resistance minimum
X[17] = np.nanmin(IR_trend)
labels.append("min_internal_resistance_cycles_2:100")
# Internal resistance at cycle 2
X[18] = summary.dc_internal_resistance[1]
labels.append("internal_resistance_cycle_2")
# Internal resistance at cycle 100 - cycle 2
X[19] = summary.dc_internal_resistance[ifinal] - summary.dc_internal_resistance[1]
labels.append("internal_resistance_difference_cycles_2:100")
if diagnostic_features:
X_diagnostic, labels_diagnostic = init_diagnostic_features(processed_cycler_run)
X = pd.concat([X, X_diagnostic], axis=1, sort=False)
labels = labels + labels_diagnostic
X.columns = labels
if predict_only:
y = None
else:
if prediction_type == 'single':
y = processed_cycler_run.get_cycle_life()
elif prediction_type == 'multi':
if predicted_quantity == 'cycle':
y = processed_cycler_run.cycles_to_reach_set_capacities(
thresh_max_cap=0.98, thresh_min_cap=0.78, interval_cap=0.03)
elif predicted_quantity == 'capacity':
y = processed_cycler_run.capacities_at_set_cycles()
else:
raise NotImplementedError(
"{} predicted_quantity type not implemented".format(
predicted_quantity))
nominal_capacity = np.median(summary.discharge_capacity.iloc[0:cycles_to_average_over])
return cls('full_model', X, feature_labels=labels, y=y,
nominal_capacity=nominal_capacity, predict_only=predict_only,
prediction_type=prediction_type, predicted_quantity=predicted_quantity)
def as_dict(self):
"""
Method for dictionary serialization
Returns:
dict: corresponding to dictionary for serialization
"""
obj = {"@module": self.__class__.__module__,
"@class": self.__class__.__name__,
"name": self.name,
"X": self.X.to_dict("list"),
"feature_labels": self.feature_labels,
"predict_only": self.predict_only,
"prediction_type": self.prediction_type,
"nominal_capacity":self.nominal_capacity
}
if isinstance(self.y, pd.DataFrame):
obj["y"] = self.y.to_dict("list")
else:
obj["y"] = self.y
return obj
@classmethod
def from_dict(cls, d):
"""MSONable deserialization method"""
d['X'] = pd.DataFrame(d['X'])
from math import pi
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from scipy.optimize import minimize_scalar
__author__ = "<NAME>"
__credits__ = ["<NAME>"]
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__version__ = "0.1"
__license__ = "MIT"
# gravitational acceleration
g = 9.81 # m/s²
# kinematic viscosity
ny = 1.3e-6 # m^2/s (10°C water)
# _________________________________________________________________________________________________________________
def log_scale(start, end, minor=False, lower=None, upper=None):
"""
get the log scale ticks for the diagram
Args:
start (int):
end (int):
minor (bool):
lower (int | float):
upper (int | float):
Returns:
numpy.array: ticks of the scale
"""
if minor:
std = np.array([1., 1.1, 1.2, 1.3, 1.4, 1.5, 1.6, 1.7, 1.8, 1.9, 2.,
2.2, 2.4, 2.6, 2.8, 3., 3.2, 3.4, 3.6, 3.8, 4., 4.2,
4.4, 4.6, 4.8, 5., 5.5, 6., 6.5, 7., 7.5, 8., 8.5,
9., 9.5, 10.])
else:
std = np.array([1., 1.5, 2., 3., 4., 5., 6., 8., 10.])
res = np.array([])
for x in range(start, end):
res = np.append(res, std * 10. ** x)
res = np.unique(res.round(3))
if lower is not None:
res = res[res >= lower]
if upper is not None:
res = res[res <= upper]
return res
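# For example (sketch): log_scale(0, 2, lower=1, upper=50)
# returns array([1., 1.5, 2., 3., 4., 5., 6., 8., 10., 15., 20., 30., 40., 50.])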
def nomogram(k=0.1):
"""
make the nomogram
Args:
k (float): roughness in (mm)
Returns:
matplotlib.pyplot.Figure: of the plot
"""
# diameter
d = np.array(
[0.02, 0.03, 0.04, 0.05, 0.06, 0.07, 0.08, 0.1, 0.125, 0.15, 0.2, 0.25, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0,
1.1, 1.2, 1.3, 1.4, 1.5, 1.6, 1.7, 1.8, 1.9, 2.0]) # m
# velocity
v = np.array(
[0.1, 0.15, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0, 1.2, 1.4, 1.6, 1.8, 2.0, 2.5, 3.0, 3.5, 4.0, 4.5, 5.0,
6.0, 7.0, 8.0, 9.0, 10.0, 12.0, 14.0, 16.0, 18.0, 20.0]) # m/s
# head loss
J = log_scale(-1, 3, minor=True) # mm/m
J_labels = log_scale(-1, 3, minor=False)
# flow
Q = log_scale(-1, 5, minor=True, upper=20000) # L/s
Q_labels = log_scale(-1, 5, minor=False, upper=20000)
# _________________________________________________________________________________________________________________
def area(d):
return d ** 2 * pi / 4
# _________________________________________________________________________________________________________________
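# Flow velocity from the Prandtl-Colebrook (Colebrook-White) relation, with the head loss J
# given in mm/m (hence the J/1000 conversion), diameter d in m and roughness k in mm.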
def velocity(J, d):
return -2 * np.log10(2.51 * ny / (d * np.sqrt(2 * g * (J / 1000) * d)) +
(k / 1000) / (3.71 * d)) * \
np.sqrt(2 * g * d * (J / 1000))
# _________________________________________________________________________________________________________________
def get_diameter(v, J):
res = minimize_scalar(lambda x: abs(velocity(J, x) - v), bounds=(min(d), max(d)), method='bounded').x
if (round(res, 5) >= max(d)) or (round(res, 5) <= min(d)):
return np.NaN
return res
# _________________________________________________________________________________________________________________
fig, ax = plt.subplots()
def bbox(pad):
return {'facecolor': 'white', 'alpha': 0.8, 'pad': pad, 'linewidth': 0}
# _________________________________________________________________________________________________________________
# diameter lines
df_d = pd.DataFrame(index=J, columns=d)
"""
Asset selling driver script
"""
from collections import namedtuple
import pandas as pd
import numpy as np
from AssetSellingModel_Q3 import AssetSellingModel
from AssetSellingPolicy_Q3 import AssetSellingPolicy
import matplotlib.pyplot as plt
from copy import copy
import math
import time
plt.rcParams["figure.figsize"] = (15,8)
if __name__ == "__main__":
# read in policy parameters from an Excel spreadsheet, "asset_selling_policy_parameters.xlsx"
sheet1 = pd.read_excel("asset_selling_policy_parameters.xlsx", sheet_name="Sheet1")
params = zip(sheet1['param1'], sheet1['param2'])
param_list = list(params)
sheet2 = pd.read_excel("asset_selling_policy_parameters.xlsx", sheet_name="Sheet2")
sheet3 = pd.read_excel("asset_selling_policy_parameters.xlsx", sheet_name="Sheet3")
biasdf = pd.read_excel("asset_selling_policy_parameters.xlsx", sheet_name="Sheet4")
policy_selected = sheet3['Policy'][0]
T = sheet3['TimeHorizon'][0]
gamma = sheet3['DiscountFactor'][0]
initPrice = sheet3['InitialPrice'][0]
initBias = sheet3['InitialBias'][0]
exog_params = {'UpStep':sheet3['UpStep'][0],'DownStep':sheet3['DownStep'][0],'Variance':sheet3['Variance'][0],'biasdf':biasdf}
nIterations = sheet3['Iterations'][0]
printStep = sheet3['PrintStep'][0]
printIterations = [0]
printIterations.extend(list(reversed(range(nIterations-1,0,-printStep))))
print("exog_params ",exog_params)
# initialize the model and the policy
policy_names = ['sell_low', 'high_low', 'track']
#####
state_names = ['price', 'resource','bias', 'prev_price', 'prev_price2']
init_state = {'price': initPrice, 'resource': 1,'bias':initBias, \
'prev_price':initPrice, 'prev_price2':initPrice}
#####
decision_names = ['sell', 'hold']
M = AssetSellingModel(state_names, decision_names, init_state,exog_params,T,gamma)
P = AssetSellingPolicy(M, policy_names)
t = 0
prev_price = init_state['price']
# make a policy_info dict object
policy_info = {'sell_low': param_list[0],
'high_low': param_list[1],
'track': param_list[2] + (prev_price, prev_price)}
print("Parameters track!!!!!!!!!!!! ",policy_info['track'])
start = time.time()
#####
if policy_selected not in ['full_grid', 'track']:
#####
#print("Selected policy {}, time horizon {}, initial price {} and number of iterations {}".format(policy_selected,T,initPrice,
# ))
contribution_iterations=[P.run_policy(param_list, policy_info, policy_selected, t) for ite in list(range(nIterations))]
contribution_iterations = pd.Series(contribution_iterations)
import os
import pandas as pd
import shutil
import argparse
import copy
parser = argparse.ArgumentParser()
parser.add_argument('--players', nargs='*', default=["shox"])
parser.add_argument('--stats', nargs='*', default=["4K", "5K", "1v3 won", "1v4 won", "1v5 won", "Knife"])
parser.add_argument('--demo_folder', default="C:\\Program Files (x86)\\Steam\\steamapps\\common\\Counter-Strike Global Offensive\\csgo\\replays")
parser.add_argument('--copy_demos', default='False', type=str)
args = parser.parse_args()
PLAYERS = args.players
INTERESTING_STATS = ["Name"] + args.stats
DEMO_FOLDER = args.demo_folder
excel_files = os.listdir()
out_df = pd.DataFrame()
for excel_file in excel_files:
if(excel_file.find(".xlsx") > 0):
player_stats = pd.read_excel(excel_file, sheet_name=1, engine="openpyxl", header=0)
import os
import numpy as np
import pandas as pd
from pkg_resources import resource_filename
def load_arrests(return_X_y=False, give_pandas=False):
"""
Loads the arrests dataset which can serve as a benchmark for fairness. It is data on
the police treatment of individuals arrested in Toronto for simple possession of small
quantities of marijuana. The goal is to predict whether or not the arrestee was released
with a summons while maintaining a degree of fairness.
:param return_X_y: If True, returns ``(data, target)`` instead of a dict object.
:param give_pandas: give the pandas dataframe instead of X, y matrices (default=False)
:Example:
>>> from sklego.datasets import load_arrests
>>> X, y = load_arrests(return_X_y=True)
>>> X.shape
(5226, 7)
>>> y.shape
(5226,)
>>> load_arrests(give_pandas=True).columns
Index(['released', 'colour', 'year', 'age', 'sex', 'employed', 'citizen',
'checks'],
dtype='object')
The dataset was copied from the carData R package and can originally be found in:
- Personal communication from <NAME>, York University.
The documentation page of the dataset from the package can be viewed here:
http://vincentarelbundock.github.io/Rdatasets/doc/carData/Arrests.html
"""
filepath = resource_filename("sklego", os.path.join("data", "arrests.zip"))
df = pd.read_csv(filepath)
if give_pandas:
return df
X, y = (
df[["colour", "year", "age", "sex", "employed", "citizen", "checks"]].values,
df["released"].values,
)
if return_X_y:
return X, y
return {"data": X, "target": y}
def load_chicken(return_X_y=False, give_pandas=False):
"""
Loads the chicken dataset. The chicken data has 578 rows and 4 columns
from an experiment on the effect of diet on early growth of chicks.
The body weights of the chicks were measured at birth and every second
day thereafter until day 20. They were also measured on day 21.
There were four groups on chicks on different protein diets.
:param return_X_y: If True, returns ``(data, target)`` instead of a dict object.
:param give_pandas: give the pandas dataframe instead of X, y matrices (default=False)
:Example:
>>> from sklego.datasets import load_chicken
>>> X, y = load_chicken(return_X_y=True)
>>> X.shape
(578, 3)
>>> y.shape
(578,)
>>> load_chicken(give_pandas=True).columns
Index(['weight', 'time', 'chick', 'diet'], dtype='object')
The datasets can be found in the following sources:
- Crowder, M. and <NAME>. (1990), Analysis of Repeated Measures, Chapman and Hall (example 5.3)
- Hand, D. and <NAME>. (1996), Practical Longitudinal Data Analysis, Chapman and Hall (table A.2)
"""
filepath = resource_filename("sklego", os.path.join("data", "chickweight.zip"))
df = pd.read_csv(filepath)
if give_pandas:
return df
X, y = df[["time", "diet", "chick"]].values, df["weight"].values
if return_X_y:
return X, y
return {"data": X, "target": y}
def load_abalone(return_X_y=False, give_pandas=False):
"""
Loads the abalone dataset where the goal is to predict the gender of the creature.
:param return_X_y: If True, returns ``(data, target)`` instead of a dict object.
:param give_pandas: give the pandas dataframe instead of X, y matrices (default=False)
:Example:
>>> from sklego.datasets import load_abalone
>>> X, y = load_abalone(return_X_y=True)
>>> X.shape
(4177, 8)
>>> y.shape
(4177,)
>>> load_abalone(give_pandas=True).columns
Index(['sex', 'length', 'diameter', 'height', 'whole_weight', 'shucked_weight',
'viscera_weight', 'shell_weight', 'rings'],
dtype='object')
The dataset was copied from Kaggle and can originally be found in: can be found in the following sources:
- <NAME>, <NAME>, <NAME>, <NAME> and <NAME> (1994)
"The Population Biology of Abalone (_Haliotis_ species) in Tasmania."
Sea Fisheries Division, Technical Report No. 48 (ISSN 1034-3288)
"""
filepath = resource_filename("sklego", os.path.join("data", "abalone.zip"))
df = pd.read_csv(filepath)
if give_pandas:
return df
X = df[
[
"length",
"diameter",
"height",
"whole_weight",
"shucked_weight",
"viscera_weight",
"shell_weight",
"rings",
]
].values
y = df["sex"].values
if return_X_y:
return X, y
return {"data": X, "target": y}
def load_heroes(return_X_y=False, give_pandas=False):
"""
A dataset from a video game: "heroes of the storm". The goal of the dataset
is to predict the attack type. Note that the pandas dataset returns more information.
This is because we wanted to keep the X simple in the return_X_y case.
:param return_X_y: If True, returns ``(data, target)`` instead of a dict object.
:param give_pandas: give the pandas dataframe instead of X, y matrices (default=False)
:Example:
>>> X, y = load_heroes(return_X_y=True)
>>> X.shape
(84, 2)
>>> y.shape
(84,)
>>> df = load_heroes(give_pandas=True)
>>> df.columns
Index(['name', 'attack_type', 'role', 'health', 'attack', 'attack_spd'], dtype='object')
"""
filepath = resource_filename("sklego", os.path.join("data", "heroes.zip"))
df = pd.read_csv(filepath)
import pytest
import pytz
import dateutil
import numpy as np
from datetime import datetime
from dateutil.tz import tzlocal
import pandas as pd
import pandas.util.testing as tm
from pandas import (DatetimeIndex, date_range, Series, NaT, Index, Timestamp,
Int64Index, Period)
class TestDatetimeIndex(object):
def test_astype(self):
# GH 13149, GH 13209
idx = DatetimeIndex(['2016-05-16', 'NaT', NaT, np.NaN])
result = idx.astype(object)
expected = Index([Timestamp('2016-05-16')] + [NaT] * 3, dtype=object)
tm.assert_index_equal(result, expected)
result = idx.astype(int)
expected = Int64Index([1463356800000000000] +
[-9223372036854775808] * 3, dtype=np.int64)
tm.assert_index_equal(result, expected)
rng = date_range('1/1/2000', periods=10)
result = rng.astype('i8')
tm.assert_index_equal(result, Index(rng.asi8))
tm.assert_numpy_array_equal(result.values, rng.asi8)
def test_astype_with_tz(self):
# with tz
rng = date_range('1/1/2000', periods=10, tz='US/Eastern')
result = rng.astype('datetime64[ns]')
expected = (date_range('1/1/2000', periods=10,
tz='US/Eastern')
.tz_convert('UTC').tz_localize(None))
tm.assert_index_equal(result, expected)
# BUG#10442 : testing astype(str) is correct for Series/DatetimeIndex
result = pd.Series(pd.date_range('2012-01-01', periods=3)).astype(str)
expected = pd.Series(
['2012-01-01', '2012-01-02', '2012-01-03'], dtype=object)
tm.assert_series_equal(result, expected)
result = Series(pd.date_range('2012-01-01', periods=3,
tz='US/Eastern')).astype(str)
expected = Series(['2012-01-01 00:00:00-05:00',
'2012-01-02 00:00:00-05:00',
'2012-01-03 00:00:00-05:00'],
dtype=object)
tm.assert_series_equal(result, expected)
# GH 18951: tz-aware to tz-aware
idx = date_range('20170101', periods=4, tz='US/Pacific')
result = idx.astype('datetime64[ns, US/Eastern]')
expected = date_range('20170101 03:00:00', periods=4, tz='US/Eastern')
tm.assert_index_equal(result, expected)
# GH 18951: tz-naive to tz-aware
idx = date_range('20170101', periods=4)
result = idx.astype('datetime64[ns, US/Eastern]')
expected = date_range('20170101', periods=4, tz='US/Eastern')
tm.assert_index_equal(result, expected)
def test_astype_str_compat(self):
# GH 13149, GH 13209
# verify that we are returning NaT as a string (and not unicode)
idx = DatetimeIndex(['2016-05-16', 'NaT', NaT, np.NaN])
result = idx.astype(str)
expected = Index(['2016-05-16', 'NaT', 'NaT', 'NaT'], dtype=object)
tm.assert_index_equal(result, expected)
def test_astype_str(self):
# test astype string - #10442
result = date_range('2012-01-01', periods=4,
name='test_name').astype(str)
expected = Index(['2012-01-01', '2012-01-02', '2012-01-03',
'2012-01-04'], name='test_name', dtype=object)
tm.assert_index_equal(result, expected)
# test astype string with tz and name
result = date_range('2012-01-01', periods=3, name='test_name',
tz='US/Eastern').astype(str)
expected = Index(['2012-01-01 00:00:00-05:00',
'2012-01-02 00:00:00-05:00',
'2012-01-03 00:00:00-05:00'],
name='test_name', dtype=object)
tm.assert_index_equal(result, expected)
# test astype string with freqH and name
result = date_range('1/1/2011', periods=3, freq='H',
name='test_name').astype(str)
expected = Index(['2011-01-01 00:00:00', '2011-01-01 01:00:00',
'2011-01-01 02:00:00'],
name='test_name', dtype=object)
tm.assert_index_equal(result, expected)
# test astype string with freqH and timezone
result = date_range('3/6/2012 00:00', periods=2, freq='H',
tz='Europe/London', name='test_name').astype(str)
expected = Index(['2012-03-06 00:00:00+00:00',
'2012-03-06 01:00:00+00:00'],
dtype=object, name='test_name')
tm.assert_index_equal(result, expected)
def test_astype_datetime64(self):
# GH 13149, GH 13209
idx = DatetimeIndex(['2016-05-16', 'NaT', NaT, np.NaN])
result = idx.astype('datetime64[ns]')
tm.assert_index_equal(result, idx)
assert result is not idx
result = idx.astype('datetime64[ns]', copy=False)
tm.assert_index_equal(result, idx)
assert result is idx
idx_tz = DatetimeIndex(['2016-05-16', 'NaT', NaT, np.NaN], tz='EST')
result = idx_tz.astype('datetime64[ns]')
expected = DatetimeIndex(['2016-05-16 05:00:00', 'NaT', 'NaT', 'NaT'],
dtype='datetime64[ns]')
tm.assert_index_equal(result, expected)
def test_astype_object(self):
rng = date_range('1/1/2000', periods=20)
casted = rng.astype('O')
exp_values = list(rng)
tm.assert_index_equal(casted, Index(exp_values, dtype=np.object_))
assert casted.tolist() == exp_values
@pytest.mark.parametrize('tz', [None, 'Asia/Tokyo'])
def test_astype_object_tz(self, tz):
idx = pd.date_range(start='2013-01-01', periods=4, freq='M',
name='idx', tz=tz)
expected_list = [Timestamp('2013-01-31', tz=tz),
Timestamp('2013-02-28', tz=tz),
Timestamp('2013-03-31', tz=tz),
Timestamp('2013-04-30', tz=tz)]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.astype(object)
tm.assert_index_equal(result, expected)
assert idx.tolist() == expected_list
def test_astype_object_with_nat(self):
idx = DatetimeIndex([datetime(2013, 1, 1), datetime(2013, 1, 2),
pd.NaT, datetime(2013, 1, 4)], name='idx')
expected_list = [Timestamp('2013-01-01'),
Timestamp('2013-01-02'), pd.NaT,
Timestamp('2013-01-04')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.astype(object)
tm.assert_index_equal(result, expected)
import cv2
import numpy as np
import pandas as pd
from easygui import *
from scipy.stats.mstats import skew
from os import listdir
from os.path import isfile, join
from sklearn.cluster import KMeans
from sklearn.preprocessing import MinMaxScaler, StandardScaler
import matplotlib.pyplot as plt
from scipy.stats import skew
from sklearn import preprocessing
np.set_printoptions(precision=1)
np.set_printoptions(threshold=np.inf)
# Function to load images
# Supports color image and Y channel
def load_img_by_id(imgPath, return_gray=True):
img = cv2.imread(imgPath)
img = cv2.cvtColor(img, cv2.COLOR_BGR2YUV)
if return_gray:
return img[:, :, 0]
return img
# Function to compute color moments
def compute_color_moments(img, x_window_size=100, y_window_size=100):
x_num_windows = img.shape[0] // x_window_size
y_num_windows = img.shape[1] // y_window_size
x_splitted = np.split(img, x_num_windows)
y_splitted = [np.asarray(np.split(x, y_num_windows, axis=1)) for x in x_splitted]  # list, not a lazy map, so np.asarray stacks it correctly
y_splitted_xsize_ysize = np.asarray(y_splitted)
mean_Y = np.mean(y_splitted_xsize_ysize[:, :, :, :, 0], (2, 3), dtype=np.float32)
mean_U = np.mean(y_splitted_xsize_ysize[:, :, :, :, 1], (2, 3), dtype=np.float32)
mean_V = np.mean(y_splitted_xsize_ysize[:, :, :, :, 2], (2, 3), dtype=np.float32)
std_Y = np.std(y_splitted_xsize_ysize[:, :, :, :, 0], (2, 3), dtype=np.float32)
std_U = np.std(y_splitted_xsize_ysize[:, :, :, :, 1], (2, 3), dtype=np.float32)
std_V = np.std(y_splitted_xsize_ysize[:, :, :, :, 2], (2, 3), dtype=np.float32)
skew_Y = skew(y_splitted_xsize_ysize[:, :, :, :, 0].reshape(x_num_windows, y_num_windows,
x_window_size * y_window_size), axis=2)
skew_U = skew(y_splitted_xsize_ysize[:, :, :, :, 1].reshape(x_num_windows, y_num_windows,
x_window_size * y_window_size), axis=2)
skew_V = skew(y_splitted_xsize_ysize[:, :, :, :, 2].reshape(x_num_windows, y_num_windows,
x_window_size * y_window_size), axis=2)
all_features = np.dstack((mean_Y, std_Y, skew_Y, mean_U, std_U, skew_U, mean_V, std_V, skew_V))
return all_features.reshape(x_num_windows * y_num_windows * all_features.shape[2])
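# For example, a 1200 x 1600 image with the default 100 x 100 windows gives
# 12 * 16 = 192 windows and 192 * 9 = 1728 feature values (9 moments per window).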
# Function to compute sift features
def compute_sift_features(img_gray):
sift = cv2.xfeatures2d.SIFT_create()
keyPoints, desc = sift.detectAndCompute(img_gray, None)
return np.asarray(desc)
# Function to execute a given feature extraction technique per folder
def compute_features_by_folder(folder_name, ftr_comp_func, return_gray=True,
folder_base_path='/Users/vedavyas/Desktop/CSE515/dataset/'):
folder_path = folder_base_path + folder_name + '/'
fileNames = [f for f in listdir(folder_path) if (isfile(join(folder_path, f)) and not (f.startswith('.')))]
feature_list = []
for fileName in fileNames:
print(fileName)
# print folder_path
        img = load_img_by_id(join(folder_path, fileName), return_gray)  # prepend the folder so cv2.imread can locate the file
features = ftr_comp_func(img)
feature_list.append(features)
return pd.DataFrame({'FileName': fileNames, 'Features': feature_list})
# Function to execute a given feature extraction technique per single file
def compute_features_by_file(file_name, ftr_comp_func, return_gray=True,
file_base_path='/Users/vedavyas/Desktop/CSE515/dataset/Hands_Test/'):
feature_list = []
fileNames = [file_name]
print(file_name)
img = load_img_by_id(file_name, return_gray)
features = ftr_comp_func(img)
feature_list.append(features)
return pd.DataFrame({'FileName': fileNames, 'Features': feature_list})
# Function to Bag of Visual Words
def compute_BOVW(feature_descriptors, n_clusters=100):
print("Bag of visual words with clusters: ", n_clusters)
# print feature_descriptors.shape
combined_features = np.vstack(np.array(feature_descriptors))
print("Size of stacked features: ", combined_features.shape)
std_scaler = StandardScaler()
combined_features = std_scaler.fit_transform(combined_features)
print("Starting K-means training")
kmeans = KMeans(n_clusters=n_clusters, random_state=777).fit(combined_features)
print("Finished K-means training, moving on to prediction")
bovw_vector = np.zeros([len(feature_descriptors), n_clusters])
for index, features in enumerate(feature_descriptors):
features_scaled = std_scaler.transform(features)
for i in kmeans.predict(features_scaled):
bovw_vector[index, i] += 1
bovw_vector_normalized = preprocessing.normalize(bovw_vector, norm='l2')
print("Finished K-means")
return list(bovw_vector_normalized)
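# Example of chaining the helpers above into a bag-of-visual-words pipeline.
# This is only a sketch: the folder name 'Hands' and the cluster count are
# illustrative assumptions, not values taken from this script.
#
#     sift_df = compute_features_by_folder('Hands', compute_sift_features, return_gray=True)
#     sift_df['BOVW'] = compute_BOVW(sift_df['Features'].values, n_clusters=100)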
# Function to save features for the purpose of viewing in a human readable format
def save_features_for_view(filename, feature_df_input, save_folder='FileFeatures',
save_base_path='/Users/vedavyas/Desktop/CSE515/dataset/'):
feature_df = feature_df_input.copy()
| pd.set_option('display.max_colwidth', -1) | pandas.set_option |
import os
import re
import itertools
from collections import defaultdict, namedtuple
import pandas as pd
import numpy as np
import ruamel_yaml as yaml
from . import utils
from . import lint_functions
import logging
logger = logging.getLogger(__name__)
"""
QC checks (linter) for recipes, returning a TSV of issues identified.
The strategy here is to use simple functions that do a single check on
a recipe. When run on a single recipe it can be used for linting new
contributions; when run on all recipes it helps highlight entire classes of
problems to be addressed.
See the `lint_functions` module for these.
After writing the function, register it in the global `registry` dict,
`lint_functions.registry`.
The output is a TSV where the "info" column contains the dicts returned by
each check function, and this column is expanded into multiple extra columns.
While this results in a lot of NaNs, it makes it easy to drop non-interesting
cases with pandas, e.g.,
recipes_with_missing_tests = df.dropna(subset=['no_tests'])
or
def not_in_bioconda(x):
if not isinstance(x, set):
return np.nan
res = set(x).difference(['bioconda'])
if len(res):
return(res)
return np.nan
df['other'] = df.exists_in_channel.apply(not_in_bioconda)
other_channels = df[['recipe', 'other']].dropna()
---------------------------------------------------------------------------
TODO:
- check version and build number against master branch. I think there's stuff
in bioconductor updating to handle this sort of thing. Also bioconda_utils
has utils for checking against master branch.
- if version changed, ensure build number is 0
- if version unchanged, ensure build number incremented
- currently we don't pay attention to py27/py3. It would be nice to handle
that.
- how to define valid licenses?
(conda_build.metadata.ensure_valid_license_family is for family)
- gcc/llvm have their respective preprocessing selectors
- excessive comments (from skeletons?)
"""
usage = """
Perform various checks on recipes.
"""
class LintArgs(namedtuple('LintArgs', (
'df', 'exclude', 'registry',
))):
"""
df : pandas.DataFrame
Dataframe containing channel data, typically as output from
`channel_dataframe()`
exclude : list
List of function names in `registry` to skip globally. When running on
CI, this will be merged with anything else detected from the commit
message or LINT_SKIP environment variable using the special string
"[skip lint <function name> for <recipe name>]". While those other
mechanisms define skipping on a recipe-specific basis, this argument
can be used to skip tests for all recipes. Use sparingly.
registry : list or tuple
List of functions to apply to each recipe. If None, defaults to
`lint_functions.registry`.
"""
def __new__(cls, df, exclude=None, registry=None):
return super().__new__(cls, df, exclude, registry)
def channel_dataframe(cache=None, channels=['bioconda', 'conda-forge',
'defaults']):
"""
Return channel info as a dataframe.
Parameters
----------
cache : str
Filename of cached channel info
channels : list
Channels to include in the dataframe
"""
if cache is not None and os.path.exists(cache):
df = pd.read_table(cache)
else:
# Get the channel data into a big dataframe
dfs = []
for platform in ['linux', 'osx']:
for channel in channels:
repo, noarch = utils.get_channel_repodata(channel, platform)
x = pd.DataFrame(repo)
x = x.drop([
'arch',
'default_numpy_version',
'default_python_version',
'platform',
'subdir'])
for k in [
'build', 'build_number', 'name', 'version', 'license',
'platform'
]:
x[k] = x['packages'].apply(lambda y: y.get(k, np.nan))
x['channel'] = channel
dfs.append(x)
df = pd.concat(dfs).drop(['info', 'packages'], axis=1)
if cache is not None:
df.to_csv(cache, sep='\t')
return df
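# Illustrative usage of channel_dataframe() together with lint(); the cache
# filename and recipe path are placeholders, not values used elsewhere here:
#
#     df = channel_dataframe(cache='channel-cache.tsv', channels=['bioconda', 'conda-forge'])
#     report = lint(['recipes/samtools'], LintArgs(df=df, exclude=None, registry=None))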
def lint(recipes, lint_args):
"""
Parameters
----------
recipes : list
List of recipes to lint
lint_args : LintArgs
"""
df = lint_args.df
exclude = lint_args.exclude
registry = lint_args.registry
if registry is None:
registry = lint_functions.registry
skip_dict = defaultdict(list)
commit_message = ""
if 'LINT_SKIP' in os.environ:
# Allow overwriting of commit message
commit_message = os.environ['LINT_SKIP']
else:
# Obtain commit message from last commit.
commit_message = utils.run(
['git', 'log', '--format=%B', '-n', '1'], mask=False
).stdout
# For example the following text in the commit message will skip
# lint_functions.uses_setuptools for recipe argparse:
#
# [ lint skip uses_setuptools for argparse ]
skip_re = re.compile(
r'\[\s*lint skip (?P<func>\w+) for (?P<recipe>.*?)\s*\]')
to_skip = skip_re.findall(commit_message)
if exclude is not None:
# exclude arg is used to skip test for *all* packages
to_skip += list(itertools.product(exclude, recipes))
for func, recipe in to_skip:
skip_dict[recipe].append(func)
hits = []
for recipe in sorted(recipes):
# Since lint functions need a parsed meta.yaml, checking for parsing
# errors can't be a lint function.
#
# TODO: do we need a way to skip this the same way we can skip lint
# functions? I can't think of a reason we'd want to keep an unparseable
# YAML.
metas = []
try:
for platform in ["linux", "osx"]:
config = utils.load_conda_build_config(platform=platform, trim_skip=False)
metas.extend(utils.load_all_meta(recipe, config=config, finalize=False))
except (
yaml.scanner.ScannerError, yaml.constructor.ConstructorError
) as e:
result = {'parse_error': str(e)}
hits.append(
{'recipe': recipe,
'check': 'parse_error',
'severity': 'ERROR',
'info': result})
continue
logger.debug('lint {}'.format(recipe))
# skips defined in commit message
skip_for_this_recipe = set(skip_dict[recipe])
# skips defined in meta.yaml
for meta in metas:
persistent = meta.get_value('extra/skip-lints', [])
skip_for_this_recipe.update(persistent)
for func in registry:
if func.__name__ in skip_for_this_recipe:
skip_sources = [
('Commit message', skip_dict[recipe]),
('skip-lints', persistent),
]
for source, skips in skip_sources:
if func.__name__ not in skips:
continue
logger.info(
'%s defines skip lint test %s for recipe %s'
% (source, func.__name__, recipe))
continue
result = func(recipe, metas, df)
if result:
hits.append(
{'recipe': recipe,
'check': func.__name__,
'info': result})
if hits:
report = | pd.DataFrame(hits) | pandas.DataFrame |
# -*- coding: utf-8 -*-
"""
Created on Wed Jan 15 21:56:08 2020
@author: <NAME>
"""
# STEP1----------------- # Importing the libraries------------
#-------------------------------------------------------------
import os
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import glob
import scipy.signal as ss
import csv
import sklearn
from quilt.data.ResidentMario import missingno_data
import missingno as msno
import seaborn as sns
from sklearn.impute import SimpleImputer
# STEP2------------------# Importing the DATASET ------------
#------------------------------------------------------------
# Loading the data exported from iMotions; the path below points to the csv file directory
os.chdir("\\ML4TakeOver\\Data\\RawData")
directory = os.getcwd()
dataFrame_takeover = pd.read_csv('takeover_Alarm_Eye_Car_Data_10sec.csv')
dataFrame_takeover = dataFrame_takeover.drop(['Unnamed: 0','Unnamed: 0.1',
'CurrentGear','GazeVelocityAngle','GazeRightx', 'GazeRighty',
'AutoGear','AutoBrake','GazeLeftx', 'GazeLefty'], axis=1)
## CHANGING FALSE ALARM TO TRUE ALARM FOR FIRST few participants CHECK IF THEY SHOULD HA
searchforSeries = ['004','005','006','007','008']
dataFrame_takeover.loc[(dataFrame_takeover['Name'].str.contains('|'.join(searchforSeries))), 'Coming_AlarmType'] = 'TA'
# STEP5============================ Adding NoneDriving Task column ======================
#========================================================================================
### creat Task column
# map task to the alarm
TaskAlarm = {'Reading' : [16,84,103,339],
'Cell': [5, 259, 284, 323],
'Talk': [137, 178, 185, 332],
'Question': [213, 254, 191]}
dataFrame_takeover['NDTask'] = 'XXX'
dataFrame_takeover.loc[dataFrame_takeover['Coming_Alarm'].isin(TaskAlarm['Reading']), 'NDTask'] = 'Reading' # reading task
dataFrame_takeover.loc[dataFrame_takeover['Coming_Alarm'].isin(TaskAlarm['Cell']), 'NDTask'] = 'Cell' # cell task
dataFrame_takeover.loc[dataFrame_takeover['Coming_Alarm'].isin(TaskAlarm['Talk']), 'NDTask'] = 'Talk' # talk task
dataFrame_takeover.loc[dataFrame_takeover['Coming_Alarm'].isin(TaskAlarm['Question']), 'NDTask'] = 'Question' # question task
#================= Visualizing "TakeOver/Not-takeover" for each Alarm type ========================
#==========================================================================================================
# Remove 000 from data for visualization
# we don't have zero alarm type anymore
dataFrame_Alarm = dataFrame_takeover
# check the number of user's per alarm
tmp_result = pd.DataFrame(dataFrame_Alarm.groupby(['Coming_Alarm']).agg({'Name': 'unique'}).reset_index())
[len(a) for a in tmp_result['Name']]
tmp2 = pd.DataFrame(dataFrame_Alarm.groupby(['Name']).agg({'Coming_Alarm': 'unique'}).reset_index())
[len(a) for a in tmp2['Coming_Alarm']]
# How many takeover and not-takeover per alarm?
dataFrame_Alarm.groupby(['Coming_AlarmType','Takeover']).size().plot(kind = 'barh', legend = False) # Frequency Based
plt.show()
dataFrame_Alarm.groupby(['Coming_AlarmType','Takeover']).agg({"Name": lambda x: x.nunique()}).plot(kind = 'barh', legend = False)
# Takeover frequency per individuals
tmp_dataframe = pd.DataFrame(dataFrame_Alarm.groupby(['Name', 'Coming_AlarmType','Takeover']).agg({"Coming_Alarm": lambda x: x.nunique()}))
tmp_dataframe.to_csv("UserComingAlarmType"+'.csv')
dataFrame_Alarm.groupby(['Name', 'Coming_AlarmType']).agg({"Takeover": lambda x: x.nunique()})
dataFrame_Alarm.groupby(['Name', 'Coming_AlarmType','Takeover']).size().unstack().plot(kind = 'bar', stacked = True)
dataFrame_AlarmIndividual = pd.DataFrame(dataFrame_Alarm.groupby(['Name', 'Coming_AlarmType','Takeover']).size().reset_index(name = 'frequency'))
| pd.DataFrame(tmp_dataframe) | pandas.DataFrame |
import numpy as np
import pandas as pd
from lexos.helpers.error_messages import SEG_NON_POSITIVE_MESSAGE, \
EMPTY_DTM_MESSAGE
from lexos.models.top_words_model import TopwordModel, TopwordTestOptions
from lexos.receivers.top_words_receiver import TopwordAnalysisType
# ---------------------------- Test for z-test ------------------------------
# noinspection PyProtectedMember
class TestZTest:
def test_normal_case(self):
assert round(
TopwordModel._z_test(p1=0.1, p2=0.3, n1=10, n2=1000), 2) == -1.38
assert round(
TopwordModel._z_test(p1=0.3, p2=0.1, n1=100, n2=100), 2) == 3.54
assert TopwordModel._z_test(p1=1, p2=1, n1=100, n2=100) == 0
def test_special_case(self):
try:
_ = TopwordModel._z_test(p1=0.1, p2=0.3, n1=100, n2=0)
raise AssertionError("Error message did not raise")
except AssertionError as error:
assert str(error) == SEG_NON_POSITIVE_MESSAGE
try:
_ = TopwordModel._z_test(p1=0.1, p2=0.3, n1=0, n2=100)
raise AssertionError("Error message did not raise")
except AssertionError as error:
assert str(error) == SEG_NON_POSITIVE_MESSAGE
# ---------------------------------------------------------------------------
# ------------------- Test ALL_TO_PARA --------------------------------------
# Create test suite for normal case.
test_dtm_all_to_para = pd.DataFrame(
data=np.array([(1, 1, 0, 0), (0, 0, 1, 10)]),
columns=np.array(["A", "B", "C", "D"]),
index=np.array([0, 1]))
test_front_end_option_all_to_para = TopwordAnalysisType.ALL_TO_PARA
test_id_temp_label_map_all_to_para = {0: "F1", 1: "F2"}
test_class_division_map_all_to_para = pd.DataFrame(
data=np.array([(True, True)]),
index=["C1"],
columns=[0, 1])
test_option_all_to_para = TopwordTestOptions(
doc_term_matrix=test_dtm_all_to_para,
document_label_map=test_id_temp_label_map_all_to_para,
front_end_option=test_front_end_option_all_to_para,
division_map=test_class_division_map_all_to_para)
test_topword_model_all_to_para = TopwordModel(
test_options=test_option_all_to_para)
# noinspection PyProtectedMember
test_results_all_to_para =\
test_topword_model_all_to_para._get_result().results
# -------------------------Test Special ALL_TO_PARA---------------------------
# Create test suite for special case.
test_option_empty_all_to_para = TopwordTestOptions(
doc_term_matrix=pd.DataFrame(data=[], index=[], columns=[]),
document_label_map={},
front_end_option=test_front_end_option_all_to_para,
division_map=pd.DataFrame(data=[], index=[], columns=[]))
test_topword_model_empty_all_to_para = TopwordModel(
test_options=test_option_empty_all_to_para)
# ---------------------------------------------------------------------------
class TestParaToGroup:
def test_normal_case_result(self):
assert test_results_all_to_para[0]['D'] == -2.1483
assert test_results_all_to_para[1].dtype == "float64"
assert test_results_all_to_para[1].name == \
"Document \"F2\" Compared To The Corpus"
def test_special_case(self):
try:
# noinspection PyProtectedMember
_ = test_topword_model_empty_all_to_para._get_result()
raise AssertionError("Error message did not raise")
except AssertionError as error:
assert str(error) == EMPTY_DTM_MESSAGE
# ---------------------------------------------------------------------------
# -------------------- Test CLASS_TO_PARA------------------------------------
# Create test suite for normal case.
test_dtm_class_to_para = pd.DataFrame(
data=np.array([(1, 1, 0, 0, 0, 0, 0, 0),
(0, 0, 1, 1, 0, 0, 0, 0),
(0, 0, 0, 0, 1, 1, 0, 0),
(0, 0, 0, 0, 0, 0, 1, 100)]),
index=np.array([0, 1, 2, 3]),
columns=np.array(["A", "B", "C", "D", "E", "F", "G", "H"]))
test_id_temp_label_map_class_to_para = {0: "F1", 1: "F2", 2: "F3", 3: "F4"}
test_front_end_option_class_to_para = TopwordAnalysisType.CLASS_TO_PARA
test_class_division_map_class_to_para = pd.DataFrame(
data=np.array([(True, True, False, False), (False, False, True, True)]),
index=np.array(["C1", "C2"]),
columns=np.array([0, 1, 2, 3]))
test_option_class_to_para = TopwordTestOptions(
doc_term_matrix=test_dtm_class_to_para,
document_label_map=test_id_temp_label_map_class_to_para,
front_end_option=test_front_end_option_class_to_para,
division_map=test_class_division_map_class_to_para)
test_topword_model_one_class_to_para = TopwordModel(
test_options=test_option_class_to_para)
# noinspection PyProtectedMember
test_results_class_to_para =\
test_topword_model_one_class_to_para._get_result().results
# -------------------- Test Special CLASS_TO_PARA-----------------------------
# Create test suite for special case.
test_option_empty_class_to_para = TopwordTestOptions(
doc_term_matrix=pd.DataFrame(data=[], index=[], columns=[]),
document_label_map={},
front_end_option=test_front_end_option_class_to_para,
division_map=pd.DataFrame(data=[], index=[], columns=[]))
test_topword_model_empty_one_class_to_para = \
TopwordModel(test_options=test_option_empty_class_to_para)
# ---------------------------------------------------------------------------
# Testing starts here
class TestClassToAll:
def test_normal_case_result(self):
assert test_results_class_to_para[0]['A'] == 7.2108
assert test_results_class_to_para[0]['B'] == 7.2108
assert test_results_class_to_para[0]['H'] == -6.3857
assert test_results_class_to_para[1].dtype == 'float64'
assert test_results_class_to_para[1].name == \
'Document "F2" Compared To Class "C2"'
def test_special_case(self):
try:
# noinspection PyProtectedMember
_ = test_topword_model_empty_one_class_to_para._get_result()
raise AssertionError("Error message did not raise")
except AssertionError as error:
assert str(error) == EMPTY_DTM_MESSAGE
# ---------------------------------------------------------------------------
# ------------------- Test CLASS_TO_CLASS ----------------------------------
# Create test suite for normal case.
test_dtm_class_to_class = pd.DataFrame(
data=np.array([(1, 1, 0, 0, 0, 0, 0, 0),
(0, 0, 1, 1, 0, 0, 0, 0),
(0, 0, 0, 0, 1, 1, 0, 0),
(0, 0, 0, 0, 0, 0, 1, 100)]),
index=np.array([0, 1, 2, 3]),
columns=np.array(["A", "B", "C", "D", "E", "F", "G", "H"]))
test_id_temp_label_map_class_to_class = {0: "F1", 1: "F2", 2: "F3", 3: "F4"}
test_front_end_option_class_to_class = TopwordAnalysisType.CLASS_TO_CLASS
test_class_division_map_class_to_class = pd.DataFrame(
data=np.array([(True, True, False, False), (False, False, True, True)]),
index=np.array(["C1", "C2"]),
columns=np.array([0, 1, 2, 3]))
test_option_class_to_class = TopwordTestOptions(
doc_term_matrix=test_dtm_class_to_class,
document_label_map=test_id_temp_label_map_class_to_class,
front_end_option=test_front_end_option_class_to_class,
division_map=test_class_division_map_class_to_class)
test_topword_model_two_class_to_class = TopwordModel(
test_options=test_option_class_to_class)
# noinspection PyProtectedMember
test_results_class_to_class = \
test_topword_model_two_class_to_class._get_result().results
# ---------------------Test Special CLASS_TO_CLASS----------------------------
# Create test suite for special case.
test_option_empty_class_to_class = TopwordTestOptions(
doc_term_matrix= | pd.DataFrame(data=[], index=[], columns=[]) | pandas.DataFrame |
# MIT License
#
# Copyright (c) 2018 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# We need to find the leaders by mentions.
# We need to rank leaders by weight given to them by mentions (Building Liquid Ranking).
# Initial weight to all mentions is given as equal.
# Calculate score on basis of above, iterations, updating, also mentions of feed owners
# should be considered while scoring.
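# Toy walk-through of the weighting idea (all numbers are made up):
# suppose feed owner @a mentions @b three times and @c once, and @b mentions @c twice.
# Iteration 0: every rater has weight 1, so @b scores 3 and @c scores 1 + 2 = 3.
# Later iterations re-scale each rated count by its rater's current weight
# (new_count = count * rater_weight + 1, see new_ranker below) and log-normalise
# the ranking, so mentions coming from heavily-mentioned accounts count for more.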
import re
import operator
import math
import numpy as np
import pandas as pd
from collections import Counter
df = pd.read_csv('D:/Thesis/crypto_twitter_reddit.csv', index_col=0)
df = df[df.type == 'twitter'] # only twitter data
# Finding mentions in data by feed owners
def feedomen(feedo):
flist = []
df = pd.read_csv('D:/Thesis/crypto_twitter_reddit.csv', index_col=0)
df = df[df.type == 'twitter']
df.link = df.link.str.replace('https://twitter.com/', '@', regex=True)
df = df[df.link == feedo]
df.text = df.text.fillna('')
for row in df.text:
if row[0] == 'R' and row[1] == 'T':
continue
else:
for word in row.split():
if word[0] == '@':
word = word.split("'")[0]
flist.append(word.lower())
# Cleaning mentions from extra characters
omentions = []
for row in flist:
for char in "[#!$%^&*()-+|:;,<>?./…]":
row = row.replace(char, '')
omentions.append(row)
# Counter for mentions
dct = Counter(omentions)
cist = sorted(dct.items(), key=operator.itemgetter(1), reverse=True)
#map(float,cist)
return(cist)
# Raters List
cmlist = []
df.link = df.link.fillna('')
df.link = df.link.str.replace('https://twitter.com/', '@', regex=True)
for link in df.link:
if link in cmlist:
continue
else:
cmlist.append(link)
def default_weight(raters):
ranking = []
for feedo in cmlist:
ranking.append(feedomen(feedo))
return ranking
def new_ranker(ratrw, rated):
wt = list(ratrw)
new_rated = []
for lqr in rated:
rt = list(lqr)
rt[1] = (rt[1] * wt[1]) + 1
new_rated.append(rt)
#print(new_rated)
return new_rated
def weights(ranking):
new_weights = []
for i in cmlist:
i = i.lower()
new_weights.append([i, 0])
for rater in ranking:
for ratrw in rater:
for ratrwm in new_weights:
if ratrw[0] == ratrwm[0] and ratrw[1] > ratrwm[1]:
ratrwm[1] = ratrw[1]
#print(new_weights)
return new_weights
def normalisation(ranking):
max = float()
for rater in ranking:
for rated in rater:
value = float(rated[1])
#print('value= ', value)
#if value > 1.0 :
valuelog = math.log((rated[1]+1), 10)
#print('valuelog= ', valuelog)
if max < valuelog:
max = valuelog
if value > max:
max = value
print (max)
for rater in ranking:
for rated in rater:
#print('rated',rated[1])
value = float(rated[1]) / max
if value == 0:
                value = 0.1  # floor zero scores (the original assigned an unused variable 'v' here)
rated[1] = value
#print('value updated', value)
return ranking
iterations=9 #sanity check
for check in range(iterations):
new_ranking = []
if check == 0:
ranking = default_weight(cmlist)
else:
weight = weights(ranking)
i = 0
for rated in ranking:
new_ranking.append(new_ranker(weight[i], rated))
i += 1
ranking = normalisation(new_ranking)
#print(ranking)
def counter(ranking):
final_list = []
for final in ranking:
for descend in final:
if descend[0] in final_list:
continue
else:
final_list.append([descend[0], 0])
return final_list
def counter_add(ranking, final_list):
for ranked in ranking:
for individual in ranked:
for rated in final_list:
if individual[0] == rated[0]:
rated[1] += individual[1]
return final_list
final_list = counter(ranking)
liquid_ranking = counter_add(ranking, final_list)
#print(liquid_ranking)
import operator # Descending order
peanuts = dict(liquid_ranking)
ranked = sorted(peanuts.items(), key=operator.itemgetter(1), reverse=True)  # renamed from 'list' to avoid shadowing the built-in
dfm = dict(ranked)
dfm = | pd.DataFrame.from_dict(data=dfm, orient='index') | pandas.DataFrame.from_dict |
import pandas as pd
tmtids = pd.read_csv('tmtids.csv')
tmtvac = pd.read_csv('tmt_speedvac_group.csv')
hphdate = pd.read_csv('hphdate.csv')
isodate = pd.read_csv('isodate.csv')  # assumption: 'isodate' was referenced without being loaded anywhere; this filename is a guess
tmt_merged = pd.merge(tmtids, isodate, left_on='TMT-10 Set', right_on='TMT Set')
tmt_merged = pd.merge(tmt_merged, tmtvac, on='TMT Set')
tmt_merged = | pd.merge(tmt_merged, hphdate, on='TMT Set') | pandas.merge |
"""
This module contains the base functions and properties all
spatial dataframes will inherit from.
"""
from __future__ import print_function
from __future__ import division
from six import integer_types
import numpy as np
import pandas as pd
from arcgis.geometry import _types
from pandas import DataFrame, Series
GEOTYPES = [_types.Geometry]
try:
import arcpy
HASARCPY = True
GEOTYPES.append(arcpy.Geometry)
except:
GEOTYPES = [_types.Geometry]
HASARCPY = False
try:
import shapely
from shapely.geometry.base import BaseGeometry as _BaseGeometry
HASSHAPELY = True
GEOTYPES.append(_BaseGeometry)
except ImportError:
HASSHAPELY = False
from warnings import warn
try:
from .index.quadtree import Index as QuadIndex
HAS_QUADINDEX = True
except ImportError:
HAS_QUADINDEX = False
try:
from .index.rtree import RTreeError, Rect
from .index.si import SpatialIndex
HAS_SINDEX = True
except ImportError:
class RTreeError(Exception):
pass
HAS_SINDEX = False
GEOTYPES = tuple(GEOTYPES)
#--------------------------------------------------------------------------
def _call_property(this, op, null_value=None, isGeoseries=False):
"""
calls a property by name on an object
Inputs:
this: object to call the function on
op: string, name of the function to call
null_value: if the function requires a specific return value on error
or failure, this value will be returned.
        isGeoseries: if True, a spatial series is returned else a regular pandas
series object
Output:
        Returns a value from the property or the null value.
raises: ValueError
"""
if isGeoseries:
from . import GeoSeries
a = [getattr(geom, op, null_value) for geom in this.geometry \
if hasattr(geom, op)]#hasattr(geom, 'as_arcpy') and \
return GeoSeries(a,
index=this.index)
#[getattr(geom, op, null_value) for geom in this.geometry if hasattr(geom.as_arcpy, op)],
else:
a = [getattr(geom, op, null_value) for geom in this.geometry \
if hasattr(geom, op)]
return Series(a,
index=this.index)
#[getattr(geom.as_arcpy, op, null_value) for geom in this.geometry if hasattr(geom.as_arcpy, op)],
return null_value
#--------------------------------------------------------------------------
def _call_function(this, op, second_geometry=None,
null_value=None, isGeoseries=False,
**kwargs):
"""
Calls a function on a given object.
Inputs:
this: object to call the function on
op: string, name of the function to call
second_geometry: arcpy.Geometry object
null_value: if the function requires a specific return value on error
or failure, this value will be returned.
        isGeoseries: if True, a spatial series is returned else a regular pandas
series object
kwargs: optional values to be passed to the function
Output:
        Returns a value from the function or the null value.
raises: ValueError
"""
from .geoseries import GeoSeries
other = None
if 'other' in kwargs and \
second_geometry is None:
other = kwargs.pop('other')
hasOther = True
hasSecondGeom = False
isGSeries = isinstance(other, BaseSpatialPandas)
isPSeries = isinstance(other, Series)
isGeom = isinstance(other, GEOTYPES)
elif second_geometry is not None and \
'other' not in kwargs:
hasOther = False
hasSecondGeom = True
isPSeries = isinstance(second_geometry, Series)
isSeries = isinstance(second_geometry, BaseSpatialPandas)
isGeom = isinstance(second_geometry, GEOTYPES)
elif 'other' in kwargs and second_geometry is not None:
raise ValueError("Two geometries given as other and second_geometry, you can only have one")
else: # default
other = None
hasOther = False
hasSecondGeom = False
isGeom = False
isGSeries = False
isPSeries = False
if isGeoseries:
if isPSeries or \
(other and isinstance(other, BaseSpatialPandas)):
this = this.geometry
if second_geometry is not None:
other = second_geometry
key = 'second_geometry'
elif other is not None:
key = 'other'
else:
key = 'no_geom'
sr = this.sr
this, other = this.align(other.geometry)
vals = []
for geom, other_geom in zip(this, other):
fn = getattr(geom, op)
if key == 'other':
kwargs['other'] = other_geom
vals.append(fn(**kwargs))
elif key == 'second_geometry':
vals.append(fn(second_geometry=second_geometry, **kwargs))
else:
vals.append(fn(**kwargs))
del fn
del geom
del other_geom
return GeoSeries(np.array(vals), index=this.index)
elif isGeom:
if second_geometry:
return GeoSeries([getattr(s, op)(second_geometry=second_geometry,
**kwargs)
for s in this.geometry],
index=this.index, )
else:
if hasOther:
kwargs['other'] = other
vals = [getattr(s, op)(**kwargs) for s in this.geometry]
return GeoSeries([getattr(s, op)(**kwargs)
for s in this.geometry],
index=this.index, )
else:
return GeoSeries([getattr(s, op)(**kwargs)
for s in this.geometry],
index=this.index, )
else:
if isPSeries:
this = this.geometry
if second_geometry is not None:
other = second_geometry
key = 'second_geometry'
elif other is not None:
key = 'other'
else:
key = 'no_geom'
sr = this.sr
this, other = this.align(other.geometry)
vals = []
for geom, other_geom in zip(this, other):
fn = getattr(geom, op)
if key == 'other':
kwargs['other'] = other_geom
vals.append(fn(**kwargs))
elif key == 'second_geometry':
vals.append(fn(second_geometry=other_geom, **kwargs))
else:
vals.append(fn(**kwargs))
del fn
del geom
del other_geom
return Series(np.array(vals), index=this.index)
fn = None
vals = []
for s in this.geometry:
if hasattr(s, op):
fn = getattr(s, op)
if second_geometry:
vals.append(fn(second_geometry=second_geometry,**kwargs))
else:
vals.append(fn(**kwargs))
elif s and hasattr(s, op) == False:
raise ValueError("Invalid operation: %s" % op)
else:
vals.append(np.nan)
del s
return Series(vals, index=this.index)
return null_value
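# Usage sketch for the two helpers above (illustrative only; `sdf` is assumed to
# be a spatial dataframe whose geometry objects expose an `area` property and a
# `disjoint(second_geometry)` method, and `other_geom` a single geometry object):
#
#     areas = _call_property(sdf, 'area', null_value=np.nan)
#     disjoint_flags = _call_function(sdf, 'disjoint', second_geometry=other_geom)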
########################################################################
class BaseSpatialPandas(object):
"""
Base object that the Series and DataFrame will inherit from.
"""
#----------------------------------------------------------------------
def _generate_sindex(self):
self._sindex = None
if not HAS_SINDEX and not HAS_QUADINDEX:
warn("Cannot generate spatial index: Missing package 'rtree'.")
elif HAS_QUADINDEX:
bbox = self.series_extent
if bbox is None:
bbox = [-180, -90, 180, 90]
qi = QuadIndex(bbox=bbox)
geometry_type = self.geometry_type.lower()
if geometry_type == 'point':
geometry_type = self.geometry[self.geometry.index[0]].type.lower()
for i, (idx, item) in enumerate(self.geometry.iteritems()):
if pd.notnull(item) and item:
if geometry_type in ('point', 'pointgeometry'):
factor = .01
else:
factor = 0
if geometry_type == 'pointgeometry':
item = item.centroid
if HASARCPY:
try:
xmin, ymin, xmax, ymax = item.extent
qi.insert(item=idx, bbox=(xmin - factor,
ymin - factor,
xmax + factor,
ymax + factor))
except:
pass
else:
try:
qi.insert(item=idx, bbox=(item.extent[0] - factor,
item.extent[1] - factor,
item.extent[2] + factor,
item.extent[3] + factor))
except:
pass
self._sindex = qi
elif HAS_SINDEX:
#(xmin, ymin, xmax, ymax)
if self.geometry_type.lower() == "point":
stream = ((i, (item.extent.XMin - .01, item.extent.YMin - .01, item.extent.XMax + .01, item.extent.YMax + .01), idx) for i, (idx, item) in
enumerate(self.geometry.iteritems()) if
| pd.notnull(item) | pandas.notnull |
import pandas
from modin.engines.ray.pandas_on_ray.frame.partition import PandasOnRayFramePartition
from modin import __execution_engine__
if __execution_engine__ == "Ray":
import ray
import pyarrow
class PyarrowOnRayFramePartition(PandasOnRayFramePartition):
def to_pandas(self):
"""Convert the object stored in this partition to a Pandas DataFrame.
Returns:
A Pandas DataFrame.
"""
dataframe = self.get().to_pandas()
assert type(dataframe) is pandas.DataFrame or type(dataframe) is pandas.Series
return dataframe
@classmethod
def put(cls, obj):
"""Put an object in the Plasma store and wrap it in this object.
Args:
obj: The object to be put.
Returns:
A `RayRemotePartition` object.
"""
return PyarrowOnRayFramePartition(ray.put(pyarrow.Table.from_pandas(obj)))
@classmethod
def length_extraction_fn(cls):
return lambda table: table.num_rows
@classmethod
def width_extraction_fn(cls):
return lambda table: table.num_columns - (1 if "index" in table.columns else 0)
@classmethod
def empty(cls):
return cls.put( | pandas.DataFrame() | pandas.DataFrame |
import numpy as np
import scipy
import pywt
import pandas as pd
def WTfilt_1d(sig):
# https://blog.csdn.net/weixin_39929602/article/details/111038295
coeffs = pywt.wavedec(data=sig, wavelet='db5', level=9)
cA9, cD9, cD8, cD7, cD6, cD5, cD4, cD3, cD2, cD1 = coeffs
threshold = (np.median(np.abs(cD1)) / 0.6745) * (np.sqrt(2 * np.log(len(cD1))))
    # Zero out the high-frequency detail coefficients cD1 and cD2
cD1.fill(0)
cD2.fill(0)
    # Soft-threshold the remaining mid/low-frequency coefficients
for i in range(1, len(coeffs) - 2):
coeffs[i] = pywt.threshold(coeffs[i], threshold)
rdata = pywt.waverec(coeffs=coeffs, wavelet='db5')
# if np.isnan(rdata).any() == True:
# print(sig)
# print(rdata)
return rdata
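# Quick usage sketch (synthetic signal, purely illustrative):
#
#     t = np.linspace(0, 1, 4096)
#     noisy = np.sin(2 * np.pi * 5 * t) + 0.3 * np.random.randn(t.size)
#     clean = WTfilt_1d(noisy)
#
# Note: pywt.waverec may return one extra sample when the input length is odd,
# so callers that need an exact length match should trim the result.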
def filter_and_detrend(data):
num_leads = len(data)
filtered_data = | pd.DataFrame() | pandas.DataFrame |
from web3 import Web3, HTTPProvider, IPCProvider
from tqdm import tqdm
import pandas as pd
from config import CONN, HTTP_ADDRESS, FILENAME, IPC_ADDRESS
def fetch_data(conn, start_block=46147, end_block=None):
web3 = Web3(conn)
if end_block is None:
end_block = 10**9
curr_block = start_block
while curr_block <= end_block:
print("Fetching block no: ", str(curr_block))
block_data = dict(web3.eth.getBlock(curr_block, full_transactions=True))
transactions = block_data['transactions']
all_receipts = []
print("Processing transactions for block no: ", str(curr_block), 'found transactions', len(transactions))
for transaction in transactions:
all_receipts.append(dict(web3.eth.getTransactionReceipt(transaction['hash'])))
df_trans = | pd.DataFrame(transactions) | pandas.DataFrame |
import datetime
import os
import re
import requests
import urllib.parse
import time
from bs4 import BeautifulSoup
import html2text
import numpy as np
import pandas
from selenium import webdriver
from selenium.common.exceptions import NoSuchElementException as slnm_NoSuchElementException
from selenium.common.exceptions import StaleElementReferenceException as slnm_StaleElementReferenceException
from selenium.webdriver.firefox.firefox_binary import FirefoxBinary
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
from selenium.webdriver.common.keys import Keys
from tqdm import tqdm
#
search_urls = {
'food' : r'https://www.bbc.co.uk/news/topics/cp7r8vglgq1t/food',
'europe-migrant-crisis' : r'https://www.bbc.co.uk/news/topics/cnx753je2q4t/europe-migrant-crisis',
'eu-uk-post-brexit-trade-talks' : r'https://www.bbc.co.uk/news/topics/c4vm89lx8e8t/eu-uk-post-brexit-trade-talks',
'hong-kong-anti-government-protests' : r'https://www.bbc.co.uk/news/topics/c95yz8vxvy8t/hong-kong-anti-government-protests',
'tigray-crisis' : r'https://www.bbc.co.uk/news/topics/cr2pnx1173dt/tigray-crisis',
'facebook' : r'https://www.bbc.co.uk/news/topics/cmj34zmwxjlt/facebook',
'puerto-rico' : r'https://www.bbc.co.uk/news/topics/cg41ylwvw3gt/puerto-rico',
'coronavirus' : r'https://www.bbc.co.uk/news/coronavirus',
#'state-schools' : r'https://www.bbc.co.uk/news/topics/cdl8n2edeg8t/state-schools',
#'eurovision-song-contest' : r'https://www.bbc.co.uk/news/topics/cg20vmmp95jt/eurovision-song-contest',
#r'',
}
tmp_dir = 'tmp/bbc-non-climate'
url_list_file_name = 'urls.csv'
base_url = 'https://www.bbc.co.uk'
#
#
url_list_path = os.path.join(tmp_dir, url_list_file_name)
data_file_path = os.path.join('data', 'bbc-non-climate.csv')
# link scraping
"""
SEARCH PAGES
list of articles:
<ol class="gs-u-m0 gs-u-p0 lx-stream__feed qa-stream"
<li class="lx-stream__post-container placeholder-animation-finished">
some li are just a video (no lihk)
links:
<a class="qa-heading-link lx-stream-post__header-link"
href=/news/CATEG-NUMBER>...
pages:
<div class="lx-pagination__nav ..."
<span class="lx-pagination__page-number qa-pagination-current-page-number">
CURRENT PAGE NUMBER
<span class="lx-pagination__page-number qa-pagination-total-page-number">
TOTAL NB PAGES
<a class="lx-pagination__btn gs-u-mr+ qa-pagination-next-page lx-pagination__btn--active"
LINK TO NEXT PAGE
href=/false/page/2
(needs selenium)
"""
firefox_bin_path = '/usr/bin/firefox'
geckodriver_path = '/usr/bin/geckodriver'
def save_urls(urls):
pandas.DataFrame({ 'url' : urls }).to_csv(url_list_path, index = False)
firefox_binary = FirefoxBinary(firefox_bin_path)
capabilities = DesiredCapabilities.FIREFOX.copy()
capabilities['marionette'] = True
driver = webdriver.Firefox(firefox_binary = firefox_binary,
capabilities = capabilities,
executable_path = geckodriver_path)
url_count = 0
urls = []
# for each topic
for topic, search_url in search_urls.items():
# load first page
driver.get(search_url)
# get page count
nb_pages = int(driver.find_element_by_class_name('qa-pagination-total-page-number').text)
print(f'Topic "{topic}" has {nb_pages} result pages')
# for each page
for page_index in range(nb_pages):
# scrape the links
link_list = driver.find_element_by_tag_name('ol').find_elements_by_tag_name('li')
for entry in link_list:
try:
link = entry.find_element_by_tag_name('a').get_attribute('href')
except(slnm_NoSuchElementException):
continue
except(slnm_StaleElementReferenceException):
print(f'Stale element at page {page_index}')
continue
urls.append(link)
url_count += 1
if((url_count + 1) % 120 == 0):
save_urls(urls)
# goto next page
if(page_index < nb_pages - 1):
try:
driver.find_element_by_class_name('qa-pagination-next-page').click()
except(slnm_NoSuchElementException):
# BBC got tired?
break
print(f'Actual number of pages: {page_index+1}')
# (crashed at some point with 4079 urls, half of them link to the faq)
# last valid article is about covid-19
# save link list
save_urls(urls)
# remove the faq links
url_df = pandas.read_csv(url_list_path)
url_df = url_df[url_df.url != r'http://www.bbc.co.uk/faqs/questions/bbc_online/sharing']
url_df.to_csv(url_list_path, index = False)
# scrape the articles
"""
ARTICLE PAGES:
inside <article>
inside <header>
title: h1 #main-heading
author: just after the h1: p > span[1] > a text
date: just after the p: div > dd > span > span[1] > time (attr=) datetime="2020-11-23T22:24:21.000Z"
tags: just after the div: div > div[1] > div > ul > li[*] > a text
</header>
content: div[*] with attr: data-component="text-block" > p text
"""
def save_data(data_file_path, articles):
articles_df = | pandas.DataFrame(articles) | pandas.DataFrame |
# coding: utf-8
# In[2]:
import pandas as pd
import numpy as np
import os
import glob
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
from matplotlib.dates import MO, TU, WE, TH, FR, SA, SU
from pandas.plotting import autocorrelation_plot  # pandas.tools.plotting was removed in newer pandas versions
# from statsmodels.tsa.arima_model import ARIMA
from sklearn.metrics import precision_score
from sklearn.metrics import recall_score, roc_curve
from sklearn.metrics import jaccard_similarity_score
import seaborn as sns
import datetime
plt.style.use('ggplot')
from bisect import bisect
import re
pd.options.display.max_columns = 999
# In[5]:
lst_f_featureimportance = glob.glob("log_assess/*.csv")
lst_f_featureimportance_actual = glob.glob("log_assess/actual_model_performance/*.csv")
lst_f_performance = glob.glob("log_assess/actual_model_performance/*.txt")
lst_f_res = glob.glob("result_assess/actual_model_result/Results*.csv")
lst_f_roc = glob.glob("roc_assess/*.csv")
lst_f_input = glob.glob("input_data/test/*.csv")
lst_f_actual_res = glob.glob("result_assess/actual_model_result/merged_*.csv")
# In[16]:
fontsize = 20
# In[4]:
# ###2nd run
# lst_f_featureimportance_2nd = glob.glob("backup/log_assess/*.csv")
# lst_f_performance_2nd = glob.glob("backup/log_assess/*.txt")
# lst_f_res_2nd = glob.glob("backup/result_assess/*.csv")
# lst_f_roc_2nd = glob.glob("backup/roc_assess/*.csv")
# ## 1. Accuracy/Kappa/AUC/Recall/F1
# ### Value tendency
# In[1029]:
lst_performance = []
for filename in lst_f_performance:
with open(filename) as f:
lst_performance.append([i for i in f.read().split("\n") if i.strip()])
dict_performance = {}
# dict_performance['accuracy'] = []
# dict_performance['kappa'] = []
dict_performance['auc'] = []
# dict_performance['recall'] = []
# dict_performance['precision'] = []
dict_performance['f1'] = []
dict_performance['date'] = []
for idx, p in enumerate(lst_performance):
for em in p:
if 'AUC' in em:
auc = float(em[em.find('=')+2:].strip())
if 'f1' in em:
dict_performance['date'].append(lst_f_performance[idx])
dict_performance['f1'].append(float(em[em.find('=')+2:].strip()))
dict_performance['auc'].append(auc)
# if 'Accuracy' in em:
# dict_performance['accuracy'].append(float(em[em.find('=')+2:].strip()))
# if 'kappa' in em:
# dict_performance['kappa'].append(float(em[em.find('=')+2:].strip()))
# if 'recall' in em:
# dict_performance['recall'].append(float(em[em.find('=')+2:].strip()))
# if 'precision' in em:
# dict_performance['precision'].append(float(em[em.find('=')+2:].strip()))
df_performance = pd.DataFrame(dict_performance)
def getDate(x):
return x.replace("log_assess/actual_model_performance/ModelPerformance_", "").replace(".txt","")[:10]
df_performance['date'] = pd.to_datetime(df_performance['date'].apply(lambda x: getDate(x)))
df_performance = df_performance.sort_values(by='date').reset_index(drop=True)
# In[6]:
### 2nd run
# lst_performance_2nd = []
# for filename in lst_f_performance_2nd:
# with open(filename) as f:
# lst_performance_2nd.append([i for i in f.read().split("\n") if i.strip()])
# dict_performance_2nd = {}
# dict_performance_2nd['accuracy'] = []
# dict_performance_2nd['kappa'] = []
# dict_performance_2nd['auc'] = []
# dict_performance_2nd['recall'] = []
# dict_performance_2nd['precision'] = []
# dict_performance_2nd['f1'] = []
# for p in lst_performance_2nd:
# for em in p:
# if 'Accuracy' in em:
# dict_performance_2nd['accuracy'].append(float(em[em.find('=')+2:].strip()))
# if 'kappa' in em:
# dict_performance_2nd['kappa'].append(float(em[em.find('=')+2:].strip()))
# if 'AUC' in em:
# dict_performance_2nd['auc'].append(float(em[em.find('=')+2:].strip()))
# if 'recall' in em:
# dict_performance_2nd['recall'].append(float(em[em.find('=')+2:].strip()))
# if 'precision' in em:
# dict_performance_2nd['precision'].append(float(em[em.find('=')+2:].strip()))
# if 'f1' in em:
# dict_performance_2nd['f1'].append(float(em[em.find('=')+2:].strip()))
# df_performance_2nd = pd.DataFrame(dict_performance_2nd)
# dict_date_2nd = {}
# dict_date_2nd['date'] = [fn.replace("backup/log_assess/ModelPerformance_", "").replace(".txt","") for fn in lst_f_performance_2nd]
# df_date_2nd = pd.DataFrame(dict_date_2nd)
# df_performance_2nd = df_performance_2nd.join(df_date_2nd)
# df_performance_2nd['date'] = pd.to_datetime(df_performance_2nd['date'])
# df_performance_2nd = df_performance_2nd.sort_values(by='date').reset_index(drop=True)
# # df_performance.set_index(['date'],inplace=True)
# In[1129]:
kappa_kdd = 0.33
auc_kdd = 0.75
recall_kdd = 0.50
precision_kdd = 0.26
post_deploy = 'Actual performance over time'
pre_deploy = "Initial trained model"
lst_date = [""] + df_performance['date'].dt.strftime('%m-%d-%y').tolist() + [""]
fig, axes = plt.subplots(1,2,figsize=(25,5))
ax = axes.flat
# ax[0].plot(df_performance['accuracy'], marker='o')
# # ax[0].plot(df_performance_2nd['date'], df_performance_2nd['accuracy'], marker='o')
# ax[0].set_title("accuracy")
# # ax[0].legend(['1st run','2nd run'])
# ax[0].plot(df_performance['date'], df_performance['kappa'], marker='o')
# ax[0].plot(df_performance['date'], np.full((df_performance.shape[0]), kappa_kdd), ls='dashed', color = 'r')
# # ax[1].plot(df_performance_2nd['date'], df_performance_2nd['kappa'], marker='o')
# ax[0].set_title("kappa")
# max_lim = max(df_performance['kappa'].max(), kappa_kdd)+0.03
# min_lim = min(df_performance['kappa'].min(), kappa_kdd)-0.02
# ax[0].set_ylim([min_lim, max_lim])
# # ax[1].legend(['1st run','2nd run'])
ax[0].plot(df_performance['date'], df_performance['auc'], marker='o')
# ax[0].plot(df_performance['date'], np.full((df_performance.shape[0]), auc_kdd), ls='dashed', color = 'r')
# ax[2].plot(df_performance_2nd['date'], df_performance_2nd['auc'], marker='o')
ax[0].set_title("auc")
max_lim = df_performance['auc'].max() + 0.02
min_lim = df_performance['auc'].min() - 0.02
ax[0].set_ylim([min_lim, max_lim])
ax[0].set_ylabel("AUC score")
# max_lim = max(df_performance['auc'].max(), auc_kdd)+0.02
# min_lim = min(df_performance['auc'].min(), auc_kdd)-0.02
# ax[2].legend(['1st run','2nd run'])
# ax[2].plot(df_performance['date'], df_performance['recall'], marker='o')
# ax[2].plot(df_performance['date'], np.full((df_performance.shape[0]), recall_kdd), ls='dashed', color = 'r')
# # ax[3].plot(df_performance_2nd['date'], df_performance_2nd['recall'], marker='o')
# max_lim = max(df_performance['recall'].max(), recall_kdd)+0.03
# min_lim = min(df_performance['recall'].min(), recall_kdd)-0.02
# ax[2].set_ylim([min_lim, max_lim])
# ax[2].set_title("recall")
# # ax[3].legend(['1st run','2nd run'])
# ax[3].plot(df_performance['date'], df_performance['precision'], marker='o')
# ax[3].plot(df_performance['date'], np.full((df_performance.shape[0]), precision_kdd), ls='dashed', color = 'r')
# # ax[4].plot(df_performance_2nd['date'], df_performance_2nd['precision'], marker='o')
# max_lim = max(df_performance['precision'].max(), precision_kdd)+0.03
# min_lim = min(df_performance['precision'].min(), precision_kdd)-0.02
# ax[3].set_ylim([min_lim, max_lim])
# ax[3].set_title("precision")
# ax[4].legend(['1st run','2nd run'])
ax[1].plot(df_performance['date'], df_performance['f1'], marker='o')
# ax[5].plot(df_performance_2nd['date'], df_performance_2nd['f1'], marker='o')
ax[1].set_title("F1")
ax[1].set_ylabel("F1 score")
# ax[5].legend(['1st run','2nd run'])
for ax in fig.axes:
# plt.sca(ax)
# plt.xticks(rotation=45)
# ax.set_xticks(np.arange(len(df_performance['date'])))
# ax.set_xticklabels(df_performance['date'].dt.strftime("%m-%d-%y"), rotation = -45, ha='left')
# ax.legend([post_deploy, pre_deploy], loc = 'upper left')
ax.xaxis.set_major_locator(mdates.WeekdayLocator(byweekday=SU))
ax.xaxis.set_major_formatter(mdates.DateFormatter('%m-%d-%y'))
ax.xaxis.set_tick_params(rotation=-45)
ax.set_xticklabels(lst_date, ha='left')
fig.suptitle("Performance Metrics Over Time(Commercial)", fontsize=15)
plt.tight_layout()
plt.subplots_adjust(top=0.9)
plt.savefig("analysis_plotting/Performance Metrics Over Time_Commercial.pdf")
# ### Mean/std tendency
# In[1116]:
df_statistic = pd.DataFrame()
for col in df_performance:
if col in ['accuracy', 'auc', 'f1', 'kappa', 'precision' ,'recall']:
df_statistic['mean_%s'%col] = df_performance[col].expanding().mean()
df_statistic['std_%s'%col] = df_performance[col].expanding().std()
df_statistic['date'] = df_performance['date']
df_statistic = df_statistic.iloc[1:,:].reset_index(drop=True)
label_time_window = pd.Series(["over %d week"%i for i in range(1,df_performance.shape[0])], name='time window')
df_statistic = | pd.concat([df_statistic,label_time_window], axis=1) | pandas.concat |
import argparse
import math
import os
import shutil
import sys
from pathlib import Path
import cv2
import numpy as np
import pandas as pd
import torch
from moviepy.editor import ImageSequenceClip
from skimage import io
from torchvision import transforms
from tqdm.auto import tqdm
from dataloading.nvidia import NvidiaDataset, Normalize, NvidiaCropWide
from pilotnet import PilotNetConditional, PilotnetControl
from trainer import Trainer, ConditionalTrainer, ControlTrainer
from velocity_model.velocity_model import VelocityModel
def create_driving_video(dataset_folder, output_modality):
dataset_path = Path(dataset_folder)
dataset = NvidiaDataset([dataset_path], output_modality=output_modality, n_branches=3,
metadata_file="nvidia_frames.csv")
temp_frames_folder = dataset_path / 'temp'
shutil.rmtree(temp_frames_folder, ignore_errors=True)
temp_frames_folder.mkdir()
draw_driving_frames(dataset, temp_frames_folder, output_modality)
output_video_path = dataset_path / 'video.mp4'
convert_frames_to_video(temp_frames_folder, output_video_path, fps=30)
shutil.rmtree(temp_frames_folder, ignore_errors=True)
print(f"{dataset.name}: output video {output_video_path} created.")
def create_prediction_video(dataset_folder, output_modality, model_path, model_type):
dataset_path = Path(dataset_folder)
dataset = NvidiaDataset([dataset_path], name=dataset_path.name, output_modality=output_modality,
n_branches=3, n_waypoints=10)
#dataset.frames = dataset.frames[9160:9500]
temp_frames_folder = dataset_path / 'temp'
shutil.rmtree(temp_frames_folder, ignore_errors=True)
temp_frames_folder.mkdir()
if output_modality == "steering_angle":
steering_predictions = get_predictions(dataset_path, model_path, "steering_angle")
speed_predictions = get_speed_predictions(dataset)
draw_prediction_frames(dataset, steering_predictions, speed_predictions, temp_frames_folder)
if output_modality == "waypoints":
trajectory = get_predictions(dataset_path, model_path, "waypoints")
draw_prediction_frames_wp(dataset, trajectory, temp_frames_folder)
output_video_path = dataset_path / f"{str(Path(model_path).parent.name)}.mp4"
convert_frames_to_video(temp_frames_folder, output_video_path, fps=30)
shutil.rmtree(temp_frames_folder, ignore_errors=True)
print(f"{dataset.name}: output video {output_video_path} created.")
def get_predictions(dataset_path, model_path, output_modality):
print(f"{dataset_path.name}: {output_modality} predictions")
#trainer.force_cpu() # not enough memory on GPU for parallel processing # TODO: make input argument
n_outputs = 1 if output_modality == "steering_angle" else 20
if model_type == "pilotnet-conditional":
model = PilotNetConditional(n_branches=3, n_outputs=n_outputs)
trainer = ConditionalTrainer(None, target_name=output_modality, n_conditional_branches=3)
elif model_type == "pilotnet-control":
model = PilotnetControl(n_outputs=1)
trainer = ControlTrainer(None, target_name=output_modality, n_conditional_branches=3)
else:
print(f"Unknown model type '{args.model_type}'")
sys.exit()
model.load_state_dict(torch.load(model_path))
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
model.to(device)
model.eval()
dataloader = get_data_loader(dataset_path, output_modality)
steering_predictions = trainer.predict(model, dataloader)
return steering_predictions
def get_data_loader(dataset_path, output_modality):
tr = transforms.Compose([NvidiaCropWide(), Normalize()])
dataset = NvidiaDataset([Path(dataset_path)],
tr, name=dataset_path.name, output_modality=output_modality, n_branches=3)
dataloader = torch.utils.data.DataLoader(dataset, batch_size=64, shuffle=False,
num_workers=16, pin_memory=True, persistent_workers=True)
return dataloader
def get_speed_predictions(dataset):
print(f"{dataset.name}: speed predictions")
velocity_model = VelocityModel(positions_parquet='velocity_model/summer2021-positions.parquet')
frames = dataset.frames
x = frames["position_x"]# + np.random.normal(0, 0.1, len(frames))
y = frames["position_y"]# + np.random.normal(0, 0.1, len(frames))
yaw = frames["yaw"]# + np.random.normal(0, 0.2, len(frames))
result_df = | pd.DataFrame(data={'x': x, 'y': y, 'yaw': yaw}) | pandas.DataFrame |
import itertools
import os
import pandas as pd
import numpy as np
import csv
import glob
from decimal import *
from itertools import chain
import statistics as stat
import datetime
from io import StringIO
import matplotlib.pyplot as plt
# What Phase are we in?
Phase = "3H"
#What exact are we looking at? 1 or 2?
Exact_num = "2"
#this file is not going to distinguish between Agricultural Residue and Firewood
# these are for the raw files to average later
T_D_Fuel = []
T_D_KG_removed = []
T_D_Temp = []
T_D_Cook_comp = []
T_D_Cook_PM = []
T_D_Kit_comp = []
T_D_Kit_PM = []
T_d_set_fuel = []
#specific sensors for each household's metrics
ID_HH_m = []
HH_fuel_removed_for_phase = []
HH_avg_temp = []
HH_avg_cook_comp = []
HH_avg_Kit_comp = []
HH_sum_KIT_PM = []
HH_sum_cook_pm = []
HH_avg_cook_PM =[]
HH_avg_Kit_PM =[]
HH_std_temp = []
HH_std_cook_comp = []
HH_std_Kit_comp = []
HH_std_cook_PM =[]
HH_std_Kit_PM = []
HH_total_time_f_collection = []
#For the Day summary that is to be used later
#Day_met_path = "C:/Users/gvros/Desktop/Oregon State Masters/Work/OSU, CSC, CQC Project files/1N/Compiler/Raw_Day/Raw_D_metrics/1N_HH_raw_Day_metrics_1005.csv"
os.chdir("C:/Users/gvros/Desktop/Oregon State Masters/Work/OSU, CSC, CQC Project files/"+ Phase +"/Compiler_"+Exact_num+"_exact/Raw_D_metrics")
# For Hood portion
#os.chdir("C:/Users/gvros/Desktop/Oregon State Masters/Work/OSU, CSC, CQC Project files/2H/Compiler/Raw_Day/Raw_D_metrics")
# This was for hood portion
Day_met_path = os.getcwd()
csv_R_m = glob.glob(os.path.join(Day_met_path, "*.csv"))
for file in csv_R_m:
with open(file, 'r') as f:
csv_reader = csv.reader(f)
for idx, row in enumerate(csv_reader):
if '0' in row:
id_number_m = (row[1])
Fuel_type_m = (row[2])
Exact_stove_m = (row[3])
Kitchen_Hapex_m = (row[4])
Cook_hapex_m = (row[5])
elif 'Fuel Raw Data' in row:
data_start = idx
break
Day_data = pd.read_csv(file, skiprows=data_start)
Minutes_of_collection = len(Day_data.iloc[:, 1])
FUEL_removal = Day_data.iloc[:, 1]
FUEL_SET = []
count = 0
for a in FUEL_removal:
count = count + 1
if count == Minutes_of_collection:
break
elif a != FUEL_removal.iloc[count] or (count+1) == Minutes_of_collection:
FUEL_SET.append(a)
#Fuel Collection
if np.average(Day_data.iloc[:, 0]) != -1:
HH_KG_removed = FUEL_SET
T_D_Fuel.extend(Day_data.iloc[:, 0])
T_D_KG_removed.extend((Day_data.iloc[:, 1]))
HH_fuel_removed_for_phase.append(sum(HH_KG_removed))
T_d_set_fuel.extend(set(Day_data.iloc[:, 1]))
else:
HH_fuel_removed_for_phase.append(-1)
#Temperature Collection
if np.average(Day_data.iloc[:, 2]) != -1:
T_D_Temp.extend(Day_data.iloc[:, 2])
HH_avg_temp.append((int((np.average(Day_data.iloc[:, 2])) * 100)) / 100)
HH_std_temp.append((int((stat.stdev(Day_data.iloc[:, 2])) * 100)) / 100)
else:
HH_avg_temp.append(-1)
HH_std_temp.append(-1)
#Cook Hapex Collection
if np.average(Day_data.iloc[:, 3]) != -1:
T_D_Cook_comp.extend(Day_data.iloc[:, 3])
T_D_Cook_PM.extend(Day_data.iloc[:, 5])
HH_avg_cook_comp.append(int(((sum(Day_data.iloc[:, 3])) / Minutes_of_collection) * 100))
HH_sum_cook_pm.append((int((sum(Day_data.iloc[:, 5])) * 100)) / 100)
HH_avg_cook_PM.append((int((np.average(Day_data.iloc[:, 5])) * 100)) / 100)
HH_std_cook_PM.append((int((stat.stdev(Day_data.iloc[:, 5])) * 100)) / 100)
else:
HH_sum_cook_pm.append(-1)
HH_avg_cook_comp.append(-1)
HH_avg_cook_PM.append(-1)
HH_std_cook_PM.append(-1)
#Kitchen HAPEx Collection
if np.average(Day_data.iloc[:, 6]) != -1:
T_D_Kit_PM.extend(Day_data.iloc[:, 6])
T_D_Kit_comp.extend((Day_data.iloc[:, 4]))
HH_avg_Kit_comp.append(int(((sum(Day_data.iloc[:, 4])) / Minutes_of_collection) * 100))
HH_sum_KIT_PM.append((int((sum(Day_data.iloc[:, 6])) * 100)) / 100)
HH_avg_Kit_PM.append((int((np.average(Day_data.iloc[:, 6])) * 100)) / 100)
HH_std_Kit_PM.append((int((stat.stdev(Day_data.iloc[:, 6])) * 100)) / 100)
else:
HH_sum_KIT_PM.append(-1)
HH_avg_Kit_comp.append(-1)
HH_avg_Kit_PM.append(-1)
HH_std_Kit_PM.append(-1)
#Household identifiers
ID_HH_m.append(id_number_m)
HH_total_time_f_collection.append(Minutes_of_collection)
# ## Day Summary is next, has fuel removed per day, percentage of movement per day, percentage of kitchen comp moving
# #these are values that are going to be extended throughout the whole code
#
#
KG_Per_Day =[]
PC_Cook_Comp =[]
PC_Kit_com = []
#specfic sensors for each household_only over summary
ID_HH_s = []
Day_tally_s = []
HH_avg_Fuel_removed_per_day = []
HH_avg_Kit_comp_per_day = []
HH_avg_cook_comp_per_day = []
KG_removed_sum = []
#For the Day summary that is to be used later
os.chdir("C:/Users/gvros/Desktop/Oregon State Masters/Work/OSU, CSC, CQC Project files/"+ Phase +"/Compiler_"+Exact_num+"_exact/Raw_D_summary")
# For Hood portion
#os.chdir("C:/Users/gvros/Desktop/Oregon State Masters/Work/OSU, CSC, CQC Project files/2H/Compiler/Raw_Day/Raw_D_summary")
# For Hood portion
Day_sum_path = os.getcwd()
csv_R_s = glob.glob(os.path.join(Day_sum_path, "*.csv"))
for file_s in csv_R_s:
with open(file_s, 'r') as f:
csv_reader = csv.reader(f)
for idx, row in enumerate(csv_reader):
if '0' in row:
id_number_s = ((row[1]))
Fuel_type_s = (row[2])
Exact_stove_s = (row[3])
Kitchen_Hapex_s = (row[4])
Cook_hapex_s = (row[5])
elif 'Fuel Removed (FUEL)' in row:
data_start = idx
break
Day_SUM_data = pd.read_csv(file_s, skiprows=data_start)
#next is for specific day categories
counting_days = len(Day_SUM_data.iloc[:,0])
fuel_day_removal = list(set((Day_SUM_data.iloc[:,0])))
# Fuel Collection
if np.average(Day_SUM_data.iloc[:,0]) != -1:
KG_removed_sum.append((int((sum(fuel_day_removal))*100))/100)
#HH_avg_Fuel_removed_per_day.append(((int((KG_removed_sum) / counting_days)) * 1000) / 1000)
KG_Per_Day.extend(Day_SUM_data.iloc[:, 0])
else:
KG_removed_sum.append(-1)
#HH_avg_Fuel_removed_per_day.append(-1)
#Cook HAPEx Collection
if np.average(Day_SUM_data.iloc[:,1]) != -1:
Cook_Comp = Day_SUM_data.iloc[:,1]
PC_Cook_Comp.extend(Day_SUM_data.iloc[:, 1])
HH_avg_cook_comp_per_day.append(((int(sum(Cook_Comp) / counting_days)) * 1000) / 1000)
else:
HH_avg_cook_comp_per_day.append(-1)
#kitchen HAPEx Collection
if np.average(Day_SUM_data.iloc[:,2]) != -1:
KIT_comp = Day_SUM_data.iloc[:,2]
HH_avg_Kit_comp_per_day.append(((int(sum(KIT_comp) / counting_days)) * 1000) / 1000)
PC_Kit_com.extend(Day_SUM_data.iloc[:,2])
else:
HH_avg_Kit_comp_per_day.append(-1)
        # this is for the length of day; it does not take into account the installation or removal
Day_tally_s.append(counting_days)
#Household identifiers
ID_HH_s.append(id_number_s)
    # making a dictionary; the first one maps each HH to the numbers inside the list
    ## the HH number is used to append the correct metric to the right HH; this will have to be inside each csv loop
    # this first one is a tester for the first two HH, 1001 and 1005
print('-----------------------EVENT TIME-----------------------------')
# this next section is for the event to compile and to compare to others
# metrics to be compared to the others
### event is next
T_E_Fuel = []
T_E_KG_removed = []
T_E_Temp = []
T_E_Cook_comp = []
T_E_Cook_PM = []
T_E_Kit_comp = []
T_E_Kit_PM = []
ID_HH_EM = []
HH_fuel_removed_for_event = []
HH_Num_Events_observed = []
HH_E_avg_temp = []
HH_E_avg_cook_comp = []
HH_E_avg_Kit_comp = []
HH_E_avg_cook_PM =[]
HH_E_avg_Kit_PM =[]
HH_E_std_temp = []
HH_E_std_cook_comp = []
HH_E_std_Kit_comp = []
HH_E_std_cook_PM =[]
HH_E_std_Kit_PM =[]
HH_Cooking_length = []
os.chdir("C:/Users/gvros/Desktop/Oregon State Masters/Work/OSU, CSC, CQC Project files/"+ Phase +"/Compiler_"+Exact_num+"_exact/Raw_E_metrics")
Event_met_path = os.getcwd()
csv_E_m = glob.glob(os.path.join(Event_met_path, "*.csv"))
for file in csv_E_m:
with open(file, 'r') as f:
csv_reader = csv.reader(f)
for idx, row in enumerate(csv_reader):
if '0' in row:
id_number_E_m = (row[1])
Fuel_type_E_m = (row[2])
Exact_stove_m = (row[3])
Kitchen_Hapex_E_m = (row[4])
Cook_hapex_E_m = (row[5])
elif 'Fuel Raw Data' in row:
data_start = idx
break
Event_data = pd.read_csv(file, skiprows=data_start)
Minutes_of_collection = len(Event_data.iloc[:, 1])
Cooking_time = sum([a for a in Event_data.iloc[:, 1]])
# going to use the HH_summary_event data to get a total of all removed kg
#HH_KG_E_removed = ((int((sum(list(set(Event_data.iloc[:, 1])))) * 100)) / 100)
#Fuel Collection
if np.average(Event_data.iloc[:, 0]) != -1:
T_E_Fuel.extend(Event_data.iloc[:, 0])
T_E_KG_removed.extend((Event_data.iloc[:, 1]))
#HH_fuel_removed_for_event.append(sum(HH_KG_E_removed))
#temperature Collection
if np.average(Event_data.iloc[:, 2]) != -1:
T_E_Temp.extend(Event_data.iloc[:, 2])
HH_E_avg_temp.append((int((np.average(Event_data.iloc[:, 2])) * 100)) / 100)
HH_E_std_temp.append((int((stat.stdev(Event_data.iloc[:, 2])) * 100)) / 100)
else:
HH_E_avg_temp.append(-1)
HH_E_std_temp.append(-1)
#Cook HAPEx Collection
if np.average(Event_data.iloc[:, 3]) != -1:
T_E_Cook_comp.extend(Event_data.iloc[:, 3])
T_E_Cook_PM.extend(Event_data.iloc[:, 5])
HH_E_avg_cook_comp.append(int(((sum(Event_data.iloc[:, 3])) / Minutes_of_collection) * 100))
HH_E_avg_cook_PM.append((int((np.average(Event_data.iloc[:, 5])) * 100)) / 100)
HH_E_std_cook_PM.append((int((stat.stdev(Event_data.iloc[:, 5])) * 100)) / 100)
else:
HH_E_avg_cook_comp.append(-1)
HH_E_avg_cook_PM.append(-1)
HH_E_std_cook_PM.append(-1)
#Kitchen HAPEx
if np.average((Event_data.iloc[:, 4])) != -1:
T_E_Kit_comp.extend((Event_data.iloc[:, 4]))
T_E_Kit_PM.extend(Event_data.iloc[:, 6])
HH_E_avg_Kit_comp.append(int(((sum(Event_data.iloc[:, 4])) / Minutes_of_collection) * 100))
HH_E_avg_Kit_PM.append((int((np.average(Event_data.iloc[:, 6])) * 100)) / 100)
HH_E_std_Kit_PM.append((int((stat.stdev(Event_data.iloc[:, 6])) * 100)) / 100)
else:
HH_E_avg_Kit_comp.append(-1)
HH_E_avg_Kit_PM.append(-1)
HH_E_std_Kit_PM.append(-1)
        #household identifiers
ID_HH_EM.append(id_number_E_m)
HH_Cooking_length.append(Minutes_of_collection)
print(' does the percentage make sense', HH_E_avg_cook_comp)
print(Minutes_of_collection)
print(HH_E_avg_Kit_comp)
print(len(Event_data.iloc[:, 3]))
print(HH_avg_cook_PM)
    # this is starting the event summary file, which has: length of event, fuel removed, removed time,
    # inserted time, percentage of cook comp, and percentage of kitchen comp
#specifics for household over summary
ID_HH_Event_S = []
Event_Number_tally = []
KG_per_event = []
HH_PE_Cook_Comp =[]
HH_PE_Kit_com = []
HH_Time_Fuel_remove = []
HH_Time_Fuel_Insert = []
HH_avg_cooking_length = []
    # below is for the totals to be averaged out later for all households
T_E_Length_of_event = []
T_E_Fuel_used_Event = []
T_E_removed_Time = []
os.chdir("C:/Users/gvros/Desktop/Oregon State Masters/Work/OSU, CSC, CQC Project files/"+ Phase +"/Compiler_"+Exact_num+"_exact/Raw_E_summary")
Event_Sum_path = os.getcwd()
csv_E_S = glob.glob(os.path.join(Event_Sum_path, "*.csv"))
for file_s in csv_E_S:
with open(file_s, 'r') as f:
csv_reader = csv.reader(f)
for idx, row in enumerate(csv_reader):
if '0' in row:
id_number_E_s = (row[1])
Fuel_type_E_s = (row[2])
Exact_stove_E_s = (row[3])
Kitchen_Hapex_E_s = (row[4])
Cook_hapex_E_s = (row[5])
elif 'Fuel Removed (FUEL)' in row:
data_start = idx
break
Event_SUM_data = pd.read_csv(file_s, skiprows=data_start)
#First is a tally of the number of events
if np.average(Event_SUM_data.iloc[:,0]) != -1:
how_many_events = len(Event_SUM_data.iloc[:,0])
HH_avg_cooking_length.append(Event_SUM_data.iloc[:,0])
Event_Number_tally.append(how_many_events)
T_E_Length_of_event.extend(Event_SUM_data.iloc[:, 0])
else:
Event_Number_tally.append(-1)
HH_avg_cooking_length.append(-1)
ID_HH_Event_S.append(id_number_E_s)
#Fuel Collection
HH_KG_E_removed = ((int((sum(list(set(Event_SUM_data.iloc[:, 1])))) * 100)) / 100)
if np.average(Event_SUM_data.iloc[:,1]) != -1:
Fuel_removed = Event_SUM_data.iloc[:,1]
KG_per_event.append(((int(sum(Fuel_removed)/how_many_events))*1000)/1000)
HH_Time_Fuel_remove.append(Event_SUM_data.iloc[:, 2])
HH_Time_Fuel_Insert.append(Event_SUM_data.iloc[:, 3])
T_E_Fuel_used_Event.extend(Fuel_removed)
T_E_removed_Time.extend(Event_SUM_data.iloc[:, 2])
HH_fuel_removed_for_event.append((HH_KG_E_removed))
else:
KG_per_event.append(-1)
HH_Time_Fuel_remove.append([0,-1])
HH_Time_Fuel_Insert.append(-1)
HH_fuel_removed_for_event.append(-1)
#Cook HAPEx Collection
if np.average(Event_SUM_data.iloc[:,4]) != -1:
HH_PE_Cook_Comp.append(Event_SUM_data.iloc[:,4])
else:
HH_PE_Cook_Comp.append(-1)
#Kitchen HAPEx Collection
if np.average(Event_SUM_data.iloc[:,5]) != -1:
HH_PE_Kit_com.append(Event_SUM_data.iloc[:,5])
else:
HH_PE_Kit_com.append(-1)
    ## lastly, the last csv file is the first five minutes of each cooking event
print('----------------five metrics----------')
### we only care about the cook and kitchen PM
ID_Five_Event = []
T_Five_Cook_PM = []
T_Five_KIT_PM = []
HH_Avg_PP_five_cook = []
HH_Avg_PM_five_kit = []
HH_STD_PP_five_cook = []
HH_STD_PM_five_kit = []
os.chdir("C:/Users/gvros/Desktop/Oregon State Masters/Work/OSU, CSC, CQC Project files/"+ Phase +"/Compiler_"+Exact_num+"_exact/Raw_E_first_five")
# For Hood portion
#os.chdir("C:/Users/gvros/Desktop/Oregon State Masters/Work/OSU, CSC, CQC Project files/2H/Compiler/Raw_E_first_five")
# For Hood portion
Event_five_path = os.getcwd()
csv_E_5 = glob.glob(os.path.join(Event_five_path, "*.csv"))
for file_5 in csv_E_5:
with open(file_5, 'r') as f:
csv_reader = csv.reader(f)
for idx, row in enumerate(csv_reader):
if '0' in row:
id_number_5 = (row[1])
# #Fuel_type_5 = (row[2])
# Exact_stove_5 = (row[3])
# Kitchen_Hapex_5 = (row[4])
# Cook_hapex_5 = (row[5])
elif 'Fuel Removed (FUEL)' in row:
data_start = idx
break
Event_5_data = pd.read_csv(file_5, skiprows=data_start)
#Cook HAPEx Collection
if np.average(Event_5_data.iloc[:, 0]) != -1:
HH_Avg_PP_five_cook.append((int((np.average(Event_5_data.iloc[:, 0])) * 100)) / 100)
HH_STD_PP_five_cook.append((int((stat.stdev(Event_5_data.iloc[:, 0])) * 100)) / 100)
T_Five_Cook_PM.extend(Event_5_data.iloc[:, 0])
else:
HH_Avg_PP_five_cook.append(-1)
HH_STD_PP_five_cook.append(-1)
#Kitchen HAPEx Collection
if np.average(Event_5_data.iloc[:, 1]) != -1:
HH_Avg_PM_five_kit.append((int((np.average(Event_5_data.iloc[:, 1])) * 100)) / 100)
HH_STD_PM_five_kit.append((int((stat.stdev(Event_5_data.iloc[:, 1])) * 100)) / 100)
T_Five_KIT_PM.extend((Event_5_data.iloc[:, 1]))
else:
HH_Avg_PM_five_kit.append(-1)
HH_STD_PM_five_kit.append(-1)
from .base_processing import path_data, path_dictionary
import pandas as pd
"""
Features used :
100020 - FVC, FEV1, PEF
Errors features : None
Missing : None
"""
def read_spirometry_data(**kwargs):
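    # Usage sketch (hypothetical call, not from the original source):
    #   df_spiro = read_spirometry_data(nrows=10000)
    # returns one row per participant-instance, indexed as '<eid>_<instance>', with the
    # FVC/FEV1/PEF, age and sex columns; assumes path_data / path_dictionary from
    # .base_processing point at the UK Biobank basket CSV and the field dictionary.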
## deal with kwargs
nrows = None
if 'nrows' in kwargs.keys():
nrows = kwargs['nrows']
## Create feature name dict
df_features = pd.read_csv(path_dictionary, usecols = ["FieldID", "Field"])
df_features.set_index('FieldID', inplace = True)
feature_id_to_name = df_features.to_dict()['Field']
list_df = []
for instance in [0, 1, 2, 3]:
def custom_apply(row, instance):
flag_0 = row['3061-%s.0' % instance]
flag_1 = row['3061-%s.1' % instance]
flag_2 = row['3061-%s.2' % instance]
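            # fields 3061-<instance>.* look like per-blow acceptability flags; a value
            # of 0 or 32 is treated as a usable blow, and the first usable blow is kept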
if flag_0 == 0 or flag_0 == 32:
return pd.Series(row[['3064-%s.0' % instance, '3062-%s.0' % instance, '3063-%s.0' % instance] + ['21003-%s.0' % instance, '31-0.0']].values)
else:
if flag_1 == 0 or flag_1 == 32:
return pd.Series(row[['3064-%s.1' % instance, '3062-%s.1' % instance, '3063-%s.1' % instance] + ['21003-%s.0'% instance, '31-0.0']].values)
else :
if flag_2 == 0 or flag_2 == 32:
return pd.Series(row[['3064-%s.2' % instance, '3062-%s.2' % instance, '3063-%s.2'% instance] + ['21003-%s.0'% instance, '31-0.0']].values)
else:
                        return pd.Series([None, None, None] + list(row[['21003-%s.0' % instance, '31-0.0']].values))
cols = ['3064-%s.' % instance, '3062-%s.' % instance, '3063-%s.' % instance, '3061-%s.' % instance]
temp = pd.read_csv(path_data, nrows = nrows, usecols = [elem + str(int_) for elem in cols for int_ in range(3)] + ['eid', '21003-%s.0'% instance, '31-0.0']).set_index('eid')
temp.index = temp.index.rename('id')
temp = temp.apply(lambda row : custom_apply(row, instance = instance), axis = 1)
df = temp[~temp.isna().any(axis = 1)]
df.columns = ['3064-%s.0'% instance, '3062-%s.0'% instance, '3063-%s.0'% instance] + ['21003-%s.0'% instance, '31-0.0']
features_index = df.columns
features = []
for elem in features_index:
if elem != '21003-%s.0'% instance and elem != '31-0.0':
features.append(feature_id_to_name[int(elem.split('-')[0])] + elem.split('-')[1][-2:])
else:
features.append(feature_id_to_name[int(elem.split('-')[0])])
df.columns = features
df['eid'] = df.index
df.index = df.index.astype('str') + '_' + str(instance)
list_df.append(df)
    return pd.concat(list_df)
import torch
import pandas as pd
from tqdm import tqdm
import cv2
import numpy as np
from torch.utils.data import DataLoader, Dataset
from data import TestDataset,mask2rle
from albumentations import Compose,Normalize,HorizontalFlip,VerticalFlip
from model_kaggle import Unet
import torch.nn.functional as F
import warnings
warnings.filterwarnings("ignore")
def TTA(image,model):
    # average the predictions over multiple augmented versions of a test image (test-time augmentation)
    # batch size must be 1
    # input: image: [256,1600,3], torch.Tensor
    # output: pred_result: [4,256,1600], np.array
h,w,c = image.shape
horizon_trans = Compose([HorizontalFlip(p=1)])
vertical_trans = Compose([VerticalFlip(p=1)])
rotate_trans = Compose([HorizontalFlip(p=1),VerticalFlip(p=1)])
none_trans = Compose([])
trans_zoo = [horizon_trans,vertical_trans,rotate_trans,none_trans]
pred_total = np.empty((len(trans_zoo),h,w,4))
for i,tran in enumerate(trans_zoo):
#img->norm+trans->predict->pred_mask->re-trans
#numpy.array
img_aug = tran(image=image.numpy())['image'].squeeze() #[256,1600,3]
#img_aug = normal_trans(image=img_aug)['image'].squeeze()
img_aug = torch.from_numpy(img_aug).permute((2,0,1)).unsqueeze(0).cuda() #[1,3,256,1600]
pred_aug = model(img_aug)
pred_aug = F.sigmoid(pred_aug).detach().cpu().numpy()#[1,4,256,1600]
pred_aug = pred_aug.squeeze().transpose((1,2,0)) #[256,1600,4]
pred_recover = tran(image=pred_aug)['image'].squeeze() #[256,1600,4]
pred_total[i] = pred_recover
pred_result = np.mean(pred_total,axis=0) #[256,1600,4]
return pred_result.transpose((2,0,1)) #[4,256,1600]
def post_process(probability, threshold, min_size):
'''Post processing of each predicted mask, components with lesser number of pixels
than `min_size` are ignored'''
mask = (probability>threshold)
#cv2.threshold(probability, threshold, 1, cv2.THRESH_BINARY)[1]
num_component, component = cv2.connectedComponents(mask.astype(np.uint8))
predictions = np.zeros((256, 1600), np.float32)
num = 0
for c in range(1, num_component):
p = (component == c)
if p.sum() > min_size:
predictions[p] = 1
num += 1
return predictions, num
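# Minimal usage sketch for post_process (illustrative values only; the real per-class
# thresholds and minimum component sizes come from best_threshold / min_size below):
#   prob = np.random.rand(256, 1600).astype(np.float32)        # fake probability map
#   mask, n_regions = post_process(prob, threshold=0.5, min_size=800)
#   # mask is a binary (256, 1600) map keeping only components larger than min_size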
if __name__ == '__main__':
sample_submission_path = 'input/severstal-steel-defect-detection/sample_submission.csv'
test_data_folder = "input/severstal-steel-defect-detection/test_images"
# initialize test dataloader
best_threshold = [0.5,0.5,0.55,0.55]
num_workers = 6
batch_size = 4
print('best_threshold', best_threshold)
min_size = [800,2200,1000,3800]
    mean = (0.485, 0.456, 0.406)  # (0.39, 0.39, 0.39)
    std = (0.229, 0.224, 0.225)  # (0.17, 0.17, 0.17)
df = pd.read_csv(sample_submission_path)
testset = DataLoader(
TestDataset(test_data_folder, df, mean, std),
batch_size=batch_size,
shuffle=False,
num_workers=num_workers,
pin_memory=True
)
# Initialize mode and load trained weights
ckpt_path = "weights/model_RAdamsteppytorchBCE970.pth"
device = torch.device("cuda")
model = Unet("resnet18", encoder_weights=None, classes=4, activation=None).to(device)
model.eval()
state = torch.load(ckpt_path, map_location=lambda storage, loc: storage)
state = state["state_dict"]
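    # strip the 'module.' prefix that torch.nn.DataParallel prepends to parameter
    # names, so the weights load into a single-GPU model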
for k in list(state.keys()):
if 'module' == k.split('.')[0]:
state[k[7:]] = state.pop(k)
model.load_state_dict(state)
# start prediction
use_TTA = True
predictions = []
for i, batch in enumerate(tqdm(testset)):
fnames, images = batch
n, h, w, c = images.shape #[n,h,w,3]
if not use_TTA:
batch_preds = model(images.permute((0,3,1,2)).to(device))
batch_preds = F.sigmoid(batch_preds).detach().cpu().numpy() #[n,c,h,w]
else:
batch_preds = np.empty((n,4,h,w))
for b in range(images.shape[0]):
batch_preds[b] = TTA(images[b],model)
for fname, preds in zip(fnames, batch_preds): #preds:[c,h,w]
for cls, pred in enumerate(preds):
pred, _ = post_process(pred, best_threshold[cls], min_size[cls]) #pred:[h,w]
rle = mask2rle(pred)
name = fname + "_" + str(cls+1)#f"_{cls+1}"
predictions.append([name, rle])
# save predictions to submission.csv
    df = pd.DataFrame(predictions, columns=['ImageId_ClassId', 'EncodedPixels'])
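    # assumed completion of the 'save predictions to submission.csv' step noted above;
    # the output filename is an assumption, not part of the original snippet
    df.to_csv("submission.csv", index=False)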
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import os
from config import test_snr_dB
import pandas as pd
from scipy.stats import ttest_1samp
def plot_paper_results(folder_envtfs, folder_stft):
sns.set(style="whitegrid")
df_env = pd.read_csv('models\\' + folder_envtfs + '\\results.csv', sep=';')
df_stft = pd.read_csv('models\\' + folder_stft + '\\results.csv', sep=';')
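    # build an 'Original' (unprocessed) condition by reusing the 'orig.' metric columns
    # as the 'pred.' columns, so the unprocessed scores can be plotted alongside both models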
df_orig = df_env.copy()
df_orig = df_orig.drop(['eSTOI pred.'],axis=1)
df_orig = df_orig.drop(['PESQ pred.'],axis=1)
df_orig = df_orig.rename(columns={'eSTOI orig.':'eSTOI pred.'})
df_orig = df_orig.rename(columns={'PESQ orig.':'PESQ pred.'})
df_orig[' '] = 'Original'
df_env[' '] = 'ENV-TFS'
df_stft[' '] = 'STFT'
df = pd.concat([df_orig, df_stft, df_env])
sns.set(style="ticks",font='STIXGeneral')
fig = plt.figure(figsize=(11, 4.5))
size=16
plt.subplot(121)
ax = sns.boxplot(x='SNR', y='eSTOI pred.', hue=' ', data=df, fliersize=1)
plt.xlabel('SNR (dB)', {'size': size})
plt.ylabel('eSTOI', {'size': size})
ax.legend_.remove()
# ax.yaxis.grid(True, linestyle='-', which='major', color='lightgrey', alpha=0.8)
ax.tick_params(labelsize=size)
lines, labels = ax.get_legend_handles_labels()
# fig.legend(lines, labels, loc='upper center')
fig.legend(lines, labels, loc='upper center', bbox_to_anchor=(0.53, 0.10), shadow = False, ncol = 3, prop={'size': size-3})
plt.tight_layout()
# plt.savefig('fig4.1_estoi_total.pdf',dpi=2000)
# plt.show()
# plt.figure(figsize=(11, 4.5))
plt.subplot(122)
ax = sns.boxplot(x='SNR', y='PESQ pred.', hue=' ', data=df, fliersize=1)
ax.legend_.remove()
ax.tick_params(labelsize=size)
# ax.legend(loc='upper center', bbox_to_anchor=(0.5, 1.11), ncol = 3)
plt.xlabel('SNR (dB)',{'size': size})
plt.ylabel('PESQ', {'size': size})
# ax.yaxis.grid(True, linestyle='-', which='major', color='lightgrey', alpha=0.8)
plt.tight_layout()
plt.savefig('fig4_estoi_pesq_total.pdf',dpi=2000)
plt.show()
# multi plot
sns.set(style="ticks",font='STIXGeneral',font_scale=1.3)
g = sns.relplot(x="SNR", y="eSTOI pred.", hue = " ", col = "Noise", data = df, kind = "line",
col_wrap=5, height=2.5, aspect=0.8, legend='full')
# plt.tight_layout()
g.fig.subplots_adjust(wspace=0.10)
g.set_ylabels('eSTOI')
g.set_xlabels('SNR (dB)')
g.set(xticks=[-6, 0, 6])
g.set(xlim=(min(test_snr_dB), max(test_snr_dB)))
g.set(ylim=(0, 1))
g.set_titles("{col_name}",)
# for a in g.axes:
# a.axhline(a.get_yticks()[1], alpha=0.5, color='grey')
leg = g._legend
leg.set_bbox_to_anchor([0.84, 0.86]) # coordinates of lower left of bounding box
leg._loc = 1
plt.savefig('fig5_estoi_per_noise.pdf',bbox_inches='tight',dpi=2000)
plt.show()
# eSTOI increase histogram
plt.figure()
ax = sns.distplot(df_env['eSTOI pred.'] - df_env['eSTOI orig.'], kde_kws={"shade": True}, norm_hist=True, label='ENV-TFS')
sns.distplot(df_stft['eSTOI pred.'] - df_stft['eSTOI orig.'], kde_kws={"shade": True}, norm_hist=True, label='STFT')
plt.legend()
vals = ax.get_xticks()
ax.set_xticklabels(['{:,.0%}'.format(x) for x in vals])
plt.xlabel('eSTOI increase')
plt.ylabel('density')
plt.tight_layout()
plt.show()
# PESQ increase per snr histogram
# ax = sns.kdeplot(df_env['SNR'], df_env['PESQ pred.'] - df_env['PESQ orig.'], cmap="Reds", shade=True,shade_lowest=False, label='ENV')
# sns.kdeplot(df_stft['SNR'], df_stft['PESQ pred.'] - df_stft['PESQ orig.'], cmap="Blues", shade=True,shade_lowest=False, label='STFT')
ax = sns.distplot(df_env['PESQ pred.'] - df_env['PESQ orig.'], kde_kws={"shade": True}, norm_hist=True,
label='ENV-TFS')
sns.distplot(df_stft['PESQ pred.'] - df_stft['PESQ orig.'], kde_kws={"shade": True}, norm_hist=True, label='STFT')
plt.legend()
vals = ax.get_xticks()
plt.xlabel('PESQ increase')
plt.ylabel('density')
plt.tight_layout()
plt.show()
return
def plot_matlab_results(folder_envtfs, folder_stft):
df_env1 = pd.read_excel('models\\' + folder_envtfs + '\\HA_1.xls')
df_env2 = pd.read_excel('models\\' + folder_envtfs + '\\HA_2.xls')
df_env3 = pd.read_excel('models\\' + folder_envtfs + '\\HA_3.xls')
df_env4 = pd.read_excel('models\\' + folder_envtfs + '\\HA_4.xls')
df_env5 = pd.read_excel('models\\' + folder_envtfs + '\\HA_5.xls')
df_env6 = pd.read_excel('models\\' + folder_envtfs + '\\HA_6.xls')
df_stft1 = pd.read_excel('models\\' + folder_stft + '\\HA_1.xls')
df_stft2 = pd.read_excel('models\\' + folder_stft + '\\HA_2.xls')
df_stft3 = pd.read_excel('models\\' + folder_stft + '\\HA_3.xls')
df_stft4 = pd.read_excel('models\\' + folder_stft + '\\HA_4.xls')
df_stft5 = pd.read_excel('models\\' + folder_stft + '\\HA_5.xls')
df_stft6 = pd.read_excel('models\\' + folder_stft + '\\HA_6.xls')
df_env1['Profile'] = 'HL1'
df_env2['Profile'] = 'HL2'
df_env3['Profile'] = 'HL3'
df_env4['Profile'] = 'HL4'
df_env5['Profile'] = 'HL5'
df_env6['Profile'] = 'HL6'
df_stft1['Profile'] = 'HL1'
df_stft2['Profile'] = 'HL2'
df_stft3['Profile'] = 'HL3'
df_stft4['Profile'] = 'HL4'
df_stft5['Profile'] = 'HL5'
df_stft6['Profile'] = 'HL6'
df_env = pd.concat([df_env1, df_env2, df_env3, df_env4, df_env5, df_env6])
df_stft = pd.concat([df_stft1, df_stft2, df_stft3, df_stft4, df_stft5, df_stft6])
df_envtemp = [df_env1, df_env2, df_env3, df_env4, df_env5, df_env6]
df_stftemp = [df_stft1, df_stft2, df_stft3, df_stft4, df_stft5, df_stft6]
for i in range(6):
df = df_envtemp[i]
dfstft = df_stftemp[i]
print('HASPI', i+1)
print("Origin: %.1f ± %.1f" % (100* df.mean()['HASPI_orig'], 100*df.std()['HASPI_orig']))
print("STFT: %.1f ± %.1f" %(100* dfstft.mean()['HASPI_predi'], 100*dfstft.std()['HASPI_predi']))
print("ENVTFS: %.1f ± %.1f" %(100* df.mean()['HASPI_predi'], 100*df.std()['HASPI_predi']))
for i in range(6):
df = df_envtemp[i]
dfstft = df_stftemp[i]
print('HASQI', i + 1)
print("Origin: %.1f ± %.1f" % (100 * df.mean()['HASQI_orig'], 100 * df.std()['HASQI_orig']))
print("STFT: %.1f ± %.1f" %(100* dfstft.mean()['HASqI_predi'], 100*dfstft.std()['HASqI_predi']))
print("ENVTFS: %.1f ± %.1f" % (100 * df.mean()['HASqI_predi'], 100 * df.std()['HASqI_predi']))
df_orig = df_env.copy()
df_orig = df_orig.drop(['HASPI_predi'], axis=1)
df_orig = df_orig.rename(columns={'HASPI_orig': 'HASPI_predi'})
df_orig[' '] = 'Original'
df_env[' '] = 'ENV-TFS'
df_stft[' '] = 'STFT'
df = pd.concat([df_orig, df_stft, df_env])
sns.set(style="ticks", font='STIXGeneral', font_scale=1.3)
g = sns.relplot(x="snrs", y="HASPI_predi", hue=' ', col="Profile", data=df, kind="line",
col_wrap=3, height=2.5, aspect=0.8, legend='full')
# plt.tight_layout()
g.fig.subplots_adjust(wspace=0.10)
g.set_ylabels('HASPI')
g.set_xlabels('SNR (dB)')
g.set(xticks=[-6, 0, 6])
g.set(xlim=(min(test_snr_dB), max(test_snr_dB)))
g.set(ylim=(0, 1))
g.set_titles("{col_name}", )
# for a in g.axes:
# a.axhline(a.get_yticks()[1], alpha=0.5, color='grey')
leg = g._legend
leg.set_bbox_to_anchor([0.89, 0.84]) # coordinates of lower left of bounding box
leg._loc = 1
from matplotlib.transforms import Bbox
plt.savefig('fig6_haspi_per_audiogram.pdf', bbox_inches=Bbox([[0., 0.], [6.8, 5.]]),dpi=2000)
plt.show()
def print_matlab_results(folder_envtfs, folder_stft):
df_env1 = pd.read_excel('models\\' + folder_envtfs + '\\HA_1.xls')
df_env2 = pd.read_excel('models\\' + folder_envtfs + '\\HA_2.xls')
df_env3 = pd.read_excel('models\\' + folder_envtfs + '\\HA_3.xls')
df_env4 = pd.read_excel('models\\' + folder_envtfs + '\\HA_4.xls')
df_env5 = pd.read_excel('models\\' + folder_envtfs + '\\HA_5.xls')
df_env6 = pd.read_excel('models\\' + folder_envtfs + '\\HA_6.xls')
df_stft1 = pd.read_excel('models\\' + folder_stft + '\\HA_1.xls')
df_stft2 = pd.read_excel('models\\' + folder_stft + '\\HA_2.xls')
df_stft3 = pd.read_excel('models\\' + folder_stft + '\\HA_3.xls')
df_stft4 = pd.read_excel('models\\' + folder_stft + '\\HA_4.xls')
df_stft5 = pd.read_excel('models\\' + folder_stft + '\\HA_5.xls')
df_stft6 = pd.read_excel('models\\' + folder_stft + '\\HA_6.xls')
df_env1['Profile'] = 'HA1'
df_env2['Profile'] = 'HA2'
df_env3['Profile'] = 'HA3'
df_env4['Profile'] = 'HA4'
df_env5['Profile'] = 'HA5'
df_env6['Profile'] = 'HA6'
df_stft1['Profile'] = 'HA1'
df_stft2['Profile'] = 'HA2'
df_stft3['Profile'] = 'HA3'
df_stft4['Profile'] = 'HA4'
df_stft5['Profile'] = 'HA5'
df_stft6['Profile'] = 'HA6'
df_env = pd.concat([df_env1, df_env2, df_env3, df_env4, df_env5, df_env6])
    df_stft = pd.concat([df_stft1, df_stft2, df_stft3, df_stft4, df_stft5, df_stft6])
import os
import pandas as pd
import argparse
import ujson
from bs4 import BeautifulSoup
import re
import datetime
import warnings
warnings.simplefilter("ignore")
""" GETS INPUT READY FOR INDICIO """
def parse_arguments():
parser = argparse.ArgumentParser(description="reads in original data files / directories")
parser.add_argument('orgdata', type=str, default='')
parser.add_argument('savedir', type=str, default='')
parser.add_argument('reldata', type=str, default='')
parser.add_argument('outputfile', type=str, default='')
args = parser.parse_args()
return args
def load_file(filePath, saveData, relData, outputFile):
# file = pd.read_csv()
fname, ext = os.path.splitext(filePath)
dictionary = {}
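    # dictionary maps each submission/comment ID to [cleaned text, post datetime,
    # value looked up in the reldata JSON] for JSON input, or to the raw text for CSV/TSV input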
if ext == '.json':
data = ujson.loads(open(filePath).read())
rel = ujson.loads(open(relData).read())
for d1 in data:
sid = d1.get('SubmissionID')
title = d1.get('SubmissionTitle')
cleantext = BeautifulSoup(title).get_text()
cleantext = re.sub("[^a-zA-Z]", " ", cleantext)
dictionary[sid] = [cleantext, convertunixtodate(d1.get('SubmissionTime')), rel.get(sid)]
com = d1.get("Comments")
for d2 in com:
cid = d2.get("CommentID")
comtext = d2.get('CommentText')
comtext = BeautifulSoup(comtext).get_text()
comtext = re.sub("'", "", comtext)
comtext = re.sub("[^a-zA-Z]", " ", comtext)
dictionary[cid] = [comtext, convertunixtodate(d2.get('CommentTime')), rel.get(cid)]
elif ext == '.csv' or ext == '.tsv':
data = pd.read_csv(filePath, header=0, index_col=[], delimiter=",", quoting=1, encoding='latin1')
for row in data.itertuples():
if (not (pd.isnull(row.id) or pd.isnull(row.text))):
dictionary[row.id] = row.text
else:
pathsList = []
if os.path.isdir(filePath):
for file in os.listdir(filePath):
if os.path.splitext(file)[-1] == '.csv' or os.path.splitext(file)[-1] == '.tsv' or os.path.splitext(file)[-1] == '.txt':
pathsList.append(os.path.join(filePath, file))
else:
filePath = filePath.strip("[")
filePath = filePath.strip("]")
pathsList = (filePath).split(',')
for path in pathsList:
print("read: " + path)
            data = pd.read_table(path, header=0, delimiter="\t", encoding='latin1')
import pandas as pd
import numpy as np
# 12.1 Categorical Data
s = pd.Series([1, 2, 2, 3, 3, 3, 5])
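# A possible continuation for this section (assumed, not from the original source):
# convert the Series to pandas' Categorical dtype and inspect its categories and codes.
cat_s = s.astype('category')
print(cat_s.cat.categories)      # unique categories: [1, 2, 3, 5]
print(cat_s.cat.codes.tolist())  # integer codes: [0, 1, 1, 2, 2, 2, 3]
print(s.value_counts())          # frequency of each unique value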
import pytest
import sys
import numpy as np
import swan_vis as swan
import networkx as nx
import math
import pandas as pd
###########################################################################
##################### Related to adding metadata ##########################
###########################################################################
class TestMetadata(object):
# test add_metadata - one after the other with dupe cols
# yes overwrite
def test_add_metadata_4(self):
sg = swan.SwanGraph()
db = 'files/chr11_and_Tcf3_no_gname.db'
sg.add_transcriptome(db)
ab = 'files/chr11_and_Tcf3_talon_abundance.tsv'
sg.add_abundance(ab)
meta = 'files/chr11_and_Tcf3_metadata.tsv'
sg.add_metadata(meta)
meta = 'files/chr11_and_Tcf3_metadata_dupecols.tsv'
sg.add_metadata(meta, overwrite=True)
assert {'3','4'} == set(sg.adata.obs.cluster.tolist())
# test add_metadata - one after the other with dupe cols
    # don't overwrite
def test_add_metadata_3(self):
sg = swan.SwanGraph()
db = 'files/chr11_and_Tcf3_no_gname.db'
sg.add_transcriptome(db)
ab = 'files/chr11_and_Tcf3_talon_abundance.tsv'
sg.add_abundance(ab)
meta = 'files/chr11_and_Tcf3_metadata.tsv'
sg.add_metadata(meta)
meta = 'files/chr11_and_Tcf3_metadata_dupecols.tsv'
sg.add_metadata(meta, overwrite=False)
assert {'2', '1'} == set(sg.adata.obs.cluster.tolist())
# test add_metadata - one after the other
def test_add_metadata_2(self):
sg = swan.SwanGraph()
# just gencode vM21 chr 11 and tcf3
gtf = 'files/chr11_and_Tcf3.gtf'
sg.add_annotation(gtf, verbose=True)
db = 'files/chr11_and_Tcf3_no_gname.db'
sg.add_transcriptome(db)
ab = 'files/chr11_and_Tcf3_talon_abundance.tsv'
sg.add_abundance(ab)
meta = 'files/chr11_and_Tcf3_metadata.tsv'
sg.add_metadata(meta)
meta = 'files/chr11_and_Tcf3_metadata_2.tsv'
sg.add_metadata(meta)
test = sg.adata.obs
data = [['D12', '1', 'K562', 'G0'],
['PB65_B017', '2', 'GM12878', 'M'],
['PB65_B018', '2', 'GM12878', 'S']]
cols = ['dataset', 'cluster', 'sample', 'cell_state']
ctrl = pd.DataFrame(data=data, columns=cols)
ctrl.index = ctrl.dataset
ctrl = ctrl[test.columns]
ctrl.sort_index(inplace=True)
test.sort_index(inplace=True)
print('test')
print(test)
print('control')
print(ctrl)
assert test.equals(ctrl)
# test add_metadata - vanilla
def test_add_metadata(self):
sg = swan.SwanGraph()
# just gencode vM21 chr 11 and tcf3
# gtf = 'files/chr11_and_Tcf3.gtf'
# sg.add_annotation(gtf, verbose=True)
db = 'files/chr11_and_Tcf3_no_gname.db'
sg.add_transcriptome(db)
# print(sg.t_df)
ab = 'files/chr11_and_Tcf3_talon_abundance.tsv'
sg.add_abundance(ab)
meta = 'files/chr11_and_Tcf3_metadata.tsv'
sg.add_metadata(meta)
test = sg.adata.obs
data = [['D12', '1', 'K562'],
['PB65_B017', '2', 'GM12878'],
['PB65_B018', '2', 'GM12878']]
cols = ['dataset', 'cluster', 'sample']
ctrl = pd.DataFrame(data=data, columns=cols)
ctrl.index = ctrl.dataset
ctrl = ctrl[test.columns]
ctrl.sort_index(inplace=True)
test.sort_index(inplace=True)
print('test')
print(test)
print('control')
print(ctrl)
assert test.equals(ctrl)
###########################################################################
############### Related to high-level dataset addition ####################
###########################################################################
class TestDataset(object):
# TODO
# add_dataset, add_transcriptome, add_annotation
# tests add_transcriptome - added after adding an annotation
def test_add_transcriptome_2(self):
sg = swan.SwanGraph()
sg.add_annotation('files/test_full_annotation.gtf')
sg.add_transcriptome('files/test_full.gtf')
# t_df
sg.t_df = sg.t_df[['tid', 'annotation']]
data = [['test1', True],
['test2', True],
['test3', False],
['test4', True],
['test5', True],
['test6', True]]
cols = ['tid', 'annotation']
ctrl_t_df = pd.DataFrame(data=data, columns=cols)
ctrl_t_df = swan.create_dupe_index(ctrl_t_df, 'tid')
ctrl_t_df = swan.set_dupe_index(ctrl_t_df, 'tid')
# first order to make them comparable
# sort all values by their IDs
sg.t_df.sort_index(inplace=True)
ctrl_t_df.sort_index(inplace=True)
# and order columns the same way
ctrl_t_df = ctrl_t_df[sg.t_df.columns]
print('test')
print(sg.t_df)
print('control')
print(ctrl_t_df)
assert (sg.t_df == ctrl_t_df).all(axis=0).all()
# loc_df - new location at chr2, 65
print('test')
print(sg.loc_df)
ind = (sg.loc_df.chrom=='chr2')&(sg.loc_df.coord==65)
temp = sg.loc_df.loc[ind, 'annotation'].to_frame()
for i, entry in temp.iterrows():
assert entry.annotation == False
temp = sg.loc_df.loc[~ind]
for i, entry in temp.iterrows():
assert entry.annotation == True
# tests add_transcriptome - vanilla
def test_add_transcriptome_1(self):
sg = swan.SwanGraph()
sg.add_transcriptome('files/test_full.gtf')
# tests add_annotation - transcriptome already in SG
def test_add_annotation_2(self):
sg = swan.SwanGraph()
sg.add_transcriptome('files/test_full.gtf')
sg.add_annotation('files/test_full_annotation.gtf')
# t_df
sg.t_df = sg.t_df[['tid', 'annotation']]
data = [['test1', True],
['test2', True],
['test3', False],
['test4', True],
['test5', True],
['test6', True]]
cols = ['tid', 'annotation']
ctrl_t_df = pd.DataFrame(data=data, columns=cols)
ctrl_t_df = swan.create_dupe_index(ctrl_t_df, 'tid')
ctrl_t_df = swan.set_dupe_index(ctrl_t_df, 'tid')
# first order to make them comparable
# sort all values by their IDs
sg.t_df.sort_index(inplace=True)
ctrl_t_df.sort_index(inplace=True)
# and order columns the same way
ctrl_t_df = ctrl_t_df[sg.t_df.columns]
print('test')
print(sg.t_df)
print('control')
print(ctrl_t_df)
assert (sg.t_df == ctrl_t_df).all(axis=0).all()
# loc_df - new location at chr2, 65
print('test')
print(sg.loc_df)
ind = (sg.loc_df.chrom=='chr2')&(sg.loc_df.coord==65)
temp = sg.loc_df.loc[ind, 'annotation'].to_frame()
for i, entry in temp.iterrows():
assert entry.annotation == False
temp = sg.loc_df.loc[~ind]
for i, entry in temp.iterrows():
assert entry.annotation == True
# tests add_annotation - vanilla
def test_add_annotation_1(self):
sg = swan.SwanGraph()
sg.add_annotation('files/test_full_annotation.gtf')
# # loc_df
# data = [['chr1', 1, 0, True],
# ['chr1', 20, 1, True],
# ['chr1', 25, 2, True],
# ['chr1', 30, 3, True],
# ['chr1', 35, 4, True],
# ['chr1', 40, 5, True],
# ['chr2', 45, 6, True],
# ['chr2', 50, 7, True],
# ['chr2', 60, 8, True],
# ['chr2', 75, 10, True],
# ['chr2', 80, 11, True],
# ['chr2', 100, 12, True],
# ['chr2', 110, 13, True]]
# cols = ['chrom', 'coord', 'vertex_id', 'annotation']
# ctrl_loc_df = pd.DataFrame(data=data, columns=cols)
# ctrl_loc_df = swan.create_dupe_index(ctrl_loc_df, 'vertex_id')
# ctrl_loc_df = swan.set_dupe_index(ctrl_loc_df, 'vertex_id')
#
# print('test')
# print(sg.loc_df)
# print('ctrl')
# print(ctrl_loc_df)
#
# print(sg.edge_df)
# assert 1 == 0
# # edge_df
# data = [[0, 1, '+', 'exon', 0, True],
# [1, 2],
# [2, 3],
# [3, 4],
# [4, 5],
# [5, 6],
# [6, 7],
#
#
# ]
# cols = ['v1', 'v2', 'strand', 'edge_type', 'annotation']
#
# # t_df
# data = [['test1', 'test1_tname', 'test1_gid', 'test1_gname', [0,1,2,3,4]], [0,1,2,3,4,5], True],
# ['test2', 'test2_tname', 'test2_gid', 'test2_gname', [5,6,7,8,9], [12,11,10,8,7,6], True],
# ['test4', 'test4_tname', 'test4_gid', 'test4_gname', [10], [6,7], True],
# ['test5', 'test5_tname', 'test2_gid', 'test2_gname', [5,11,12], [12,11,8,7], True],
# ['test6', 'test6_tname', 'test2_gid', 'test2_gname', [,6,7,8,9], [13,11,10,8,7,6], True]]
# cols = ['tid', 'tname', 'gid', 'gname', 'path', 'loc_path', 'annotation']
#
assert sg.annotation == True
assert 'annotation' in sg.t_df.columns
assert 'annotation' in sg.edge_df.columns
assert 'annotation' in sg.loc_df.columns
for ind, entry in sg.t_df.iterrows():
assert entry.annotation == True
assert entry.novelty == 'Known'
for ind, entry in sg.edge_df.iterrows():
assert entry.annotation == True
for ind, entry in sg.loc_df.iterrows():
assert entry.annotation == True
# tests:, label_annotated
# label annotated transcripts
def test_label_annotated(self):
sg = swan.SwanGraph()
data = [[0, [0,1]],
[1, [2,3]],
[2, [4,5]]]
sg.t_df = pd.DataFrame(data=data, columns=['tid', 'path'])
data = [[0,0,1], [1,1,2], [2,2,3], [3,3,4],
[4,4,5], [5,5,6]]
sg.edge_df = pd.DataFrame(data=data, columns=['edge_id', 'v1', 'v2'])
data = [0,1,2,3,4,5,6]
sg.loc_df = pd.DataFrame(data=data, columns=['vertex_id'])
tids = [0,1]
sg.label_annotated(tids)
ctrl_tids = [0,1]
tids = sg.t_df.loc[sg.t_df.annotation == True, 'tid'].tolist()
assert set(ctrl_tids) == set(tids)
ctrl_edges = [0,1,2,3]
edges = sg.edge_df.loc[sg.edge_df.annotation == True, 'edge_id'].tolist()
assert set(ctrl_edges) == set(edges)
ctrl_locs = [0,1,2,3,4]
locs = sg.loc_df.loc[sg.loc_df.annotation == True, 'vertex_id'].tolist()
assert set(ctrl_locs) == set(locs)
# add to empty sg, don't add isms
def test_add_transcriptome(self):
sg = swan.SwanGraph()
sg.add_transcriptome('files/test_novel_talon.gtf', include_isms=False)
print(sg.t_df)
assert "ISM" not in sg.t_df.novelty.unique()
# assert 1 == 0
# tests if correct error is thrown when adding annotation to
# sg that already has one
def test_add_annotation_already(self):
sg = swan.SwanGraph()
sg.annotation = True
with pytest.raises(Exception) as e:
sg.add_annotation('files/Canx.gtf')
assert 'Annotation already' in str(e.value)
# add annotation to empty sg
def test_add_annotation_empty_sg(self):
sg = swan.SwanGraph()
sg.add_annotation('files/test_full.gtf')
# check annotation columns
assert all(sg.t_df.annotation.tolist())
assert all(sg.edge_df.annotation.tolist())
assert all(sg.loc_df.annotation.tolist())
# check novelty column in t_df
assert len(sg.t_df.loc[sg.t_df.novelty=='Known']) == len(sg.t_df.index)
# check annotation flag
assert sg.annotation == True
# add annotation to sg with data already in it
def test_add_annotation_sg_data(self):
sg = swan.SwanGraph()
sg.add_transcriptome('files/test_novel.gtf')
sg.add_annotation('files/test_known.gtf')
# t_df
annot_tids = ['test1', 'test2', 'test4']
assert all(sg.t_df.loc[annot_tids, 'annotation'])
ctrl_novel_tids = ['test3', 'test5']
novel_tids = sg.t_df.loc[sg.t_df.annotation == False, 'tid'].tolist()
assert len(set(ctrl_novel_tids)-set(novel_tids)) == 0
assert len(ctrl_novel_tids) == len(novel_tids)
# make sure the novelty assignment worked
annot_tids = sg.t_df.loc[sg.t_df.annotation == True, 'tid'].tolist()
known_tids = sg.t_df.loc[sg.t_df.novelty == 'Known', 'tid'].tolist()
assert set(annot_tids) == set(known_tids)
annot_tids = sg.t_df.loc[sg.t_df.annotation == False, 'tid'].tolist()
known_tids = sg.t_df.loc[sg.t_df.novelty == 'Undefined', 'tid'].tolist()
assert set(annot_tids) == set(known_tids)
# loc_df
ctrl_novel_locs = [('chr2', 65)]
temp = sg.loc_df[sg.loc_df.annotation == False]
chroms = temp.chrom.tolist()
coords = temp.coord.tolist()
novel_locs = [(chrom, coord) for chrom, coord in zip(chroms, coords)]
print('control')
print(ctrl_novel_locs)
print('test')
print(novel_locs)
assert len(set(ctrl_novel_locs)-set(novel_locs)) == 0
assert len(novel_locs) == len(ctrl_novel_locs)
# edge_df
edge_df = sg.add_edge_coords()
edge_df = edge_df.loc[edge_df.annotation == False]
ctrl_novel_edges = [('chr2', 75, 65, '-', 'exon'),
('chr2', 65, 50, '-', 'intron'),
('chr2', 80, 60, '-', 'intron'),
('chr2', 60, 50, '-', 'exon')]
chroms = edge_df.chrom.tolist()
v1s = edge_df.v1_coord.tolist()
v2s = edge_df.v2_coord.tolist()
strands = edge_df.strand.tolist()
etypes = edge_df.edge_type.tolist()
novel_edges = [(chrom,v1,v2,strand,etype) for chrom,v1,v2,strand,etype \
in zip(chroms,v1s,v2s,strands,etypes)]
print('control')
print(ctrl_novel_edges)
print('test')
print(novel_edges)
assert len(set(ctrl_novel_edges)-set(novel_edges)) == 0
assert len(ctrl_novel_edges) == len(novel_edges)
# add annotation to sg with data where data contains dupe transcript
def test_add_annotation_sg_data_dupe_tid(self):
sg = swan.SwanGraph()
sg.add_transcriptome('files/test_novel_1.gtf')
sg.add_annotation('files/test_known.gtf')
# check with coord/chr bc of reindexing fuckery not being
        # reimplemented yet
# t_df
annot_tids = ['test1', 'test2', 'test4']
assert all(sg.t_df.loc[annot_tids, 'annotation'])
ctrl_novel_tids = ['test3', 'test5']
novel_tids = sg.t_df.loc[sg.t_df.annotation == False, 'tid'].tolist()
assert len(set(ctrl_novel_tids)-set(novel_tids)) == 0
assert len(ctrl_novel_tids) == len(novel_tids)
# make sure the novelty assignment worked
annot_tids = sg.t_df.loc[sg.t_df.annotation == True, 'tid'].tolist()
known_tids = sg.t_df.loc[sg.t_df.novelty == 'Known', 'tid'].tolist()
assert set(annot_tids) == set(known_tids)
annot_tids = sg.t_df.loc[sg.t_df.annotation == False, 'tid'].tolist()
known_tids = sg.t_df.loc[sg.t_df.novelty == 'Undefined', 'tid'].tolist()
assert set(annot_tids) == set(known_tids)
# loc_df
ctrl_novel_locs = [('chr2', 65)]
temp = sg.loc_df[sg.loc_df.annotation == False]
chroms = temp.chrom.tolist()
coords = temp.coord.tolist()
novel_locs = [(chrom, coord) for chrom, coord in zip(chroms, coords)]
print('control')
print(ctrl_novel_locs)
print('test')
print(novel_locs)
assert len(set(ctrl_novel_locs)-set(novel_locs)) == 0
assert len(novel_locs) == len(ctrl_novel_locs)
# edge_df
edge_df = sg.add_edge_coords()
edge_df = edge_df.loc[edge_df.annotation == False]
ctrl_novel_edges = [('chr2', 75, 65, '-', 'exon'),
('chr2', 65, 50, '-', 'intron'),
('chr2', 80, 60, '-', 'intron'),
('chr2', 60, 50, '-', 'exon')]
chroms = edge_df.chrom.tolist()
v1s = edge_df.v1_coord.tolist()
v2s = edge_df.v2_coord.tolist()
strands = edge_df.strand.tolist()
etypes = edge_df.edge_type.tolist()
novel_edges = [(chrom,v1,v2,strand,etype) for chrom,v1,v2,strand,etype \
in zip(chroms,v1s,v2s,strands,etypes)]
print('control')
print(ctrl_novel_edges)
print('test')
print(novel_edges)
assert len(set(ctrl_novel_edges)-set(novel_edges)) == 0
assert len(ctrl_novel_edges) == len(novel_edges)
###########################################################################
###################### Related to file parsing ############################
###########################################################################
class TestFiles(object):
# tests GTF parsing
def test_parse_gtf(self):
gtf_file = 'files/Canx.gtf'
t_df, exon_df, from_talon = swan.parse_gtf(gtf_file, True, False)
t_df.index.name = 'tid_index'
t_df = t_df.sort_values(by='tid_index')
ctrl_t_df = pd.read_csv('files/Canx_transcript.tsv',sep='\t')
ctrl_t_df.set_index('tid_index', inplace=True)
ctrl_t_df = ctrl_t_df.sort_values(by='tid_index')
ctrl_exons = ctrl_t_df.exons.tolist()
ctrl_exons = [exons.split(',') for exons in ctrl_exons]
ctrl_t_df['exons'] = ctrl_exons
print(t_df == ctrl_t_df)
assert (t_df == ctrl_t_df).all(axis=0).all()
# tests TALON DB parsing - no pass_list
def test_parse_db_1(self):
db_file = 'files/test_full.db'
pass_list = 'files/test_full_pass_list.csv'
t_df, edge_df = swan.parse_db(db_file, None, False, True, False)
ctrl_t_df, ctrl_e_df = get_test_transcript_exon_dicts()
for key, item in ctrl_t_df.items():
item['exons'] = swan.reorder_exons(item['exons'])
ctrl_t_df = pd.DataFrame(ctrl_t_df).transpose()
ctrl_e_df = pd.DataFrame(ctrl_e_df).transpose()
# sort all values by their IDs
edge_df.sort_index(inplace=True)
t_df.sort_index(inplace=True)
ctrl_e_df.sort_index(inplace=True)
ctrl_t_df.sort_index(inplace=True)
# and order columns the same way
ctrl_e_df = ctrl_e_df[edge_df.columns]
ctrl_t_df = ctrl_t_df[t_df.columns]
assert 'novelty' in t_df.columns
print('test')
print(edge_df)
print('control')
print(ctrl_e_df)
print(edge_df == ctrl_e_df)
assert (edge_df == ctrl_e_df).all(axis=0).all()
print('test')
print(t_df)
print(t_df.exons)
print('control')
print(ctrl_t_df)
print(ctrl_t_df.exons)
print(t_df == ctrl_t_df)
assert (t_df == ctrl_t_df).all(axis=0).all()
# tests TALON DB parsing - yes pass_list
def test_parse_db_2(self):
db_file = 'files/test_full.db'
pass_list = 'files/test_full_pass_list.csv'
t_df, edge_df = swan.parse_db(db_file, pass_list, False, True, False)
ctrl_t_df, ctrl_e_df = get_test_transcript_exon_dicts()
for key, item in ctrl_t_df.items():
item['exons'] = swan.reorder_exons(item['exons'])
# delete entries that weren't on pass list
del ctrl_e_df['chr2_45_50_+_exon']
del ctrl_t_df['test4']
ctrl_t_df = pd.DataFrame(ctrl_t_df).transpose()
ctrl_e_df = pd.DataFrame(ctrl_e_df).transpose()
# sort all values by their IDs
edge_df.sort_index(inplace=True)
t_df.sort_index(inplace=True)
ctrl_e_df.sort_index(inplace=True)
ctrl_t_df.sort_index(inplace=True)
# and order columns the same way
ctrl_e_df = ctrl_e_df[edge_df.columns]
ctrl_t_df = ctrl_t_df[t_df.columns]
assert 'novelty' in t_df.columns
print('test')
print(edge_df)
print('control')
print(ctrl_e_df)
print(edge_df == ctrl_e_df)
assert (edge_df == ctrl_e_df).all(axis=0).all()
print('test')
print(t_df)
print(t_df.exons)
print('control')
print(ctrl_t_df)
print(ctrl_t_df.exons)
print(t_df == ctrl_t_df)
assert (t_df == ctrl_t_df).all(axis=0).all()
###########################################################################
####################### Related to DF creation ############################
###########################################################################
class TestCreateDFs(object):
# add_edge_coords, get_current_locs, get_current_edges,
# create_loc_dict, create_transcript_edge_dict create_dfs,
# tests add_edge_coords
def test_add_edge_coords(self):
sg = swan.SwanGraph()
sg = add_transcriptome_no_reorder_gtf(sg, 'files/test_full.gtf')
# sg.add_transcriptome('files/test_full.gtf')
cols = ['edge_id', 'v1', 'v2', 'strand', 'edge_type',
'v1_coord', 'v2_coord']
# print(sg.edge_df.head())
edge_df = sg.add_edge_coords()
print(edge_df.head())
edge_df = edge_df[cols]
ctrl_edge_df = pd.read_csv('files/test_add_edge_coords_result.tsv', sep='\t')
ctrl_edge_df = ctrl_edge_df[cols]
# first order to make them comparable
# sort all values by their IDs
edge_df.sort_values(by='edge_id', inplace=True)
ctrl_edge_df.sort_values(by='edge_id', inplace=True)
# and order columns the same way
ctrl_edge_df = ctrl_edge_df[edge_df.columns]
print('test')
print(edge_df)
print('control')
print(ctrl_edge_df)
assert (edge_df == ctrl_edge_df).all(axis=0).all()
# tests get_current_locs with an empty swangraph
def test_get_current_locs_empty_sg(self):
sg = swan.SwanGraph()
locs, n = sg.get_current_locs()
assert locs == {}
assert n == -1
# tests get_current_locs with a swangraph with data
def test_get_current_locs_sg_data(self):
sg = swan.SwanGraph()
cols = ['vertex_id', 'chrom', 'coord']
data = [[0, 1, 2], [1, 1, 3], [2, 3, 50]]
sg.loc_df = pd.DataFrame(data=data, columns=cols)
cols = ['tid']
data = [0]
sg.t_df = pd.DataFrame(data=data, columns=cols)
locs, n = sg.get_current_locs()
ctrl_locs = {(1,2):0, (1,3):1, (3,50):2}
assert locs == ctrl_locs
assert n == 2
# tests get_current_edges with an empty swangraph
def test_get_current_edges_empty_sg(self):
sg = swan.SwanGraph()
edges, n = sg.get_current_edges()
assert(edges == {})
assert(n == -1)
# tests get_current_edges in a sg with data
def test_get_current_edges_sg_data(self):
sg = swan.SwanGraph()
cols = ['vertex_id', 'chrom', 'coord']
data = [[0, 1, 2], [1, 1, 3], [2, 1, 50]]
sg.loc_df = pd.DataFrame(data=data, columns=cols)
cols = ['edge_id', 'v1', 'v2', 'strand', 'edge_type']
data = [[0, 0, 1, '+', 'exon'],
[1, 1, 2, '+', 'intron']]
sg.edge_df = pd.DataFrame(data=data, columns=cols)
cols = ['tid']
data = [0]
sg.t_df = pd.DataFrame(data=data, columns=cols)
edges, n = sg.get_current_edges()
ctrl = {(1,2,3,'+','exon'): {'edge_id': 0,
'edge_type': 'exon',
'v1': 0 ,
'v2': 1},
(1,3,50,'+','intron'): {'edge_id': 1,
'edge_type': 'intron',
'v1': 1,
'v2': 2}}
assert(edges == ctrl)
assert(n == 1)
# test create_loc_dict on an empty sg
# also checks to make sure exons that use the same loc
# don't result in dupe entries in loc_df
def test_create_loc_dict_empty_sg(self):
_, exons = get_test_transcript_exon_dicts()
sg = swan.SwanGraph()
locs = sg.create_loc_dict(exons)
ctrl_locs = {('chr1',1): 0,
('chr1', 20): 1,
('chr1', 25): 2,
('chr1', 30): 3,
('chr1', 35): 4,
('chr1', 40): 5,
('chr2', 100): 6,
('chr2', 80): 7,
('chr2', 75): 8,
('chr2', 60): 9,
('chr2', 50): 10,
('chr2', 45): 11,
('chr2', 65): 12
}
assert(ctrl_locs == locs)
# tests create_loc_dict when locs already exist in sg
def test_create_loc_dict_sg_data(self):
_, exons = get_test_transcript_exon_dicts()
# dummy preexisting data
sg = swan.SwanGraph()
data = [[0, 'chr1', 1], [1, 'chr2', 80]]
columns = ['vertex_id', 'chrom', 'coord']
sg.loc_df = pd.DataFrame(data=data, columns=columns)
cols = ['tid']
data = [0]
sg.t_df = pd.DataFrame(data=data, columns=cols)
locs = sg.create_loc_dict(exons)
ctrl_locs = {('chr1', 1):0,
('chr2', 80): 1,
('chr1', 20): 2,
('chr1', 25): 3,
('chr1', 30): 4,
('chr1', 35): 5,
('chr1', 40): 6,
('chr2', 100): 7,
('chr2', 75): 8,
('chr2', 60): 9,
('chr2', 50): 10,
('chr2', 45): 11,
('chr2', 65): 12
}
print('test')
print(locs)
print('control')
print(ctrl_locs)
assert(ctrl_locs == locs)
# tests create_transcript_edge_dict empty swangraph
def test_create_transcript_edge_dict_emtpy_sg(self):
transcripts, exons = get_test_transcript_exon_dicts()
sg = swan.SwanGraph()
locs = sg.create_loc_dict(exons)
transcripts, edges = sg.create_transcript_edge_dicts(transcripts, exons, locs)
# just compare the paths for the transcripts, which is the only
# part modified by this function
transcripts = dict([(key, item['path']) for key, item in transcripts.items()])
ctrl_transcript_paths = {
'test1': [0,1,2,3,4],
'test2': [5,6,7,8,9],
'test3': [5,6,10,11,9],
'test4': [12],
'test5': [5,13,14]
}
assert(transcripts == ctrl_transcript_paths)
ctrl_edges = {
('chr1', 1, 20, '+', 'exon'): {
'edge_id': 0,
'edge_type': 'exon',
'v1': 0,
'v2': 1
},
('chr1', 20, 25, '+', 'intron'): {
'edge_id': 1,
'edge_type': 'intron',
'v1': 1,
'v2': 2
},
('chr1', 25, 30, '+', 'exon'): {
'edge_id': 2,
'edge_type': 'exon',
'v1': 2,
'v2': 3
},
('chr1', 30, 35, '+', 'intron'): {
'edge_id': 3,
'edge_type': 'intron',
'v1': 3,
'v2': 4
},
('chr1', 35, 40, '+', 'exon'): {
'edge_id': 4,
'edge_type': 'exon',
'v1': 4,
'v2': 5
},
('chr2', 100, 80, '-', 'exon'): {
'edge_id': 5,
'edge_type': 'exon',
'v1': 6,
'v2': 7
},
('chr2', 80, 75, '-', 'intron'): {
'edge_id': 6,
'edge_type': 'intron',
'v1': 7,
'v2': 8
},
('chr2', 75, 60, '-', 'exon'): {
'edge_id': 7,
'edge_type': 'exon' ,
'v1': 8,
'v2': 9
},
('chr2', 60, 50, '-', 'intron'): {
'edge_id': 8,
'edge_type': 'intron',
'v1': 9,
'v2': 10
},
('chr2', 50, 45, '-', 'exon'): {
'edge_id': 9,
'edge_type': 'exon',
'v1': 10,
'v2': 11
},
('chr2', 75, 65, '-', 'exon'): {
'edge_id': 10,
'edge_type': 'exon',
'v1': 8,
'v2': 12
},
('chr2', 65, 50, '-', 'intron'): {
'edge_id': 11,
'edge_type': 'intron',
'v1': 12,
'v2': 10
},
('chr2', 45, 50, '+', 'exon'): {
'edge_id': 12,
'edge_type': 'exon',
'v1': 11,
'v2': 10
},
('chr2', 80, 60, '-', 'intron'): {
'edge_id': 13,
'edge_type': 'intron',
'v1': 7,
'v2': 9
},
('chr2', 60, 50, '-', 'exon'): {
'edge_id': 14,
'edge_type': 'exon',
'v1': 9,
'v2': 10
}
}
assert(edges == ctrl_edges)
# tests create_transcript_edge_dict with edges already in swangraph
def test_create_transcript_edge_dict_edge_sg(self):
transcripts, exons = get_test_transcript_exon_dicts()
# add some dummy data
sg = swan.SwanGraph()
data = [[0, 'chr1', 1],
[1, 'chr2', 20],
[2, 'chr2', 100],
[3, 'chr2', 80]]
columns = ['vertex_id', 'chrom', 'coord']
sg.loc_df = pd.DataFrame(data=data, columns=columns)
cols = ['tid']
data = [0]
sg.t_df = pd.DataFrame(data=data, columns=cols)
locs = sg.create_loc_dict(exons)
data = [[0, 0, 1, '+', 'exon'],
[1, 2, 3, '-', 'exon']]
columns = ['edge_id', 'v1', 'v2', 'strand', 'edge_type']
sg.edge_df = pd.DataFrame(data=data, columns=columns)
transcripts, edges = sg.create_transcript_edge_dicts(transcripts, exons, locs)
# just compare the paths for the transcripts, which is the only
# part modified by this function
transcripts = dict([(key, item['path']) for key, item in transcripts.items()])
ctrl_transcript_paths = {
'test1': [0,2,3,4,5],
'test2': [1,6,7,8,9],
'test3': [1,6,10,11,9],
'test4': [12],
'test5': [1,13,14]
}
assert(transcripts == ctrl_transcript_paths)
ctrl_edges = {
('chr1', 1, 20, '+', 'exon'): {
'edge_id': 0,
'edge_type': 'exon',
'v1': 0,
'v2': 1
},
('chr1', 20, 25, '+', 'intron'): {
'edge_id': 2,
'edge_type': 'intron',
'v1': 4,
'v2': 5
},
('chr1', 25, 30, '+', 'exon'): {
'edge_id': 3,
'edge_type': 'exon',
'v1': 5,
'v2': 6
},
('chr1', 30, 35, '+', 'intron'): {
'edge_id': 4,
'edge_type': 'intron',
'v1': 6,
'v2': 7
},
('chr1', 35, 40, '+', 'exon'): {
'edge_id': 5,
'edge_type': 'exon',
'v1': 7,
'v2': 8
},
('chr2', 100, 80, '-', 'exon'): {
'edge_id': 1,
'edge_type': 'exon',
'v1': 2,
'v2': 3
},
('chr2', 80, 75, '-', 'intron'): {
'edge_id': 6,
'edge_type': 'intron',
'v1': 3,
'v2': 9
},
('chr2', 75, 60, '-', 'exon'): {
'edge_id': 7,
'edge_type': 'exon' ,
'v1': 9,
'v2': 10
},
('chr2', 60, 50, '-', 'intron'): {
'edge_id': 8,
'edge_type': 'intron',
'v1': 10,
'v2': 11
},
('chr2', 50, 45, '-', 'exon'): {
'edge_id': 9,
'edge_type': 'exon',
'v1': 11,
'v2': 12
},
('chr2', 75, 65, '-', 'exon'): {
'edge_id': 10,
'edge_type': 'exon',
'v1': 9,
'v2': 13
},
('chr2', 65, 50, '-', 'intron'): {
'edge_id': 11,
'edge_type': 'intron',
'v1': 13,
'v2': 11
},
('chr2', 45, 50, '+', 'exon'): {
'edge_id': 12,
'edge_type': 'exon',
'v1': 12,
'v2': 11
},
('chr2', 80, 60, '-', 'intron'): {
'edge_id': 13,
'edge_type': 'intron',
'v1': 3,
'v2': 10
},
('chr2', 60, 50, '-', 'exon'): {
'edge_id': 14,
'edge_type': 'exon',
'v1': 10,
'v2': 11
}
}
assert(edges == ctrl_edges)
# # tests create_transcript_edge_dict where transcripts already
# # # exist in the swangraph
# # def test_create_transcript_edge_dict_edge_t_sg(self):
# # pass
# # # TODO
#
# tests create_dfs with an empty sg
# also ensures that empty dict -> df -> dict conversion doesn't screw up
def test_create_dfs_empty_sg(self):
transcripts, exons = get_test_transcript_exon_dicts()
sg = swan.SwanGraph()
loc_df, edge_df, t_df = sg.create_dfs(transcripts, exons, False)
ctrl_loc_df = pd.read_csv('files/test_loc_df.tsv', sep='\t')
ctrl_loc_df.set_index('vertex_id_index', inplace=True)
ctrl_loc_df.index.name = 'vertex_id'
# remove the columns that are there just for debugging purposes
ctrl_edge_df = pd.read_csv('files/test_edge_df.tsv', sep='\t')
ctrl_edge_df.drop(['v2_coord', 'v1_coord'], axis=1, inplace=True)
ctrl_edge_df.set_index('edge_id_index', inplace=True)
ctrl_edge_df.index.name = 'edge_id'
# again, remove and reformat columns that are there for debugging
ctrl_t_df = pd.read_csv('files/test_t_df.tsv', sep='\t')
ctrl_t_df.set_index('tid_index', inplace=True)
ctrl_t_df.index.name = 'tid'
ctrl_t_df.drop(['loc_path', 'novelty'], axis=1, inplace=True)
ctrl_t_df.rename({'edge_path': 'path'}, axis=1, inplace=True)
ctrl_t_df['path'] = ctrl_t_df.apply(lambda x: [int(n) for n in x.path.split(',')], axis=1)
check_dfs(loc_df, ctrl_loc_df, edge_df, ctrl_edge_df, t_df, ctrl_t_df)
# tests create_dfs when from_talon = True
def test_create_dfs_empty_sg_from_talon(self):
transcripts, exons = get_test_transcript_exon_dicts()
sg = swan.SwanGraph()
loc_df, edge_df, t_df = sg.create_dfs(transcripts, exons, True)
ctrl_loc_df = pd.read_csv('files/test_loc_df.tsv', sep='\t')
ctrl_loc_df.set_index('vertex_id_index', inplace=True)
ctrl_loc_df.index.name = 'vertex_id'
# remove the columns that are there just for debugging purposes
ctrl_edge_df = pd.read_csv('files/test_edge_df.tsv', sep='\t')
ctrl_edge_df.drop(['v2_coord', 'v1_coord'], axis=1, inplace=True)
ctrl_edge_df.set_index('edge_id_index', inplace=True)
ctrl_edge_df.index.name = 'edge_id'
# again, remove and reformat columns that are there for debugging
ctrl_t_df = pd.read_csv('files/test_t_df.tsv', sep='\t')
ctrl_t_df.set_index('tid_index', inplace=True)
ctrl_t_df.index.name = 'tid'
ctrl_t_df.drop(['loc_path'], axis=1, inplace=True)
ctrl_t_df.rename({'edge_path': 'path'}, axis=1, inplace=True)
ctrl_t_df['path'] = ctrl_t_df.apply(lambda x: [int(n) for n in x.path.split(',')], axis=1)
ctrl_t_df = ctrl_t_df[t_df.columns]
check_dfs(loc_df, ctrl_loc_df, edge_df, ctrl_edge_df, t_df, ctrl_t_df)
# tests create_dfs in a swangraph with data
def test_create_dfs_data_sg(self):
transcripts, exons = get_test_transcript_exon_dicts()
del transcripts['test2']
sg = swan.SwanGraph()
# add dummy data
# loc_df - format
loc_df = pd.read_csv('files/test_preexisting_loc_df.tsv', sep='\t')
loc_df.set_index('vertex_id_index', inplace=True)
loc_df.index.name = 'vertex_id'
# edge_df - format and remove the columns that are there
# just for debugging purposes
edge_df = pd.read_csv('files/test_preexisting_edge_df.tsv', sep='\t')
edge_df.drop(['v2_coord', 'v1_coord'], axis=1, inplace=True)
edge_df.set_index('edge_id_index', inplace=True)
edge_df.index.name = 'edge_id'
# t_df - remove and reformat columns that are there for debugging
t_df = pd.read_csv('files/test_preexisting_t_df.tsv', sep='\t')
t_df.set_index('tid_index', inplace=True)
t_df.index.name = 'tid'
t_df.drop(['loc_path'], axis=1, inplace=True)
t_df.rename({'edge_path': 'path'}, axis=1, inplace=True)
t_df['path'] = t_df.apply(lambda x: [int(n) for n in x.path.split(',')], axis=1)
t_df = t_df[t_df.columns]
sg.loc_df = loc_df
sg.edge_df = edge_df
sg.t_df = t_df
loc_df, edge_df, t_df = sg.create_dfs(transcripts, exons, True)
# control data
# loc_df - format
ctrl_loc_df = pd.read_csv('files/test_preexisting_result_loc_df.tsv', sep='\t')
ctrl_loc_df.set_index('vertex_id_index', inplace=True)
ctrl_loc_df.index.name = 'vertex_id'
# edge_df - format and remove the columns that are there
# just for debugging purposes
ctrl_edge_df = pd.read_csv('files/test_preexisting_result_edge_df.tsv', sep='\t')
ctrl_edge_df.drop(['v2_coord', 'v1_coord'], axis=1, inplace=True)
ctrl_edge_df.set_index('edge_id_index', inplace=True)
ctrl_edge_df.index.name = 'edge_id'
# t_df - remove and reformat columns that are there for debugging
ctrl_t_df = pd.read_csv('files/test_preexisting_result_t_df.tsv', sep='\t')
ctrl_t_df.set_index('tid_index', inplace=True)
ctrl_t_df.index.name = 'tid'
ctrl_t_df.drop(['loc_path'], axis=1, inplace=True)
ctrl_t_df.rename({'edge_path': 'path'}, axis=1, inplace=True)
ctrl_t_df['path'] = ctrl_t_df.apply(lambda x: [int(n) for n in x.path.split(',')], axis=1)
ctrl_t_df = ctrl_t_df[t_df.columns]
check_dfs(loc_df, ctrl_loc_df, edge_df, ctrl_edge_df, t_df, ctrl_t_df)
# tests create_dfs in sg with data where existing data has novelty
# and added dataset does not
def test_create_dfs_data_sg_nov1(self):
transcripts, exons = get_test_transcript_exon_dicts()
# to do - remove transcript that's already there
sg = swan.SwanGraph()
# add dummy data
# loc_df - format
loc_df = pd.read_csv('files/test_preexisting_loc_df.tsv', sep='\t')
loc_df.set_index('vertex_id_index', inplace=True)
loc_df.index.name = 'vertex_id'
# edge_df - format and remove the columns that are there
# just for debugging purposes
edge_df = pd.read_csv('files/test_preexisting_edge_df.tsv', sep='\t')
edge_df.drop(['v2_coord', 'v1_coord'], axis=1, inplace=True)
edge_df.set_index('edge_id_index', inplace=True)
edge_df.index.name = 'edge_id'
# t_df - remove and reformat columns that are there for debugging
t_df = pd.read_csv('files/test_preexisting_t_df.tsv', sep='\t')
t_df.set_index('tid_index', inplace=True)
t_df.index.name = 'tid'
t_df.drop(['loc_path'], axis=1, inplace=True)
t_df.rename({'edge_path': 'path'}, axis=1, inplace=True)
t_df['path'] = t_df.apply(lambda x: [int(n) for n in x.path.split(',')], axis=1)
t_df = t_df[t_df.columns]
sg.loc_df = loc_df
sg.edge_df = edge_df
sg.t_df = t_df
loc_df, edge_df, t_df = sg.create_dfs(transcripts, exons, False)
# control data
# loc_df - format
ctrl_loc_df = pd.read_csv('files/test_preexisting_result_loc_df.tsv', sep='\t')
ctrl_loc_df.set_index('vertex_id_index', inplace=True)
ctrl_loc_df.index.name = 'vertex_id'
# edge_df - format and remove the columns that are there
# just for debugging purposes
ctrl_edge_df = pd.read_csv('files/test_preexisting_result_edge_df.tsv', sep='\t')
ctrl_edge_df.drop(['v2_coord', 'v1_coord'], axis=1, inplace=True)
ctrl_edge_df.set_index('edge_id_index', inplace=True)
ctrl_edge_df.index.name = 'edge_id'
# t_df - remove and reformat columns that are there for debugging
ctrl_t_df = pd.read_csv('files/test_preexisting_result_t_df.tsv', sep='\t')
ctrl_t_df.set_index('tid_index', inplace=True)
ctrl_t_df.index.name = 'tid'
ctrl_t_df.drop(['loc_path'], axis=1, inplace=True)
ctrl_t_df.rename({'edge_path': 'path'}, axis=1, inplace=True)
ctrl_t_df['path'] = ctrl_t_df.apply(lambda x: [int(n) for n in x.path.split(',')], axis=1)
ctrl_t_df = ctrl_t_df[t_df.columns]
# remove novelty for entries that are new
new_tids = ['test1', 'test3', 'test4', 'test5']
ctrl_t_df.loc[ctrl_t_df.tid.isin(new_tids), 'novelty'] = 'Undefined'
check_dfs(loc_df, ctrl_loc_df, edge_df, ctrl_edge_df, t_df, ctrl_t_df)
# tests create_dfs with preexisting data and a duplicate transcript
# being added
# also tests that old data (novelty in this case) is not overwritten
def test_create_dfs_data_sg_dupe(self):
transcripts, exons = get_test_transcript_exon_dicts()
sg = swan.SwanGraph()
# add dummy data
# loc_df - format
loc_df = pd.read_csv('files/test_preexisting_loc_df.tsv', sep='\t')
loc_df.set_index('vertex_id_index', inplace=True)
loc_df.index.name = 'vertex_id'
# edge_df - format and remove the columns that are there
# just for debugging purposes
edge_df = pd.read_csv('files/test_preexisting_edge_df.tsv', sep='\t')
edge_df.drop(['v2_coord', 'v1_coord'], axis=1, inplace=True)
edge_df.set_index('edge_id_index', inplace=True)
edge_df.index.name = 'edge_id'
# t_df - remove and reformat columns that are there for debugging
t_df = pd.read_csv('files/test_preexisting_t_df.tsv', sep='\t')
t_df.set_index('tid_index', inplace=True)
t_df.index.name = 'tid'
t_df.drop(['loc_path'], axis=1, inplace=True)
t_df.rename({'edge_path': 'path'}, axis=1, inplace=True)
t_df['path'] = t_df.apply(lambda x: [int(n) for n in x.path.split(',')], axis=1)
t_df = t_df[t_df.columns]
sg.loc_df = loc_df
sg.edge_df = edge_df
sg.t_df = t_df
loc_df, edge_df, t_df = sg.create_dfs(transcripts, exons, False)
# control data
# loc_df - format
ctrl_loc_df = | pd.read_csv('files/test_preexisting_result_loc_df.tsv', sep='\t') | pandas.read_csv |
import warnings
import itertools
from copy import copy
from functools import partial
from collections import UserString
from collections.abc import Iterable, Sequence, Mapping
from numbers import Number
from datetime import datetime
from distutils.version import LooseVersion
import numpy as np
import pandas as pd
import matplotlib as mpl
from ._decorators import (
share_init_params_with_map,
)
from .palettes import (
QUAL_PALETTES,
color_palette,
)
from .utils import (
_check_argument,
get_color_cycle,
remove_na,
)
class SemanticMapping:
"""Base class for mapping data values to plot attributes."""
# -- Default attributes that all SemanticMapping subclasses must set
# Whether the mapping is numeric, categorical, or datetime
map_type = None
# Ordered list of unique values in the input data
levels = None
# A mapping from the data values to corresponding plot attributes
lookup_table = None
def __init__(self, plotter):
# TODO Putting this here so we can continue to use a lot of the
# logic that's built into the library, but the idea of this class
# is to move towards semantic mappings that are agnostic about the
# kind of plot they're going to be used to draw.
# Fully achieving that is going to take some thinking.
self.plotter = plotter
def map(cls, plotter, *args, **kwargs):
# This method is assigned the __init__ docstring
method_name = "_{}_map".format(cls.__name__[:-7].lower())
setattr(plotter, method_name, cls(plotter, *args, **kwargs))
return plotter
def _lookup_single(self, key):
"""Apply the mapping to a single data value."""
return self.lookup_table[key]
def __call__(self, key, *args, **kwargs):
"""Get the attribute(s) values for the data key."""
if isinstance(key, (list, np.ndarray, pd.Series)):
return [self._lookup_single(k, *args, **kwargs) for k in key]
else:
return self._lookup_single(key, *args, **kwargs)
@share_init_params_with_map
class HueMapping(SemanticMapping):
"""Mapping that sets artist colors according to data values."""
# A specification of the colors that should appear in the plot
palette = None
# An object that normalizes data values to [0, 1] range for color mapping
norm = None
# A continuous colormap object for interpolating in a numeric context
cmap = None
def __init__(
self, plotter, palette=None, order=None, norm=None,
):
"""Map the levels of the `hue` variable to distinct colors.
Parameters
----------
# TODO add generic parameters
"""
super().__init__(plotter)
data = plotter.plot_data.get("hue", pd.Series(dtype=float))
if data.notna().any():
map_type = self.infer_map_type(
palette, norm, plotter.input_format, plotter.var_types["hue"]
)
# Our goal is to end up with a dictionary mapping every unique
# value in `data` to a color. We will also keep track of the
# metadata about this mapping we will need for, e.g., a legend
# --- Option 1: numeric mapping with a matplotlib colormap
if map_type == "numeric":
data = pd.to_numeric(data)
levels, lookup_table, norm, cmap = self.numeric_mapping(
data, palette, norm,
)
# --- Option 2: categorical mapping using seaborn palette
elif map_type == "categorical":
cmap = norm = None
levels, lookup_table = self.categorical_mapping(
data, palette, order,
)
# --- Option 3: datetime mapping
else:
# TODO this needs actual implementation
cmap = norm = None
levels, lookup_table = self.categorical_mapping(
# Casting data to list to handle differences in the way
# pandas and numpy represent datetime64 data
list(data), palette, order,
)
self.map_type = map_type
self.lookup_table = lookup_table
self.palette = palette
self.levels = levels
self.norm = norm
self.cmap = cmap
def _lookup_single(self, key):
"""Get the color for a single value, using colormap to interpolate."""
try:
# Use a value that's in the original data vector
value = self.lookup_table[key]
except KeyError:
# Use the colormap to interpolate between existing datapoints
# (e.g. in the context of making a continuous legend)
try:
normed = self.norm(key)
except TypeError as err:
if np.isnan(key):
value = (0, 0, 0, 0)
else:
raise err
else:
if np.ma.is_masked(normed):
normed = np.nan
value = self.cmap(normed)
return value
def infer_map_type(self, palette, norm, input_format, var_type):
"""Determine how to implement the mapping."""
if palette in QUAL_PALETTES:
map_type = "categorical"
elif norm is not None:
map_type = "numeric"
elif isinstance(palette, (dict, list)):
map_type = "categorical"
elif input_format == "wide":
map_type = "categorical"
else:
map_type = var_type
return map_type
def categorical_mapping(self, data, palette, order):
"""Determine colors when the hue mapping is categorical."""
# -- Identify the order and name of the levels
levels = categorical_order(data, order)
n_colors = len(levels)
# -- Identify the set of colors to use
if isinstance(palette, dict):
missing = set(levels) - set(palette)
if any(missing):
err = "The palette dictionary is missing keys: {}"
raise ValueError(err.format(missing))
lookup_table = palette
else:
if palette is None:
if n_colors <= len(get_color_cycle()):
colors = color_palette(None, n_colors)
else:
colors = color_palette("husl", n_colors)
elif isinstance(palette, list):
if len(palette) != n_colors:
err = "The palette list has the wrong number of colors."
raise ValueError(err)
colors = palette
else:
colors = color_palette(palette, n_colors)
lookup_table = dict(zip(levels, colors))
return levels, lookup_table
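# Illustrative sketch (added; not part of the original source): for a hue column
# with levels ["a", "b", "c"] and palette=["r", "g", "b"], the lookup table built
# above is simply:
#   lookup_table = dict(zip(["a", "b", "c"], ["r", "g", "b"]))
#   # -> {"a": "r", "b": "g", "c": "b"}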
def numeric_mapping(self, data, palette, norm):
"""Determine colors when the hue variable is quantitative."""
if isinstance(palette, dict):
# The presence of a norm object overrides a dictionary of hues
# in specifying a numeric mapping, so we need to process it here.
levels = list(sorted(palette))
colors = [palette[k] for k in sorted(palette)]
cmap = mpl.colors.ListedColormap(colors)
lookup_table = palette.copy()
else:
# The levels are the sorted unique values in the data
levels = list(np.sort(remove_na(data.unique())))
# --- Sort out the colormap to use from the palette argument
# Default numeric palette is our default cubehelix palette
# TODO do we want to do something complicated to ensure contrast?
palette = "ch:" if palette is None else palette
if isinstance(palette, mpl.colors.Colormap):
cmap = palette
else:
cmap = color_palette(palette, as_cmap=True)
# Now sort out the data normalization
if norm is None:
norm = mpl.colors.Normalize()
elif isinstance(norm, tuple):
norm = mpl.colors.Normalize(*norm)
elif not isinstance(norm, mpl.colors.Normalize):
err = "``hue_norm`` must be None, tuple, or Normalize object."
raise ValueError(err)
if not norm.scaled():
norm(np.asarray(data.dropna()))
lookup_table = dict(zip(levels, cmap(norm(levels))))
return levels, lookup_table, norm, cmap
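# Rough usage note (added for clarity; not in the original source): with a numeric
# hue, each unique data value is normalized into [0, 1] and passed through the
# colormap, so e.g. with norm=Normalize(0, 10) a value of 5 is assigned
# approximately cmap(0.5).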
@share_init_params_with_map
class SizeMapping(SemanticMapping):
"""Mapping that sets artist sizes according to data values."""
# An object that normalizes data values to [0, 1] range
norm = None
def __init__(
self, plotter, sizes=None, order=None, norm=None,
):
"""Map the levels of the `size` variable to distinct values.
Parameters
----------
# TODO add generic parameters
"""
super().__init__(plotter)
data = plotter.plot_data.get("size", pd.Series(dtype=float))
if data.notna().any():
map_type = self.infer_map_type(
norm, sizes, plotter.var_types["size"]
)
# --- Option 1: numeric mapping
if map_type == "numeric":
levels, lookup_table, norm, size_range = self.numeric_mapping(
data, sizes, norm,
)
# --- Option 2: categorical mapping
elif map_type == "categorical":
levels, lookup_table = self.categorical_mapping(
data, sizes, order,
)
size_range = None
# --- Option 3: datetime mapping
# TODO this needs an actual implementation
else:
levels, lookup_table = self.categorical_mapping(
# Casting data to list to handle differences in the way
# pandas and numpy represent datetime64 data
list(data), sizes, order,
)
size_range = None
self.map_type = map_type
self.levels = levels
self.norm = norm
self.sizes = sizes
self.size_range = size_range
self.lookup_table = lookup_table
def infer_map_type(self, norm, sizes, var_type):
if norm is not None:
map_type = "numeric"
elif isinstance(sizes, (dict, list)):
map_type = "categorical"
else:
map_type = var_type
return map_type
def _lookup_single(self, key):
try:
value = self.lookup_table[key]
except KeyError:
normed = self.norm(key)
if np.ma.is_masked(normed):
normed = np.nan
value = self.size_range[0] + normed * np.ptp(self.size_range)
return value
def categorical_mapping(self, data, sizes, order):
levels = categorical_order(data, order)
if isinstance(sizes, dict):
# Dict inputs map existing data values to the size attribute
missing = set(levels) - set(sizes)
if any(missing):
err = f"Missing sizes for the following levels: {missing}"
raise ValueError(err)
lookup_table = sizes.copy()
elif isinstance(sizes, list):
# List inputs give size values in the same order as the levels
if len(sizes) != len(levels):
err = "The `sizes` list has the wrong number of values."
raise ValueError(err)
lookup_table = dict(zip(levels, sizes))
else:
if isinstance(sizes, tuple):
# Tuple input sets the min, max size values
if len(sizes) != 2:
err = "A `sizes` tuple must have only 2 values"
raise ValueError(err)
elif sizes is not None:
err = f"Value for `sizes` not understood: {sizes}"
raise ValueError(err)
else:
# Otherwise, we need to get the min, max size values from
# the plotter object we are attached to.
# TODO this is going to cause us trouble later, because we
# want to restructure things so that the plotter is generic
# across the visual representation of the data. But at this
# point, we don't know the visual representation. Likely we
# want to change the logic of this Mapping so that it gives
# points on a normalized range that then gets un-normalized
# when we know what we're drawing. But given the way the
# package works now, this way is cleanest.
sizes = self.plotter._default_size_range
# For categorical sizes, use regularly-spaced linear steps
# between the minimum and maximum sizes. Then reverse the
# ramp so that the largest value is used for the first entry
# in size_order, etc. This is because "ordered" categories
# are often thought to go in decreasing priority.
# are often thought to go in decreasing priority.
sizes = np.linspace(*sizes, len(levels))[::-1]
lookup_table = dict(zip(levels, sizes))
return levels, lookup_table
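# Worked example (illustrative only, assuming a default size range of (1, 2)):
# three categorical levels get np.linspace(1, 2, 3)[::-1] == [2.0, 1.5, 1.0],
# so the first level in the ordering receives the largest size.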
def numeric_mapping(self, data, sizes, norm):
if isinstance(sizes, dict):
# The presence of a norm object overrides a dictionary of sizes
# in specifying a numeric mapping, so we need to process it
# dictionary here
levels = list(np.sort(list(sizes)))
size_values = sizes.values()
size_range = min(size_values), max(size_values)
else:
# The levels here will be the unique values in the data
levels = list(np.sort(remove_na(data.unique())))
if isinstance(sizes, tuple):
# For numeric inputs, the size can be parametrized by
# the minimum and maximum artist values to map to. The
# norm object that gets set up next specifies how to
# do the mapping.
if len(sizes) != 2:
err = "A `sizes` tuple must have only 2 values"
raise ValueError(err)
size_range = sizes
elif sizes is not None:
err = f"Value for `sizes` not understood: {sizes}"
raise ValueError(err)
else:
# When not provided, we get the size range from the plotter
# object we are attached to. See the note in the categorical
# method about how this is suboptimal for future development.
size_range = self.plotter._default_size_range
# Now that we know the minimum and maximum sizes that will get drawn,
# we need to map the data values that we have into that range. We will
# use a matplotlib Normalize class, which is typically used for numeric
# color mapping but works fine here too. It takes data values and maps
# them into a [0, 1] interval, potentially nonlinearly.
if norm is None:
# Default is a linear function between the min and max data values
norm = mpl.colors.Normalize()
elif isinstance(norm, tuple):
# It is also possible to give different limits in data space
norm = mpl.colors.Normalize(*norm)
elif not isinstance(norm, mpl.colors.Normalize):
err = f"Value for size `norm` parameter not understood: {norm}"
raise ValueError(err)
else:
# If provided with Normalize object, copy it so we can modify
norm = copy(norm)
# Set the mapping so all output values are in [0, 1]
norm.clip = True
# If the input range is not set, use the full range of the data
if not norm.scaled():
norm(levels)
# Map from data values to [0, 1] range
sizes_scaled = norm(levels)
# Now map from the scaled range into the artist units
if isinstance(sizes, dict):
lookup_table = sizes
else:
lo, hi = size_range
sizes = lo + sizes_scaled * (hi - lo)
lookup_table = dict(zip(levels, sizes))
return levels, lookup_table, norm, size_range
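# Hedged sketch of the numeric size mapping (added; not part of the original code):
# with sizes=(10, 100) and data levels [1, 2, 3], the autoscaled norm maps the
# levels to [0.0, 0.5, 1.0], and the artist sizes become 10 + [0, 0.5, 1] * 90,
# i.e. {1: 10.0, 2: 55.0, 3: 100.0}.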
@share_init_params_with_map
class StyleMapping(SemanticMapping):
"""Mapping that sets artist style according to data values."""
# Style mapping is always treated as categorical
map_type = "categorical"
def __init__(
self, plotter, markers=None, dashes=None, order=None,
):
"""Map the levels of the `style` variable to distinct values.
Parameters
----------
# TODO add generic parameters
"""
super().__init__(plotter)
data = plotter.plot_data.get("style", pd.Series(dtype=float))
if data.notna().any():
# Cast to list to handle numpy/pandas datetime quirks
if variable_type(data) == "datetime":
data = list(data)
# Find ordered unique values
levels = categorical_order(data, order)
markers = self._map_attributes(
markers, levels, unique_markers(len(levels)), "markers",
)
dashes = self._map_attributes(
dashes, levels, unique_dashes(len(levels)), "dashes",
)
# Build the paths matplotlib will use to draw the markers
paths = {}
filled_markers = []
for k, m in markers.items():
if not isinstance(m, mpl.markers.MarkerStyle):
m = mpl.markers.MarkerStyle(m)
paths[k] = m.get_path().transformed(m.get_transform())
filled_markers.append(m.is_filled())
# Mixture of filled and unfilled markers will show line art markers
# in the edge color, which defaults to white. This can be handled,
# but there would be additional complexity with specifying the
# weight of the line art markers without overwhelming the filled
# ones with the edges. So for now, we will disallow mixtures.
if any(filled_markers) and not all(filled_markers):
err = "Filled and line art markers cannot be mixed"
raise ValueError(err)
lookup_table = {}
for key in levels:
lookup_table[key] = {}
if markers:
lookup_table[key]["marker"] = markers[key]
lookup_table[key]["path"] = paths[key]
if dashes:
lookup_table[key]["dashes"] = dashes[key]
self.levels = levels
self.lookup_table = lookup_table
def _lookup_single(self, key, attr=None):
"""Get attribute(s) for a given data point."""
if attr is None:
value = self.lookup_table[key]
else:
value = self.lookup_table[key][attr]
return value
def _map_attributes(self, arg, levels, defaults, attr):
"""Handle the specification for a given style attribute."""
if arg is True:
lookup_table = dict(zip(levels, defaults))
elif isinstance(arg, dict):
missing = set(levels) - set(arg)
if missing:
err = f"These `{attr}` levels are missing values: {missing}"
raise ValueError(err)
lookup_table = arg
elif isinstance(arg, Sequence):
if len(levels) != len(arg):
err = f"The `{attr}` argument has the wrong number of values"
raise ValueError(err)
lookup_table = dict(zip(levels, arg))
elif arg:
err = f"This `{attr}` argument was not understood: {arg}"
raise ValueError(err)
else:
lookup_table = {}
return lookup_table
# =========================================================================== #
class VectorPlotter:
"""Base class for objects underlying *plot functions."""
_semantic_mappings = {
"hue": HueMapping,
"size": SizeMapping,
"style": StyleMapping,
}
# TODO units is another example of a non-mapping "semantic"
# we need a general name for this and separate handling
semantics = "x", "y", "hue", "size", "style", "units"
wide_structure = {
"x": "@index", "y": "@values", "hue": "@columns", "style": "@columns",
}
flat_structure = {"x": "@index", "y": "@values"}
_default_size_range = 1, 2 # Unused but needed in tests, ugh
def __init__(self, data=None, variables={}):
self._var_levels = {}
# var_ordered is relevant only for categorical axis variables, and may
# be better handled by an internal axis information object that tracks
# such information and is set up by the scale_* methods. The analogous
# information for numeric axes would be information about log scales.
self._var_ordered = {"x": False, "y": False} # alt., used DefaultDict
self.assign_variables(data, variables)
for var, cls in self._semantic_mappings.items():
# Create the mapping function
map_func = partial(cls.map, plotter=self)
setattr(self, f"map_{var}", map_func)
# Call the mapping function to initialize with default values
getattr(self, f"map_{var}")()
@classmethod
def get_semantics(cls, kwargs, semantics=None):
"""Subset a dictionary` arguments with known semantic variables."""
# TODO this should be get_variables since we have included x and y
if semantics is None:
semantics = cls.semantics
variables = {}
for key, val in kwargs.items():
if key in semantics and val is not None:
variables[key] = val
return variables
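# Example (added for illustration; the keyword names are hypothetical):
#   VectorPlotter.get_semantics({"x": "total", "hue": "day", "palette": "deep", "y": None})
# returns {"x": "total", "hue": "day"} -- only known semantic names with
# non-None values are kept.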
@property
def has_xy_data(self):
"""Return True at least one of x or y is defined."""
return bool({"x", "y"} & set(self.variables))
@property
def var_levels(self):
"""Property interface to ordered list of variables levels.
Each time it's accessed, it updates the var_levels dictionary with the
list of levels in the current semantic mappers. But it also allows the
dictionary to persist, so it can be used to set levels by a key. This is
used to track the list of col/row levels using an attached FacetGrid
object, but it's kind of messy and ideally fixed by improving the
faceting logic so it interfaces better with the modern approach to
tracking plot variables.
"""
for var in self.variables:
try:
map_obj = getattr(self, f"_{var}_map")
self._var_levels[var] = map_obj.levels
except AttributeError:
pass
return self._var_levels
def assign_variables(self, data=None, variables={}):
"""Define plot variables, optionally using lookup from `data`."""
x = variables.get("x", None)
y = variables.get("y", None)
if x is None and y is None:
self.input_format = "wide"
plot_data, variables = self._assign_variables_wideform(
data, **variables,
)
else:
self.input_format = "long"
plot_data, variables = self._assign_variables_longform(
data, **variables,
)
self.plot_data = plot_data
self.variables = variables
self.var_types = {
v: variable_type(
plot_data[v],
boolean_type="numeric" if v in "xy" else "categorical"
)
for v in variables
}
return self
def _assign_variables_wideform(self, data=None, **kwargs):
"""Define plot variables given wide-form data.
Parameters
----------
data : flat vector or collection of vectors
Data can be a vector or mapping that is coercible to a Series
or a sequence- or mapping-based collection of such vectors, or a
rectangular numpy array, or a Pandas DataFrame.
kwargs : variable -> data mappings
Behavior with keyword arguments is currently undefined.
Returns
-------
plot_data : :class:`pandas.DataFrame`
Long-form data object mapping seaborn variables (x, y, hue, ...)
to data vectors.
variables : dict
Keys are defined seaborn variables; values are names inferred from
the inputs (or None when no name can be determined).
"""
# Raise if semantic or other variables are assigned in wide-form mode
assigned = [k for k, v in kwargs.items() if v is not None]
if any(assigned):
s = "s" if len(assigned) > 1 else ""
err = f"The following variable{s} cannot be assigned with wide-form data: "
err += ", ".join(f"`{v}`" for v in assigned)
raise ValueError(err)
# Determine if the data object actually has any data in it
empty = data is None or not len(data)
# Then, determine if we have "flat" data (a single vector)
if isinstance(data, dict):
values = data.values()
else:
values = np.atleast_1d(np.asarray(data, dtype=object))
flat = not any(
isinstance(v, Iterable) and not isinstance(v, (str, bytes))
for v in values
)
if empty:
# Make an object with the structure of plot_data, but empty
plot_data = pd.DataFrame()
variables = {}
elif flat:
# Handle flat data by converting to pandas Series and using the
# index and/or values to define x and/or y
# (Could be accomplished with a more general to_series() interface)
flat_data = pd.Series(data).copy()
names = {
"@values": flat_data.name,
"@index": flat_data.index.name
}
plot_data = {}
variables = {}
for var in ["x", "y"]:
if var in self.flat_structure:
attr = self.flat_structure[var]
plot_data[var] = getattr(flat_data, attr[1:])
variables[var] = names[self.flat_structure[var]]
plot_data = pd.DataFrame(plot_data)
else:
# Otherwise assume we have some collection of vectors.
# Handle Python sequences such that entries end up in the columns,
# not in the rows, of the intermediate wide DataFrame.
# One way to accomplish this is to convert to a dict of Series.
if isinstance(data, Sequence):
data_dict = {}
for i, var in enumerate(data):
key = getattr(var, "name", i)
# TODO is there a safer/more generic way to ensure Series?
# sort of like np.asarray, but for pandas?
data_dict[key] = pd.Series(var)
data = data_dict
# Pandas requires that dict values either be Series objects
# or all have the same length, but we want to allow "ragged" inputs
if isinstance(data, Mapping):
data = {key: pd.Series(val) for key, val in data.items()}
# Otherwise, delegate to the pandas DataFrame constructor
# This is where we'd prefer to use a general interface that says
# "give me this data as a pandas DataFrame", so we can accept
# DataFrame objects from other libraries
wide_data = pd.DataFrame(data, copy=True)
# At this point we should reduce the dataframe to numeric cols
numeric_cols = [
k for k, v in wide_data.items() if variable_type(v) == "numeric"
]
wide_data = wide_data[numeric_cols]
# Now melt the data to long form
melt_kws = {"var_name": "@columns", "value_name": "@values"}
use_index = "@index" in self.wide_structure.values()
if use_index:
melt_kws["id_vars"] = "@index"
try:
orig_categories = wide_data.columns.categories
orig_ordered = wide_data.columns.ordered
wide_data.columns = wide_data.columns.add_categories("@index")
except AttributeError:
category_columns = False
else:
category_columns = True
wide_data["@index"] = wide_data.index.to_series()
plot_data = wide_data.melt(**melt_kws)
if use_index and category_columns:
plot_data["@columns"] = pd.Categorical(plot_data["@columns"],
orig_categories,
orig_ordered)
# Assign names corresponding to plot semantics
for var, attr in self.wide_structure.items():
plot_data[var] = plot_data[attr]
# Define the variable names
variables = {}
for var, attr in self.wide_structure.items():
obj = getattr(wide_data, attr[1:])
variables[var] = getattr(obj, "name", None)
# Remove redundant columns from plot_data
plot_data = plot_data[list(variables)]
return plot_data, variables
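# Illustrative note (added; not in the original source): for a wide DataFrame with
# columns ["a", "b"], the melt above produces long-form rows in which "x" holds the
# original index, "y" holds the cell values, and both "hue" and "style" hold the
# originating column name ("a" or "b"), per wide_structure.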
def _assign_variables_longform(self, data=None, **kwargs):
"""Define plot variables given long-form data and/or vector inputs.
Parameters
----------
data : dict-like collection of vectors
Input data where variable names map to vector values.
kwargs : variable -> data mappings
Keys are seaborn variables (x, y, hue, ...) and values are vectors
in any format that can construct a :class:`pandas.DataFrame` or
names of columns or index levels in ``data``.
Returns
-------
plot_data : :class:`pandas.DataFrame`
Long-form data object mapping seaborn variables (x, y, hue, ...)
to data vectors.
variables : dict
Keys are defined seaborn variables; values are names inferred from
the inputs (or None when no name can be determined).
Raises
------
ValueError
When variables are strings that don't appear in ``data``.
"""
plot_data = {}
variables = {}
# Data is optional; all variables can be defined as vectors
if data is None:
data = {}
# TODO should we try a data.to_dict() or similar here to more
# generally accept objects with that interface?
# Note that dict(df) also works for pandas, and gives us what we
# want, whereas DataFrame.to_dict() gives a nested dict instead of
# a dict of series.
# Variables can also be extracted from the index attribute
# TODO is this the most general way to enable it?
# There is no index.to_dict on multiindex, unfortunately
try:
index = data.index.to_frame()
except AttributeError:
index = {}
# The caller will determine the order of variables in plot_data
for key, val in kwargs.items():
# First try to treat the argument as a key for the data collection.
# But be flexible about what can be used as a key.
# Usually it will be a string, but allow numbers or tuples too when
# taking from the main data object. Only allow strings to reference
# fields in the index, because otherwise there is too much ambiguity.
try:
val_as_data_key = (
val in data
or (isinstance(val, (str, bytes)) and val in index)
)
except (KeyError, TypeError):
val_as_data_key = False
if val_as_data_key:
# We know that __getitem__ will work
if val in data:
plot_data[key] = data[val]
elif val in index:
plot_data[key] = index[val]
variables[key] = val
elif isinstance(val, (str, bytes)):
# This looks like a column name but we don't know what it means!
err = f"Could not interpret value `{val}` for parameter `{key}`"
raise ValueError(err)
else:
# Otherwise, assume the value is itself data
# Raise when data object is present and a vector can't be matched
if isinstance(data, pd.DataFrame) and not isinstance(val, pd.Series):
if np.ndim(val) and len(data) != len(val):
val_cls = val.__class__.__name__
err = (
f"Length of {val_cls} vectors must match length of `data`"
f" when both are used, but `data` has length {len(data)}"
f" and the vector passed to `{key}` has length {len(val)}."
)
raise ValueError(err)
plot_data[key] = val
# Try to infer the name of the variable
variables[key] = getattr(val, "name", None)
# Construct a tidy plot DataFrame. This will convert a number of
# types automatically, aligning on index in case of pandas objects
plot_data = pd.DataFrame(plot_data)
# Reduce the variables dictionary to fields with valid data
variables = {
var: name
for var, name in variables.items()
if plot_data[var].notnull().any()
}
return plot_data, variables
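# Hedged usage sketch (added; the "tips"-style column names are hypothetical):
#   p = VectorPlotter()
#   plot_data, variables = p._assign_variables_longform(
#       data=tips, x="total_bill", y="tip", hue="day")
# gives a plot_data frame with columns ["x", "y", "hue"] and a variables dict
# mapping those roles back to the original column names.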
def iter_data(
self, grouping_vars=None, *,
reverse=False, from_comp_data=False,
by_facet=True, allow_empty=False, dropna=True,
):
"""Generator for getting subsets of data defined by semantic variables.
Also injects "col" and "row" into grouping semantics.
Parameters
----------
grouping_vars : string or list of strings
Semantic variables that define the subsets of data.
reverse : bool
If True, reverse the order of iteration.
from_comp_data : bool
If True, use self.comp_data rather than self.plot_data
by_facet : bool
If True, add faceting variables to the set of grouping variables.
allow_empty : bool
If True, yield an empty dataframe when no observations exist for
combinations of grouping variables.
dropna : bool
If True, remove rows with missing data.
Yields
------
sub_vars : dict
Keys are semantic names, values are the level of that semantic.
sub_data : :class:`pandas.DataFrame`
Subset of ``plot_data`` for this combination of semantic values.
"""
# TODO should this default to using all (non x/y?) semantics?
# or define grouping vars somewhere?
if grouping_vars is None:
grouping_vars = []
elif isinstance(grouping_vars, str):
grouping_vars = [grouping_vars]
elif isinstance(grouping_vars, tuple):
grouping_vars = list(grouping_vars)
# Always insert faceting variables
if by_facet:
facet_vars = {"col", "row"}
grouping_vars.extend(
facet_vars & set(self.variables) - set(grouping_vars)
)
# Reduce to the semantics used in this plot
grouping_vars = [
var for var in grouping_vars if var in self.variables
]
if from_comp_data:
data = self.comp_data
else:
data = self.plot_data
if dropna:
data = data.dropna()
levels = self.var_levels.copy()
if from_comp_data:
for axis in {"x", "y"} & set(grouping_vars):
if self.var_types[axis] == "categorical":
if self._var_ordered[axis]:
# If the axis is ordered, then the axes in a possible
# facet grid are by definition "shared", or there is a
# single axis with a unique cat -> idx mapping.
# So we can just take the first converter object.
converter = self.converters[axis].iloc[0]
levels[axis] = converter.convert_units(levels[axis])
else:
# Otherwise, the mappings may not be unique, but we can
# use the unique set of index values in comp_data.
levels[axis] = np.sort(data[axis].unique())
elif self.var_types[axis] == "datetime":
levels[axis] = mpl.dates.date2num(levels[axis])
elif self.var_types[axis] == "numeric" and self._log_scaled(axis):
levels[axis] = np.log10(levels[axis])
if grouping_vars:
grouped_data = data.groupby(
grouping_vars, sort=False, as_index=False
)
grouping_keys = []
for var in grouping_vars:
grouping_keys.append(levels.get(var, []))
iter_keys = itertools.product(*grouping_keys)
if reverse:
iter_keys = reversed(list(iter_keys))
for key in iter_keys:
# Pandas fails with singleton tuple inputs
pd_key = key[0] if len(key) == 1 else key
try:
data_subset = grouped_data.get_group(pd_key)
except KeyError:
# XXX we are adding this to allow backwards compatibility
# with the empty artists that old categorical plots would
# add (before 0.12), which we may decide to break, in which
# case this option could be removed
data_subset = data.loc[[]]
if data_subset.empty and not allow_empty:
continue
sub_vars = dict(zip(grouping_vars, key))
yield sub_vars, data_subset.copy()
else:
yield {}, data.copy()
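# Illustrative usage (added; not part of the original module): iterating subsets
# by a semantic, e.g.
#   for sub_vars, sub_data in plotter.iter_data(["hue"]):
#       ...  # sub_vars is e.g. {"hue": "Mon"}, sub_data holds the matching rows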
@property
def comp_data(self):
"""Dataframe with numeric x and y, after unit conversion and log scaling."""
if not hasattr(self, "ax"):
# Probably a good idea, but will need a bunch of tests updated
# Most of these tests should just use the external interface
# Then this can be re-enabled.
# raise AttributeError("No Axes attached to plotter")
return self.plot_data
if not hasattr(self, "_comp_data"):
comp_data = (
self.plot_data
.copy(deep=False)
.drop(["x", "y"], axis=1, errors="ignore")
)
for var in "yx":
if var not in self.variables:
continue
comp_col = pd.Series(index=self.plot_data.index, dtype=float, name=var)
grouped = self.plot_data[var].groupby(self.converters[var], sort=False)
for converter, orig in grouped:
with pd.option_context('mode.use_inf_as_null', True):
orig = orig.dropna()
if var in self.var_levels:
# TODO this should happen in some centralized location
# it is similar to GH2419, but more complicated because
# supporting `order` in categorical plots is tricky
orig = orig[orig.isin(self.var_levels[var])]
comp = pd.to_numeric(converter.convert_units(orig))
if converter.get_scale() == "log":
comp = np.log10(comp)
comp_col.loc[orig.index] = comp
comp_data.insert(0, var, comp_col)
self._comp_data = comp_data
return self._comp_data
def _get_axes(self, sub_vars):
"""Return an Axes object based on existence of row/col variables."""
row = sub_vars.get("row", None)
col = sub_vars.get("col", None)
if row is not None and col is not None:
return self.facets.axes_dict[(row, col)]
elif row is not None:
return self.facets.axes_dict[row]
elif col is not None:
return self.facets.axes_dict[col]
elif self.ax is None:
return self.facets.ax
else:
return self.ax
def _attach(
self,
obj,
allowed_types=None,
log_scale=None,
):
"""Associate the plotter with an Axes manager and initialize its units.
Parameters
----------
obj : :class:`matplotlib.axes.Axes` or :class:`FacetGrid`
Structural object that we will eventually plot onto.
allowed_types : str or list of str
If provided, raise when either the x or y variable does not have
one of the declared seaborn types.
log_scale : bool, number, or pair of bools or numbers
If not False, set the axes to use log scaling, with the given
base or defaulting to 10. If a tuple, interpreted as separate
arguments for the x and y axes.
"""
from .axisgrid import FacetGrid
if isinstance(obj, FacetGrid):
self.ax = None
self.facets = obj
ax_list = obj.axes.flatten()
if obj.col_names is not None:
self.var_levels["col"] = obj.col_names
if obj.row_names is not None:
self.var_levels["row"] = obj.row_names
else:
self.ax = obj
self.facets = None
ax_list = [obj]
# Identify which "axis" variables we have defined
axis_variables = set("xy").intersection(self.variables)
# -- Verify the types of our x and y variables here.
# This doesn't really make complete sense being here, but it's a fine
# place for it, given the current system.
# (Note that for some plots, there might be more complicated restrictions)
# e.g. the categorical plots have their own check that is specific to the
# non-categorical axis.
if allowed_types is None:
allowed_types = ["numeric", "datetime", "categorical"]
elif isinstance(allowed_types, str):
allowed_types = [allowed_types]
for var in axis_variables:
var_type = self.var_types[var]
if var_type not in allowed_types:
err = (
f"The {var} variable is {var_type}, but one of "
f"{allowed_types} is required"
)
raise TypeError(err)
# -- Get axis objects for each row in plot_data for type conversions and scaling
facet_dim = {"x": "col", "y": "row"}
self.converters = {}
for var in axis_variables:
other_var = {"x": "y", "y": "x"}[var]
converter = pd.Series(index=self.plot_data.index, name=var, dtype=object)
share_state = getattr(self.facets, f"_share{var}", True)
# Simplest cases are that we have a single axes, all axes are shared,
# or sharing is only on the orthogonal facet dimension. In these cases,
# all datapoints get converted the same way, so use the first axis
if share_state is True or share_state == facet_dim[other_var]:
converter.loc[:] = getattr(ax_list[0], f"{var}axis")
else:
# Next simplest case is when no axes are shared, and we can
# use the axis objects within each facet
if share_state is False:
for axes_vars, axes_data in self.iter_data():
ax = self._get_axes(axes_vars)
converter.loc[axes_data.index] = getattr(ax, f"{var}axis")
# In the more complicated case, the axes are shared within each row or
# column ("file") of the facetgrid. In that case, we need to subset the data
# for that row/column and assign it the first axis in that slice of the grid
else:
names = getattr(self.facets, f"{share_state}_names")
for i, level in enumerate(names):
idx = (i, 0) if share_state == "row" else (0, i)
axis = getattr(self.facets.axes[idx], f"{var}axis")
converter.loc[self.plot_data[share_state] == level] = axis
# Store the converter vector, which we use elsewhere (e.g. comp_data)
self.converters[var] = converter
# Now actually update the matplotlib objects to do the conversion we want
grouped = self.plot_data[var].groupby(self.converters[var], sort=False)
for converter, seed_data in grouped:
if self.var_types[var] == "categorical":
if self._var_ordered[var]:
order = self.var_levels[var]
else:
order = None
seed_data = categorical_order(seed_data, order)
converter.update_units(seed_data)
# -- Set numerical axis scales
# First unpack the log_scale argument
if log_scale is None:
scalex = scaley = False
else:
# Allow single value or x, y tuple
try:
scalex, scaley = log_scale
except TypeError:
scalex = log_scale if "x" in self.variables else False
scaley = log_scale if "y" in self.variables else False
# Now use it
for axis, scale in zip("xy", (scalex, scaley)):
if scale:
for ax in ax_list:
set_scale = getattr(ax, f"set_{axis}scale")
if scale is True:
set_scale("log")
else:
if LooseVersion(mpl.__version__) >= "3.3":
set_scale("log", base=scale)
else:
set_scale("log", **{f"base{axis}": scale})
# For categorical y, we want the "first" level to be at the top of the axis
if self.var_types.get("y", None) == "categorical":
for ax in ax_list:
try:
ax.yaxis.set_inverted(True)
except AttributeError: # mpl < 3.1
if not ax.yaxis_inverted():
ax.invert_yaxis()
# TODO -- Add axes labels
def _log_scaled(self, axis):
"""Return True if specified axis is log scaled on all attached axes."""
if not hasattr(self, "ax"):
return False
if self.ax is None:
axes_list = self.facets.axes.flatten()
else:
axes_list = [self.ax]
log_scaled = []
for ax in axes_list:
data_axis = getattr(ax, f"{axis}axis")
log_scaled.append(data_axis.get_scale() == "log")
if any(log_scaled) and not all(log_scaled):
raise RuntimeError("Axis scaling is not consistent")
return any(log_scaled)
def _add_axis_labels(self, ax, default_x="", default_y=""):
"""Add axis labels if not present, set visibility to match ticklabels."""
# TODO ax could default to None and use attached axes if present
# but what to do about the case of facets? Currently using FacetGrid's
# set_axis_labels method, which doesn't add labels to the interior even
# when the axes are not shared. Maybe that makes sense?
if not ax.get_xlabel():
x_visible = any(t.get_visible() for t in ax.get_xticklabels())
ax.set_xlabel(self.variables.get("x", default_x), visible=x_visible)
if not ax.get_ylabel():
y_visible = any(t.get_visible() for t in ax.get_yticklabels())
ax.set_ylabel(self.variables.get("y", default_y), visible=y_visible)
# XXX If the scale_* methods are going to modify the plot_data structure, they
# can't be called twice. That means that if they are called twice, they should
# raise. Alternatively, we could store an original version of plot_data and each
# time they are called they operate on the store, not the current state.
def scale_native(self, axis, *args, **kwargs):
# Default, defer to matplotlib
raise NotImplementedError
def scale_numeric(self, axis, *args, **kwargs):
# Feels needed for completeness; what should it do?
# Perhaps handle log scaling? Set the ticker/formatter/limits?
raise NotImplementedError
def scale_datetime(self, axis, *args, **kwargs):
# Use pd.to_datetime to convert strings or numbers to datetime objects
# Note, use day-resolution for numeric->datetime to match matplotlib
raise NotImplementedError
def scale_categorical(self, axis, order=None, formatter=None):
"""
Enforce categorical (fixed-scale) rules for the data on given axis.
Parameters
----------
axis : "x" or "y"
Axis of the plot to operate on.
order : list
Order that unique values should appear in.
formatter : callable
Function mapping values to a string representation.
Returns
-------
self
"""
# This method both modifies the internal representation of the data
# (converting it to string) and sets some attributes on self. It might be
# a good idea to have a separate object attached to self that contains the
# information in those attributes (i.e. whether to enforce variable order
# across facets, the order to use) similar to the SemanticMapping objects
# we have for semantic variables. That object could also hold the converter
# objects that get used, if we can decouple those from an existing axis
# (cf. https://github.com/matplotlib/matplotlib/issues/19229).
# There are some interactions with faceting information that would need
# to be thought through, since the converters to use depend on facets.
# If we go that route, these methods could become "borrowed" methods similar
# to what happens with the alternate semantic mapper constructors, although
# that approach is kind of fussy and confusing.
# TODO this method could also set the grid state? Since we like to have no
# grid on the categorical axis by default. Again, a case where we'll need to
# store information until we use it, so best to have a way to collect the
# attributes that this method sets.
# TODO if we are going to set visual properties of the axes with these methods,
# then we could do the steps currently in CategoricalPlotter._adjust_cat_axis
# TODO another, and distinct idea, is to expose a cut= param here
_check_argument("axis", ["x", "y"], axis)
# Categorical plots can be "univariate" in which case they get an anonymous
# category label on the opposite axis.
if axis not in self.variables:
self.variables[axis] = None
self.var_types[axis] = "categorical"
self.plot_data[axis] = ""
# If the "categorical" variable has a numeric type, sort the rows so that
# the default result from categorical_order has those values sorted after
# they have been coerced to strings. The reason for this is so that later
# we can get facet-wise orders that are correct.
# XXX Should this also sort datetimes?
# It feels more consistent, but technically will be a default change
# If so, should also change categorical_order to behave that way
if self.var_types[axis] == "numeric":
self.plot_data = self.plot_data.sort_values(axis, kind="mergesort")
# Now get a reference to the categorical data vector
cat_data = self.plot_data[axis]
# Get the initial categorical order, which we do before string
# conversion to respect the original types of the order list.
# Track whether the order is given explicitly so that we can know
# whether or not to use the order constructed here downstream
self._var_ordered[axis] = order is not None or cat_data.dtype.name == "category"
order = pd.Index(categorical_order(cat_data, order))
# Then convert data to strings. This is because in matplotlib,
# "categorical" data really mean "string" data, so doing this artists
# will be drawn on the categorical axis with a fixed scale.
# TODO implement formatter here; check that it returns strings?
if formatter is not None:
cat_data = cat_data.map(formatter)
order = order.map(formatter)
else:
cat_data = cat_data.astype(str)
order = order.astype(str)
# Update the levels list with the type-converted order variable
self.var_levels[axis] = order
# Now ensure that seaborn will use categorical rules internally
self.var_types[axis] = "categorical"
# Put the string-typed categorical vector back into the plot_data structure
self.plot_data[axis] = cat_data
return self
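# Rough example of the effect (added for clarity): after
# plotter.scale_categorical("x", order=[3, 1, 2]), plot_data["x"] holds the string
# values "3", "1", "2" and var_levels["x"] is Index(["3", "1", "2"]), so matplotlib
# treats the axis as a fixed categorical scale.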
class VariableType(UserString):
"""
Prevent comparisons elsewhere in the library from using the wrong name.
Errors are simple assertions because users should not be able to trigger
them. If that changes, they should be more verbose.
"""
allowed = "numeric", "datetime", "categorical"
def __init__(self, data):
assert data in self.allowed, data
super().__init__(data)
def __eq__(self, other):
assert other in self.allowed, other
return self.data == other
def variable_type(vector, boolean_type="numeric"):
"""
Determine whether a vector contains numeric, categorical, or datetime data.
This function differs from the pandas typing API in two ways:
- Python sequences or object-typed PyData objects are considered numeric if
all of their entries are numeric.
- String or mixed-type data are considered categorical even if not
explicitly represented as a :class:`pandas.api.types.CategoricalDtype`.
Parameters
----------
vector : :func:`pandas.Series`, :func:`numpy.ndarray`, or Python sequence
Input data to test.
boolean_type : 'numeric' or 'categorical'
Type to use for vectors containing only 0s and 1s (and NAs).
Returns
-------
var_type : 'numeric', 'categorical', or 'datetime'
Name identifying the type of data in the vector.
"""
# If a categorical dtype is set, infer categorical
if | pd.api.types.is_categorical_dtype(vector) | pandas.api.types.is_categorical_dtype |
import numpy as np
import pandas as pd
from .data import DATA_PATH, COARSE_INDEX
def symmetric_contact_matrix(country, coarse=False):
"""
Return inferred symmetric matrix data from (Fumanelli, 2012)
See Also:
https://journals.plos.org/ploscompbiol/article?id=10.1371/journal.pcbi.1002673#s4
"""
path = DATA_PATH / "contact_matrix" / "fumanelli.xls"
df = pd.read_excel(path, country, header=None)
df.index = range(1, 101)
df.columns = df.index
return _symmetric_contact_matrix_coarse(df) if coarse else df
def _symmetric_contact_matrix_coarse(df):
out = np.zeros((9, 9))
# Add a zeroed row and column to represent the 0 age range
data = np.zeros((101, 101))
data[1:, 1:] = df.values
# Fill 0-9, 10-19, ..., 70-79 ranges
for i in range(0, 80, 10):
for j in range(0, 80, 10):
out[i // 10, j // 10] += data[i : i + 10, j : j + 10].sum()
# Fill 80+ ranges
for i in range(8):
out[i, 8] += data[10 * i : 10 * (i + 1), 80:].sum()
out[8, i] += data[80:, 10 * i : 10 * (i + 1)].sum()
out[8, 8] += data[80:, 80:].sum()
return pd.DataFrame(out, columns=COARSE_INDEX, index=COARSE_INDEX)
def contact_matrix(country="mean", physical=False, coarse=None, infer=False) -> pd.DataFrame:
"""
Load contact matrix for country.
If coarse is True, return a decennial distribution that is compatible with
other data used in this package like mortality rates and coarse age
distributions.
"""
if infer is not False and physical:
raise ValueError("cannot infer physical contact matrix")
elif infer is False:
return _contact_matrix(country, physical, coarse)
elif coarse is False:
raise ValueError("can only infer coarse matrices")
elif infer is True:
demography = age_distribution(country, 2020, coarse=True).values * 1.0
else:
demography = np.asarray(infer)
contacts = symmetric_contact_matrix(country, coarse=True).values
demography = demography / demography.sum()
data = (contacts / demography).T
eig = np.linalg.eigvals(data)
lambd = eig.real.max()
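# Added comment (interpretive): `data` is the contact matrix weighted by the
# normalized age distribution; dividing by its largest real eigenvalue below
# rescales it so the dominant eigenvalue is 1, a common normalization when a
# transmission parameter (e.g. R0) is applied later.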
return | pd.DataFrame(data / lambd, index=COARSE_INDEX, columns=COARSE_INDEX) | pandas.DataFrame |
"""Process Shopify Return."""
import pandas as pd
import json
from numpy import nan, sum
from typing import Dict
import timeit
from pyshopify.vars import (keys_list,
order_dtypes,
cust_dtypes,
ref_dtypes,
ref_keys,
refli_keys,
adj_dtypes,
adj_keys,
item_dtypes,
item_keys,
cust_cols,
cust_map,
discapp_map,
discapp_dtypes,
disccode_dtypes,
disccode_map,
discapp_keys,
shipline_keys,
shipline_map,
shipline_dtypes
)
def pandas_work(json_list: json) -> Dict[str, pd.DataFrame]:
"""Parse orders API return data."""
starttime = timeit.default_timer()
table_dict = {}
orders = pd.json_normalize(json_list, ['orders'])
orders.drop(columns=orders.columns.difference(keys_list), inplace=True, axis='columns')
orders.created_at = pd.to_datetime(orders.created_at)
orders.updated_at = pd.to_datetime(orders.updated_at)
orders = orders.fillna(0)  # fillna returns a new frame; assign it so the fill takes effect
orders = orders.astype(order_dtypes)
orders['payment_gateway_names'] = orders['payment_gateway_names'].astype(str).str.replace('[', '', regex=False)
orders['payment_gateway_names'] = orders['payment_gateway_names'].str.replace(']', '', regex=False)
orders['payment_gateway_names'] = orders['payment_gateway_names'].str.replace("'", '', regex=False)
orders.rename(columns={'created_at': 'order_date'}, inplace=True)
table_dict['Orders'] = orders
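# Clarifying note (added): the three replace() calls above flatten the
# payment_gateway_names list literal (e.g. "['shopify_payments']") into a plain
# comma-separated string ("shopify_payments") before the orders table is stored.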
shiplines = pd.json_normalize(json_list, ['orders', 'shipping_lines'],
meta=[['orders', 'id'], ['orders', 'created_at']])
if len(shiplines.index) > 0:
shiplines.drop(columns=shiplines.columns.difference(shipline_keys), inplace=True, axis='columns')
for col in shipline_keys:
if col not in shiplines.columns:
shiplines[col] = ''
collist = ['price', 'discounted_price']
for column in collist:
shiplines[column] = shiplines[column].replace(r'\s+', nan, regex=True)
shiplines[column] = shiplines[column].fillna(0)
shiplines.rename(columns=shipline_map, inplace=True)
shiplines.order_date = | pd.to_datetime(shiplines.order_date) | pandas.to_datetime |
from operator import methodcaller
import numpy as np
import pandas as pd
import pytest
from pandas.util import testing as tm
import ibis
import ibis.common.exceptions as com
import ibis.expr.datatypes as dt
import ibis.expr.operations as ops
from ibis.expr.scope import Scope
from ibis.expr.window import get_preceding_value, rows_with_max_lookback
from ibis.udf.vectorized import reduction
from ... import Backend, PandasClient, execute
from ...aggcontext import AggregationContext, window_agg_udf
from ...dispatch import pre_execute
from ...execution.window import get_aggcontext
pytestmark = pytest.mark.pandas
# These custom classes are used in test_custom_window_udf
class CustomInterval:
def __init__(self, value):
self.value = value
# These are necessary because ibis.expr.window
# will compare preceding and following
# with 0 to see if they are valid
def __lt__(self, other):
return self.value < other
def __gt__(self, other):
return self.value > other
class CustomWindow(ibis.expr.window.Window):
""" This is a dummy custom window that return n preceding rows
where n is defined by CustomInterval.value."""
def _replace(self, **kwds):
new_kwds = {
'group_by': kwds.get('group_by', self._group_by),
'order_by': kwds.get('order_by', self._order_by),
'preceding': kwds.get('preceding', self.preceding),
'following': kwds.get('following', self.following),
'max_lookback': kwds.get('max_lookback', self.max_lookback),
'how': kwds.get('how', self.how),
}
return CustomWindow(**new_kwds)
class CustomAggContext(AggregationContext):
def __init__(
self, parent, group_by, order_by, output_type, max_lookback, preceding
):
super().__init__(
parent=parent,
group_by=group_by,
order_by=order_by,
output_type=output_type,
max_lookback=max_lookback,
)
self.preceding = preceding
def agg(self, grouped_data, function, *args, **kwargs):
upper_indices = pd.Series(range(1, len(self.parent) + 2))
window_sizes = (
grouped_data.rolling(self.preceding.value + 1)
.count()
.reset_index(drop=True)
)
lower_indices = upper_indices - window_sizes
mask = upper_indices.notna()
result_index = grouped_data.obj.index
result = window_agg_udf(
grouped_data,
function,
lower_indices,
upper_indices,
mask,
result_index,
self.dtype,
self.max_lookback,
*args,
**kwargs,
)
return result
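# Added explanatory comment: for each row, the lower/upper index Series above
# delimit a window containing the current row plus at most `preceding.value`
# prior rows within its group; those bounds are then handed to window_agg_udf,
# the helper the pandas backend uses to evaluate windowed UDFs.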
@pytest.fixture(scope='session')
def sort_kind():
return 'mergesort'
default = pytest.mark.parametrize('default', [ibis.NA, ibis.literal('a')])
row_offset = pytest.mark.parametrize(
'row_offset', list(map(ibis.literal, [-1, 1, 0]))
)
range_offset = pytest.mark.parametrize(
'range_offset',
[
ibis.interval(days=1),
2 * ibis.interval(days=1),
-2 * ibis.interval(days=1),
],
)
@pytest.fixture
def row_window():
return ibis.window(following=0, order_by='plain_int64')
@pytest.fixture
def range_window():
return ibis.window(following=0, order_by='plain_datetimes_naive')
@pytest.fixture
def custom_window():
return CustomWindow(
preceding=CustomInterval(1),
following=0,
group_by='dup_ints',
order_by='plain_int64',
)
@default
@row_offset
def test_lead(t, df, row_offset, default, row_window):
expr = t.dup_strings.lead(row_offset, default=default).over(row_window)
result = expr.execute()
expected = df.dup_strings.shift(execute(-row_offset))
if default is not ibis.NA:
expected = expected.fillna(execute(default))
tm.assert_series_equal(result, expected)
@default
@row_offset
def test_lag(t, df, row_offset, default, row_window):
expr = t.dup_strings.lag(row_offset, default=default).over(row_window)
result = expr.execute()
expected = df.dup_strings.shift(execute(row_offset))
if default is not ibis.NA:
expected = expected.fillna(execute(default))
tm.assert_series_equal(result, expected)
@default
@range_offset
def test_lead_delta(t, df, range_offset, default, range_window):
expr = t.dup_strings.lead(range_offset, default=default).over(range_window)
result = expr.execute()
expected = (
df[['plain_datetimes_naive', 'dup_strings']]
.set_index('plain_datetimes_naive')
.squeeze()
.shift(freq=execute(-range_offset))
.reindex(df.plain_datetimes_naive)
.reset_index(drop=True)
)
if default is not ibis.NA:
expected = expected.fillna(execute(default))
tm.assert_series_equal(result, expected)
@default
@range_offset
def test_lag_delta(t, df, range_offset, default, range_window):
expr = t.dup_strings.lag(range_offset, default=default).over(range_window)
result = expr.execute()
expected = (
df[['plain_datetimes_naive', 'dup_strings']]
.set_index('plain_datetimes_naive')
.squeeze()
.shift(freq=execute(range_offset))
.reindex(df.plain_datetimes_naive)
.reset_index(drop=True)
)
if default is not ibis.NA:
expected = expected.fillna(execute(default))
tm.assert_series_equal(result, expected)
def test_first(t, df):
expr = t.dup_strings.first()
result = expr.execute()
assert result == df.dup_strings.iloc[0]
def test_last(t, df):
expr = t.dup_strings.last()
result = expr.execute()
assert result == df.dup_strings.iloc[-1]
def test_group_by_mutate_analytic(t, df):
gb = t.groupby(t.dup_strings)
expr = gb.mutate(
first_value=t.plain_int64.first(),
last_value=t.plain_strings.last(),
avg_broadcast=t.plain_float64 - t.plain_float64.mean(),
delta=(t.plain_int64 - t.plain_int64.lag())
/ (t.plain_float64 - t.plain_float64.lag()),
)
result = expr.execute()
gb = df.groupby('dup_strings')
expected = df.assign(
last_value=gb.plain_strings.transform('last'),
first_value=gb.plain_int64.transform('first'),
avg_broadcast=df.plain_float64 - gb.plain_float64.transform('mean'),
delta=(
(df.plain_int64 - gb.plain_int64.shift(1))
/ (df.plain_float64 - gb.plain_float64.shift(1))
),
)
tm.assert_frame_equal(result[expected.columns], expected)
def test_players(players, players_df):
lagged = players.mutate(pct=lambda t: t.G - t.G.lag())
expected = players_df.assign(
pct=players_df.G - players_df.groupby('playerID').G.shift(1)
)
cols = expected.columns.tolist()
result = lagged.execute()[cols].sort_values(cols).reset_index(drop=True)
tm.assert_frame_equal(result, expected)
def test_batting_filter_mean(batting, batting_df):
expr = batting[batting.G > batting.G.mean()]
result = expr.execute()
expected = batting_df[batting_df.G > batting_df.G.mean()].reset_index(
drop=True
)
tm.assert_frame_equal(result[expected.columns], expected)
def test_batting_zscore(players, players_df):
expr = players.mutate(g_z=lambda t: (t.G - t.G.mean()) / t.G.std())
gb = players_df.groupby('playerID')
expected = players_df.assign(
g_z=(players_df.G - gb.G.transform('mean')) / gb.G.transform('std')
)
cols = expected.columns.tolist()
result = expr.execute()[cols].sort_values(cols).reset_index(drop=True)
tm.assert_frame_equal(result, expected)
def test_batting_avg_change_in_games_per_year(players, players_df):
expr = players.mutate(
delta=lambda t: (t.G - t.G.lag()) / (t.yearID - t.yearID.lag())
)
gb = players_df.groupby('playerID')
expected = players_df.assign(
delta=(players_df.G - gb.G.shift(1))
/ (players_df.yearID - gb.yearID.shift(1))
)
cols = expected.columns.tolist()
result = expr.execute()[cols].sort_values(cols).reset_index(drop=True)
tm.assert_frame_equal(result, expected)
@pytest.mark.xfail(raises=AssertionError, reason='NYI')
def test_batting_most_hits(players, players_df):
expr = players.mutate(
hits_rank=lambda t: t.H.rank().over(
ibis.cumulative_window(order_by=ibis.desc(t.H))
)
)
result = expr.execute()
hits_rank = players_df.groupby('playerID').H.rank(
method='min', ascending=False
)
expected = players_df.assign(hits_rank=hits_rank)
tm.assert_frame_equal(result[expected.columns], expected)
def test_batting_quantile(players, players_df):
expr = players.mutate(hits_quantile=lambda t: t.H.quantile(0.25))
hits_quantile = players_df.groupby('playerID').H.transform(
'quantile', 0.25
)
expected = players_df.assign(hits_quantile=hits_quantile)
cols = expected.columns.tolist()
result = expr.execute()[cols].sort_values(cols).reset_index(drop=True)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize('op', ['sum', 'mean', 'min', 'max'])
def test_batting_specific_cumulative(batting, batting_df, op, sort_kind):
ibis_method = methodcaller('cum{}'.format(op))
expr = ibis_method(batting.sort_by([batting.yearID]).G)
result = expr.execute().astype('float64')
pandas_method = methodcaller(op)
expected = pandas_method(
batting_df[['G', 'yearID']]
.sort_values('yearID', kind=sort_kind)
.G.expanding()
).reset_index(drop=True)
tm.assert_series_equal(result, expected)
def test_batting_cumulative(batting, batting_df, sort_kind):
expr = batting.mutate(
more_values=lambda t: t.G.sum().over(
ibis.cumulative_window(order_by=t.yearID)
)
)
result = expr.execute()
columns = ['G', 'yearID']
more_values = (
batting_df[columns]
.sort_values('yearID', kind=sort_kind)
.G.expanding()
.sum()
.astype('int64')
)
expected = batting_df.assign(more_values=more_values)
| tm.assert_frame_equal(result[expected.columns], expected) | pandas.util.testing.assert_frame_equal |
import dash
import dash_core_components as dcc
import dash_html_components as html
import dash_bootstrap_components as dbc # conda install -c conda-forge dash-bootstrap-components
from dash.dependencies import Output, Input
import pandas as pd
import numpy as np
import requests
import sys
import web_service as ws
from datetime import datetime
import datetime as dt
import pytz
import dash_daq as daq
pytz.all_timezones
def get_now():
tz_NZ = pytz.timezone('Pacific/Auckland')
datetime_NZ = datetime.now(tz_NZ)
return datetime_NZ.strftime("%Y-%m-%d %H:%M")
def start_date(daysBeforeNow=7):
tz_NZ = pytz.timezone('Pacific/Auckland')
datetime_NZ = datetime.now(tz_NZ)
day_delta = dt.timedelta(days=daysBeforeNow)
from_date = datetime_NZ - day_delta
return from_date.strftime("%Y-%m-%d %H:%M")
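# Illustrative note (added): the two helpers above build the query window as local
# Pacific/Auckland strings, e.g. from_date, to_date = start_date(7), get_now();
# the concrete timestamps depend on when the app runs.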
def get_data(site):
### Parameters
base_url = 'http://tsdata.horizons.govt.nz/'
hts = 'boo.hts'
measurement = 'Stage [Water Level]'
from_date = start_date(7)
to_date = get_now()
#dtl_method = 'trend'
df = ws.get_data_basic(base_url,hts,site,measurement,from_date,to_date)
# columns=['Site', 'Measurement', 'Parameter', 'DateTime', 'Value'])
return df
def get_all_stage_data():
base_url = 'http://tsdata.horizons.govt.nz/'
hts = 'boo.hts'
collection = 'River Level'
from_date = start_date(3)
to_date = get_now()
df = ws.get_datatable(base_url, hts, collection, from_date=from_date, to_date=to_date)
return df
def get_thresholds(site):
# import threshold file
df = pd.read_csv("thresholds.csv")
df = df.query("SiteName == '"+site+"'")
return df
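# Illustrative sketch (added): only the 'SiteName' column of thresholds.csv is visible
# above; any other column names are assumptions about that file, e.g.
# thresholds = get_thresholds("Manawatu at Teachers College")
# alert_level = thresholds["Threshold"].iloc[0]   # hypothetical column name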
site = "Makino at Rata Street"
site = "Manawatu at Teachers College"
data = get_data(site)
data["T"] = | pd.to_datetime(data["T"],infer_datetime_format=True) | pandas.to_datetime |
from mpl_toolkits import mplot3d
from matplotlib.ticker import MaxNLocator
import matplotlib.pyplot as plt
import matplotlib.colors
import matplotlib.animation as animation
import numpy as np
import pandas as pd
import copy
import sys
import scipy as sy
import os
from scipy import fftpack
from tools.my_setup import *
# plt.rc('text', usetex=True)
turn_off_graphs=True
turn_off_graphs=False
# original = copy.deepcopy(particles)
for i in range(0,num_particles):
if any(particles.identifier == i):
change_i = particles.iloc[i].identifier
particles.loc[particles.identifier == change_i, 'identifier'] = -i
q = particles[particles.timestep == 0]
q = q.sort_values(by=['temperature'])
for i in range(0,num_particles):
change_i = q.iloc[i].identifier
new_i = i * (-1) - 1
particles.replace({'identifier' : change_i}, new_i, inplace=True)
particles.identifier *= -1
# oparticles = particles[['timestep','x','y','z_meters', 'identifier', 'image']]
# particles = particles[['timestep','identifier','temperature','z_meters','pressure']]
# particles = particles[particles.identifier < 600000000]
# particles = particles[particles.identifier < -14]
# --- function that gets run every animation timestep ---
blue_cmap = plt.cm.Blues
discrete_cmap = plt.get_cmap('tab20b')
rh_cmap = plt.get_cmap('gist_gray')
# discrete_cmap = plt.get_cmap('tab10')
# discrete_cmap = plt.get_cmap('Set1')
# discrete_cmap = plt.get_cmap('Paired')
particles.z_meters /= 1000
particles.pressure /= 100
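# (added note) the two conversions above assume z_meters is in metres and pressure in
# pascals, giving elevation in km and pressure in hPa for the plots below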
rows = 2 # 2
cols = 3
fig = plt.figure()
# ax = fig.add_subplot(rows,cols,1)
# if True: # -----plot temperature-----
# # particles.temperature -= 273.15
# sc = ax.scatter(particles.temperature, particles.z_meters,
# # ax.scatter(particles.z_meters, particles.temperature,
# cmap=discrete_cmap, c=particles.identifier, marker='.')
# ax.set_xlabel("temperature (K)")
# ax.set_ylabel("elevation (km)")
# # ax.set_xlabel("elevation (km)")
# # ax.set_ylabel("temperature (K)")
# if True: # -----plot pressure-----
# ax2 = fig.add_subplot(rows,cols,2)
# ax2.scatter(particles.pressure, particles.z_meters,
# # ax2.scatter(particles.z_meters, particles.pressure,
# cmap=discrete_cmap, c=particles.identifier, marker='.')
# # ax2.set_xlabel("elevation (km)")
# # ax2.set_ylabel("pressure")
# ax2.set_xlabel("pressure (kPa)")
# # ax2.set_ylabel("elevation (km)")
# plt.setp(ax2,yticklabels=[])
# plt.setp(ax2.get_yticklabels(), visible=False)
comparison = particles[particles.timestep == 0].relative_humidity.values == particles[particles.timestep == 1].relative_humidity.values
if (comparison.all()):
relative_humidity = False
else:
relative_humidity = True
add=1 - 3
if relative_humidity == False:
# -----plot temp over time-----
ax3 = fig.add_subplot(rows,cols,3+add)
ax3.scatter(particles.timestep, particles.temperature,
cmap=discrete_cmap, c=particles.identifier, marker='.')
ax3.set_xlabel("timesteps")
ax3.set_ylabel("temp (K)")
# -----plot pressure over time-----
ax4 = fig.add_subplot(rows,cols,4+add)
ax4.scatter(particles.timestep, particles.pressure,
cmap=discrete_cmap, c=particles.identifier, marker='.')
ax4.set_xlabel("timesteps")
ax4.set_ylabel("pressure (hPa)")
if relative_humidity == True:
rh = particles[particles.relative_humidity >= 1.0]
no_rh = particles[particles.relative_humidity < 1.0]
# ------- no rh -------
# -----plot temp over time-----
ax3 = fig.add_subplot(rows,cols,3+add)
ax3.scatter(no_rh.timestep, no_rh.temperature,
cmap=discrete_cmap, c=no_rh.identifier, marker='.')
ax3.set_xlabel("timesteps")
ax3.set_ylabel("temp (K)")
# -----plot pressure over time-----
ax4 = fig.add_subplot(rows,cols,4+add)
ax4.scatter(no_rh.timestep, no_rh.pressure,
cmap=discrete_cmap, c=no_rh.identifier, marker='.')
ax4.set_xlabel("timesteps")
ax4.set_ylabel("pressure (hPa)")
# -------- rh ---------
# -----plot temp over time-----
ax3.scatter(rh.timestep, rh.temperature,
cmap=rh_cmap, c=rh.identifier, marker='.')
ax3.set_xlabel("timesteps")
ax3.set_ylabel("Temp (K)")
# -----plot pressure over time-----
ax4.scatter(rh.timestep, rh.pressure,
cmap=rh_cmap, c=rh.identifier, marker='.')
ax4.set_xlabel("timesteps")
ax4.set_ylabel("pressure (hPa)")
# fig.legend(['Grey Scale when relative humidity > 1.0'], loc='lower left')
title="Temperature and Pressure of \n"
title += str(num_particles) + " saturated particles, " + str(num_t) + " timesteps"
# title += ", " + f.name.split('/')[1]
# plt.suptitle(title)
# --- plot potential temperature ---
ax55 = fig.add_subplot(rows,cols,5+add)
ax55.scatter(particles.timestep, particles.potential_temperature,
cmap=discrete_cmap, c=particles.identifier, marker='.')
ax55.set_xlabel("timesteps")
ax55.set_ylabel("potential temp. (K)")
# plot altitude and the change of potential temp, temp, pressure
ax7 = fig.add_subplot(rows,cols,6+add)
ax7.scatter(particles.temperature, particles.z_meters,
cmap=discrete_cmap, c=particles.identifier, marker='.')
ax7.set_xlabel("Temp (K)")
ax7.set_ylabel("elevation (km)")
ax8 = fig.add_subplot(rows,cols,7+add)
# ax8.scatter(particles.temperature, particles.pressure,
# cmap=discrete_cmap, c=particles.identifier, marker='.')
# ax8.set_xlabel("Temp (K)")
# ax8.set_ylabel("pressure (hPa)")
# ax8.invert_yaxis()
ax8.scatter(particles.pressure, particles.z_meters,
cmap=discrete_cmap, c=particles.identifier, marker='.')
ax8.set_xlabel("pressure (hPa)")
ax8.set_ylabel("elevation (km)")
ax9 = fig.add_subplot(rows,cols,8+add)
ax9.scatter(particles.potential_temperature, particles.z_meters,
cmap=discrete_cmap, c=particles.identifier, marker='.')
ax9.set_xlabel("potential temp. (K)")
ax9.set_ylabel("elevation (km)")
plt.tight_layout()
if 'dry' in os.path.splitext(f.name)[0]:
filename='validation_dry_parcels'
else:
filename='validation_saturated_parcels'
filename+=".png"
# fig.set_size_inches((4,3))
fig.set_size_inches((6,4))
plt.tight_layout()
plt.savefig(filename, pad_inches=0.0, dpi=400)
# fig.save(filename, pdf=False, pgf=True)
print("file "+filename+" outputted")
print(fig.get_size_inches(), " and ",fig.dpi)
print(f.name, " ", os.path.splitext(f.name)[0])
# should be ~ 3.07
# is 6.4, 4.8
#
plt.show()
print("Fin!")
sys.exit()
# ---- plot table ----
t = particles[particles.identifier == 1].timestep.to_numpy()
# ax5 = fig.add_subplot(3,1,3)
ax5 = fig.add_subplot(3,2,6)
t_x = 3
t_y = 1
table_dim = (t_x,t_y)
table_data = np.zeros(table_dim, dtype=float)
for row in range(1,num_particles+1):
x = particles[particles.identifier == row].temperature.to_numpy()
x = x[~np.isnan(x)]
x = x - x[0] # center data
period = np.diff(np.where(np.diff(np.sign(x)) < 0))
ave_period = np.average(np.diff(np.where(np.diff(np.sign(x)) < 0)))
ave_freq = 1 / ave_period
X = fftpack.fft(x)
freqs = fftpack.fftfreq(len(x))
# print(table_data)
# table_data = np.c_[table_data, ['{:,.5f}'.format(ave_freq), ave_period, '']]
table_data = np.c_[table_data, ['{:,.5f}'.format(ave_freq),
'{:,.2f}'.format(ave_period),
'']]
table_data = table_data[:,1:]
col_names = ['ave. frequency','ave. period','particle color']
table_data = table_data.transpose()
table_df = | pd.DataFrame(data=table_data, columns=col_names) | pandas.DataFrame |
import os
import shutil
import pickle
from torch import nn
import torch
import pandas as pd
import random
import numpy as np
from torch.utils.data import Dataset
from torch.utils.data import DataLoader
from torch.cuda.amp import autocast,GradScaler
from sklearn.model_selection import KFold
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import StandardScaler
from sklearn.decomposition import PCA
from torchvision import transforms
from utils import dfs_remove_weight
from feature_selection import select_feature_linesvc
ACTIVATION = {
'relu':nn.ReLU(inplace=True),
'elu':nn.ELU(inplace=True),
'leakyrelu':nn.LeakyReLU(inplace=True),
'prelu':nn.PReLU(),
'gelu':nn.GELU(),
'tanh':nn.Tanh()
}
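# Illustrative helper sketch (not part of the original script): MLP_CLASSIFIER below
# accepts an activation module, so a config string can be mapped through ACTIVATION.
def _activation_from_name(name: str) -> nn.Module:
    """Illustrative only: look up an activation module by its config name, e.g. 'gelu'."""
    return ACTIVATION[name]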
class RandomMask1d(object):
def __init__(self,factor=0.1,p=0.5):
self.factor = factor
self.p = p
def __call__(self,seq):
"""
seq: vector of 1d
"""
if np.random.rand() > self.p:
mask = (np.random.random_sample(seq.shape) > self.factor).astype(np.float32)
seq = seq * mask
return seq
class RandomScale1d(object):
def __init__(self,factor=0.2,p=0.5):
self.factor = factor
self.p = p
def __call__(self,seq):
"""
seq: vector of 1d
"""
if np.random.rand() > self.p:
mask = (np.random.random_sample(seq.shape)*self.factor + (1.0-self.factor/2)).astype(np.float32)
seq = seq * mask
return seq
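# Illustrative sketch (added): compose the two 1-D augmentations with their defaults;
# this mirrors how RandomScale1d is used in the training/TTA code further down.
def _example_augmentation_pipeline():
    """Illustrative only: mask ~10% of features and rescale each by roughly +/-10%."""
    return transforms.Compose([RandomMask1d(factor=0.1, p=0.5),
                               RandomScale1d(factor=0.2, p=0.5)])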
class MyDataset(Dataset):
def __init__(self, X, Y=None,transform=None):
self.X = X
self.Y = Y
self.transform = transform
def __len__(self):
return len(self.X)
def __getitem__(self,index):
data = self.X[index]
if self.transform is not None:
data = self.transform(data)
if self.Y is not None:
target = self.Y[index]
sample = {'data':torch.from_numpy(data), 'target':int(target)}
else:
sample = {'data':torch.from_numpy(data)}
return sample
class MLP_CLASSIFIER(nn.Module):
def __init__(self, input_size, output_size=245, depth=3, depth_list=[256,128,64], drop_prob=0.5, use_norm=True,activation=None):
super(MLP_CLASSIFIER, self).__init__()
assert len(depth_list) == depth
self.linear_list = []
for i in range(depth):
if i == 0:
self.linear_list.append(nn.Linear(input_size,depth_list[i]))
else:
self.linear_list.append(nn.Linear(depth_list[i-1],depth_list[i]))
if use_norm:
self.linear_list.append(nn.BatchNorm1d(depth_list[i]))
self.linear_list.append(nn.Dropout(0.2))
if activation is None:
self.linear_list.append(nn.ReLU(inplace=True))
else:
self.linear_list.append(activation)
# self.linear_list.append(nn.Tanh())
# self.linear_list.append(nn.Dropout(0.5)) #pro
self.linear = nn.Sequential(*self.linear_list)
self.drop = nn.Dropout(drop_prob) if drop_prob > 0.0 else None
self.cls_head = nn.Linear(depth_list[-1],output_size)
def forward(self, x):
x = self.linear(x) #N*C
if self.drop:
x = self.drop(x)
x = self.cls_head(x)
return x
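# Illustrative construction sketch (added): the 512-feature input is an assumption for
# illustration; the real feature length is derived from the data inside run().
def _build_example_mlp():
    """Illustrative only: a 3-layer MLP_CLASSIFIER with the default 245 classes."""
    return MLP_CLASSIFIER(input_size=512, output_size=245, depth=3,
                          depth_list=[256, 128, 64], drop_prob=0.5,
                          use_norm=True, activation=ACTIVATION['relu'])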
class AverageMeter(object):
'''
Computes and stores the average and current value
'''
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
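# Illustrative usage sketch (added): AverageMeter keeps a running mean weighted by
# sample count, as used for loss/accuracy bookkeeping in the train/val loops below.
def _average_meter_demo():
    """Illustrative only: the batch losses and sizes are made-up numbers."""
    meter = AverageMeter()
    for loss_val, batch_size in [(0.9, 32), (0.7, 32), (0.5, 16)]:
        meter.update(loss_val, n=batch_size)
    return meter.avg  # weighted average over the 80 samples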
def accuracy(output, target, topk=(1,)):
'''
    Computes the top-k accuracy (precision@k) for the specified values of k
'''
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
res = []
for k in topk:
correct_k = correct[:k].view(-1).float().sum(0)
res.append(correct_k.mul_(1/batch_size))
return res
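# Illustrative example (added): for logits of shape [N, num_classes], accuracy()
# returns one fraction per requested k; the toy batch below is made up.
def _topk_accuracy_demo():
    """Illustrative only: top-1 is 0.5 and top-2 is 1.0 for this 2-sample batch."""
    logits = torch.tensor([[0.1, 0.7, 0.2], [0.2, 0.5, 0.3]])
    labels = torch.tensor([1, 2])
    return accuracy(logits, labels, topk=(1, 2))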
def train_epoch(epoch,net,criterion,optim,train_loader,scaler,use_fp16=True):
net.train()
train_loss = AverageMeter()
train_acc = AverageMeter()
for step, sample in enumerate(train_loader):
data = sample['data']
target = sample['target']
# print(data.size())
data = data.cuda()
target = target.cuda()
with autocast(use_fp16):
output = net(data)
loss = criterion(output, target)
optim.zero_grad()
if use_fp16:
scaler.scale(loss).backward()
scaler.step(optim)
scaler.update()
else:
loss.backward()
optim.step()
output = output.float()
loss = loss.float()
# measure accuracy and record loss
acc = accuracy(output.data, target)[0]
train_loss.update(loss.item(), data.size(0))
train_acc.update(acc.item(), data.size(0))
torch.cuda.empty_cache()
# if step % 10 == 0:
# print('epoch:{},step:{},train_loss:{:.5f},train_acc:{:.5f},lr:{}'
# .format(epoch, step, loss.item(), acc.item(), optim.param_groups[0]['lr']))
return train_acc.avg,train_loss.avg
def val_epoch(epoch,net,criterion,val_loader,use_fp16=True):
net.eval()
val_loss = AverageMeter()
val_acc = AverageMeter()
with torch.no_grad():
for step, sample in enumerate(val_loader):
data = sample['data']
target = sample['target']
data = data.cuda()
target = target.cuda()
with autocast(use_fp16):
output = net(data)
loss = criterion(output, target)
output = output.float()
loss = loss.float()
# measure accuracy and record loss
acc = accuracy(output.data, target)[0]
val_loss.update(loss.item(), data.size(0))
val_acc.update(acc.item(), data.size(0))
torch.cuda.empty_cache()
# print('epoch:{},step:{},val_loss:{:.5f},val_acc:{:.5f}'
# .format(epoch, step, loss.item(), acc.item()))
return val_acc.avg,val_loss.avg
def evaluation(test_data,net,weight_path,use_fp16=True):
ckpt = torch.load(weight_path)
net.load_state_dict(ckpt['state_dict'])
net.eval()
test_data = torch.from_numpy(test_data) #N*fea_len
with torch.no_grad():
data = test_data.cuda()
with autocast(use_fp16):
output = net(data)
output = output.float()
prob_output = torch.softmax(output,dim=1) # N*C
output = torch.argmax(prob_output,dim=1) #N
torch.cuda.empty_cache()
return output.cpu().numpy().tolist(),prob_output.cpu().numpy()
def evaluation_tta(test_data,net,weight_path,use_fp16=True,tta=5):
ckpt = torch.load(weight_path)
net.load_state_dict(ckpt['state_dict'])
net.eval()
transform = transforms.Compose([RandomScale1d()])
vote_out = []
prob_out = []
for _ in range(tta):
input_data = transform(test_data)
input_data = torch.from_numpy(input_data) #N*fea_len
with torch.no_grad():
data = input_data.cuda()
with autocast(use_fp16):
output = net(data)
output = output.float()
prob_output = torch.softmax(output,dim=1) #N*C
output = torch.argmax(prob_output,dim=1) #N
vote_out.append(output.cpu().numpy().tolist())
prob_out.append(prob_output.cpu().numpy()) # tta*N*C
torch.cuda.empty_cache()
vote_array = np.asarray(vote_out).astype(np.uint8) # tta*N
vote_out = [max(list(vote_array[:,i]),key=list(vote_array[:,i]).count) for i in range(vote_array.shape[1])]
return vote_out, np.mean(prob_out, axis=0)
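# (added note) evaluation_tta returns both the per-sample majority vote over the TTA
# passes and the TTA-averaged class probabilities; a probability-averaged label could
# equivalently be taken as np.argmax(mean_probs, axis=1).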
def manual_select(total_list,exclude_list=None):
fea_list = []
nouse_list = []
if exclude_list is not None:
for exclude_label in exclude_list:
if 'sum' not in exclude_label and 'avg' not in exclude_label:
nouse_list += [f'{exclude_label}_{str(i)}' for i in range(400)]
else:
nouse_list += [f'{exclude_label}_{str(i)}' for i in range(40)]
for col in total_list:
if col not in nouse_list:
fea_list.append(col)
return fea_list
else:
return total_list
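# Illustrative example (added): exclude_list entries are feature-name prefixes, so a
# call like manual_select(train_df.columns, exclude_list=['pssm']) would drop the
# hypothetical columns 'pssm_0' ... 'pssm_399' ('..._0' ... '..._39' for 'sum'/'avg'
# features); 'pssm' is just an example prefix.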
def run(train_path,test_path,output_dir,net_depth=3,exclude_list=None,scale_flag=True,select_flag=False,dim_reduction=False,aug_flag=False,tta=False,**aux_config):
torch.manual_seed(0)
if not os.path.exists(output_dir):
os.makedirs(output_dir)
else:
shutil.rmtree(output_dir)
os.makedirs(output_dir)
# load training and testing data
train_df = pd.read_csv(train_path)
test_df = pd.read_csv(test_path)
# data preprocessing
del train_df['seq']
del test_df['seq']
test_id = test_df['id']
# print(list(test_id))
del train_df['id']
del test_df['id']
# manual select
fea_list = manual_select(train_df.columns,exclude_list)
fea_list = [f for f in fea_list if f not in ['label']]
le = LabelEncoder()
label_list = set(train_df['label'])
    print(random.sample(sorted(label_list), 1))  # random.sample on a set is deprecated in newer Python
train_df['label'] = le.fit_transform(train_df['label'])
# convert to numpy array
Y = np.asarray(train_df['label']).astype(np.uint8)
X = np.asarray(train_df[fea_list]).astype(np.float32)
test = np.asarray(test_df[fea_list]).astype(np.float32)
# feature selection
if select_flag:
select_model_path = './select_model.pkl'
if os.path.exists(select_model_path):
with open(select_model_path, 'rb') as f:
select_model = pickle.load(f)
else:
select_model = select_feature_linesvc(X, Y, select_model_path)
X = select_model.transform(X)
test = select_model.transform(test)
# data scale
if scale_flag:
X_len = X.shape[0]
data_scaler = StandardScaler()
cat_data = np.concatenate([X,test],axis=0)
cat_data= data_scaler.fit_transform(cat_data)
X = cat_data[:X_len]
test = cat_data[X_len:]
# dim reduction
if dim_reduction:
X_len = X.shape[0]
cat_data = np.concatenate([X,test],axis=0)
# fastica = FastICA(n_components=int(X.shape[1]*0.5),random_state=0)
# cat_data= fastica.fit_transform(cat_data)
pca = PCA(n_components=int(X.shape[1]*0.5))
cat_data= pca.fit_transform(cat_data)
X = cat_data[:X_len]
test = cat_data[X_len:]
# print(Y)
print(X.shape,test.shape)
num_classes = len(set(train_df['label'])) #245
fea_len = X.shape[1]
total_result = []
total_prob_result = []
kfold = KFold(n_splits=5,shuffle=True,random_state=21)
for fold_num,(train_index,val_index) in enumerate(kfold.split(X)):
print(f'***********fold {fold_num+1} start!!***********')
fold_dir = os.path.join(output_dir,f'fold{fold_num+1}')
if not os.path.exists(fold_dir):
os.makedirs(fold_dir)
# initialization
epoch_num = 100
acc_threshold = 0.0
depth = net_depth
# depth_list = [int(fea_len*(2**(1-i))) for i in range(depth)]
depth_list = [fea_len - i*(fea_len-num_classes)// depth for i in range(1,depth)]
print('depth list:',depth_list)
net = MLP_CLASSIFIER(fea_len,num_classes,len(depth_list),depth_list,**aux_config)
criterion = nn.CrossEntropyLoss()
optim = torch.optim.Adam(net.parameters(), lr=0.001, weight_decay=0.0001)
lr_scheduler = torch.optim.lr_scheduler.MultiStepLR(optim, [25,60,80], gamma=0.1)
scaler = GradScaler()
net = net.cuda()
criterion = criterion.cuda()
# data loader
x_train, x_val = X[train_index], X[val_index]
y_train, y_val = Y[train_index], Y[val_index]
print('Train Data Size:',x_train.shape)
print('Val Data Size:',x_val.shape)
if aug_flag:
transform = transforms.Compose([RandomScale1d()])
else:
transform = None
train_dataset = MyDataset(X=x_train,Y=y_train,transform=transform)
val_dataset = MyDataset(X=x_val,Y=y_val,transform=transform)
train_loader = DataLoader(
train_dataset,
batch_size=256,
shuffle=True,
num_workers=2)
val_loader = DataLoader(
val_dataset,
batch_size=256,
shuffle=False,
num_workers=2)
# main processing
for epoch in range(epoch_num):
train_acc, train_loss = train_epoch(epoch,net,criterion,optim,train_loader,scaler)
val_acc,val_loss = val_epoch(epoch,net,criterion,val_loader)
torch.cuda.empty_cache()
if epoch % 10 == 0:
print('Train epoch:{},train_loss:{:.5f},train_acc:{:.5f}'
.format(epoch, train_loss, train_acc))
print('Val epoch:{},val_loss:{:.5f},val_acc:{:.5f}'
.format(epoch, val_loss, val_acc))
if lr_scheduler is not None:
lr_scheduler.step()
if val_acc > acc_threshold:
acc_threshold = val_acc
saver = {
'state_dict': net.state_dict()
}
file_name = 'epoch:{}-val_acc:{:.5f}-val_loss:{:.5f}-mlp.pth'.format(epoch,val_acc,val_loss)
save_path = os.path.join(fold_dir, file_name)
print('Save as: %s'%file_name)
torch.save(saver, save_path)
# save top3 model
dfs_remove_weight(fold_dir,retain=1)
# generating test result using the best model
if tta:
fold_result,fold_prob_result = evaluation_tta(test,net,save_path,tta=9) #N,N*C
else:
fold_result,fold_prob_result = evaluation(test,net,save_path) #N,N*C
total_result.append(fold_result)
total_prob_result.append(fold_prob_result)
fold_prob_result = np.argmax(fold_prob_result,axis=1).astype(np.uint8).tolist()
fold_result = le.inverse_transform(fold_result)
fold_prob_result = le.inverse_transform(fold_prob_result)
# csv save
fold_vote_csv = {}
fold_vote_csv = | pd.DataFrame(fold_vote_csv) | pandas.DataFrame |
import os.path as op
import copy
import numpy as np
from sklearn.dummy import DummyRegressor
from sklearn.pipeline import make_pipeline
from sklearn.linear_model import RidgeCV
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import cross_val_score, ShuffleSplit
import mne
import pandas as pd
import config as cfg
from library.spfiltering import (
ProjIdentitySpace, ProjCommonSpace, ProjSPoCSpace)
from library.featuring import Riemann, LogDiag, NaiveVec
from joblib import Parallel, delayed
n_compo = 21
n_components = np.arange(n_compo)+1
scale = 'auto'
metric = 'riemann'
shrink = .5 # to regularize SPoC
seed = 42
test_size = .1
n_splits = 100
n_jobs = 40
fname = op.join(cfg.derivative_path, 'covs_tuh_oas.h5')
covs = mne.externals.h5io.read_hdf5(fname)
X = np.array([d['covs'] for d in covs if 'subject' in d and d['age'] >= 10])
y = np.array([d['age'] for d in covs if 'subject' in d and d['age'] >= 10])
n_sub, n_fb, n_ch, _ = X.shape
ridge_shrinkage = np.logspace(-3, 5, 100)
pipelines = {
'dummy': make_pipeline(
ProjIdentitySpace(),
LogDiag(),
StandardScaler(),
DummyRegressor()
),
'naive': make_pipeline(
ProjIdentitySpace(),
NaiveVec(method='upper'),
StandardScaler(),
RidgeCV(alphas=ridge_shrinkage)
),
'log-diag': make_pipeline(
ProjIdentitySpace(),
LogDiag(),
StandardScaler(),
RidgeCV(alphas=ridge_shrinkage)
),
'spoc': make_pipeline(
ProjSPoCSpace(n_compo=n_compo, scale=scale,
reg=0, shrink=shrink),
LogDiag(),
StandardScaler(),
RidgeCV(alphas=ridge_shrinkage)
),
'riemann': make_pipeline(
ProjCommonSpace(scale=scale, n_compo=n_compo,
reg=1.e-05),
Riemann(n_fb=n_fb, metric=metric),
StandardScaler(),
RidgeCV(alphas=ridge_shrinkage)
)
}
def run_low_rank(n_components, X, y, cv, estimators, scoring):
out = dict(n_components=n_components)
for name, est in estimators.items():
print(name, n_components)
this_est = est
this_est.steps[0][1].n_compo = n_components
scores = cross_val_score(
X=X, y=y, cv=copy.deepcopy(cv), estimator=this_est,
n_jobs=1,
scoring=scoring)
if scoring == 'neg_mean_absolute_error':
scores = -scores
print(np.mean(scores), f"+/-{np.std(scores)}")
out[name] = scores
return out
low_rank_estimators = {k: v for k, v in pipelines.items()
if k in ('spoc', 'riemann')}
out_list = Parallel(n_jobs=n_jobs)(delayed(run_low_rank)(
n_components=cc, X=X, y=y,
cv=ShuffleSplit(test_size=.1, n_splits=10, random_state=seed),
estimators=low_rank_estimators, scoring='neg_mean_absolute_error')
for cc in n_components)
out_frames = list()
for this_dict in out_list:
this_df = pd.DataFrame({'spoc': this_dict['spoc'],
'riemann': this_dict['riemann']})
this_df['n_components'] = this_dict['n_components']
this_df['fold_idx'] = np.arange(len(this_df))
out_frames.append(this_df)
out_df = | pd.concat(out_frames) | pandas.concat |
import os
from typing import Dict
from abc import ABC
from easy_sdm.data import ShapefileRegion
import geopandas as gpd
import numpy as np
import pandas as pd
import requests
from easy_sdm.configs import configs
from easy_sdm.utils import logger
from typing import Dict, Optional
from pathlib import Path
class GBIFOccurencesRequester:
"""[This class makes request to GBIF]
"""
def __init__(self, taxon_key: int, species_name: str):
self.taxon_key = taxon_key
self.species_name = species_name
self.base_url = "http://api.gbif.org/v1/occurrence/search"
def request(self, offset: int = 0):
"""[ Request GBIF information about an species]
Args:
offset (int, optional): [Offsset is a parameter to where starting the
request in GBIF databse, since the requests have a
limit of 300 row for request]. Defaults to 0.
Returns:
[type]: [int]
"""
gbif_configs = configs["gbif"]
params = {
"taxonKey": str(self.taxon_key),
"limit": gbif_configs["one_request_limit"],
"hasCoordinate": True,
"year": f"{gbif_configs['low_year']},{gbif_configs['up_year']}",
"country": gbif_configs["country"],
"offset": offset,
}
r = requests.get(self.base_url, params=params)
status_code = r.status_code
if r.status_code != 200:
logger.logging.info(
f"API call failed at offset {offset} with a status code of {r.status_code}."
)
end_of_records = True
else:
r = r.json()
end_of_records = r["endOfRecords"]
return r, end_of_records, status_code
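# Illustrative usage sketch (added): the taxon key below is made up; real keys come
# from a GBIF species lookup. Only the first page of results is requested here.
def _request_first_page_example():
    """Illustrative only: fetch one page of occurrences for a hypothetical species."""
    requester = GBIFOccurencesRequester(taxon_key=1234567, species_name="Example species")
    response_json, end_of_records, status_code = requester.request(offset=0)
    return response_json if status_code == 200 else None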
class Species:
def __init__(self, taxon_key: int, name: str):
self.taxon_key = taxon_key
self.name = name
def __str__(self) -> str:
return "Species {self.name} with taxon key {self.taxon_key}"
class SpeciesDFBuilder:
"""[This class organize data requested to GBIF into pandas dataframes]
"""
def __init__(self, species: Species):
self.gbif_occ_requester = GBIFOccurencesRequester(
species.taxon_key, species.name
)
self.__df_memory = None
def get_specie_df(self):
"""Get species as DataFrame"""
        # truth-testing a DataFrame raises ValueError, so compare against None explicitly
        if self.__df_memory is not None:
df = self.__df_memory
else:
df = self.__request_species_df()
df = self.__clean_species_df(df)
self.__df_memory = df
return df
def __request_species_df(self):
"""[Organizes GBIF information in a dataframe considering offsets ]"""
end_of_records = False
offset = 0
status = 200
df = None
while end_of_records == False and status == 200:
r, end_of_records, status = self.gbif_occ_requester.request(offset)
df = self.__build_species_df(r, df)
offset = len(df) + 1
        df = self.__clean_species_df(df)
return df
def __build_species_df(self, request, df=None):
"""[Create species dataframe with the request data]
Args:
df ([type]): [description]
request ([type]): [description]
Returns:
[df]: [description]
"""
if df is None:
df = pd.DataFrame(
columns=[
"SCIENTIFIC_NAME",
"LONGITUDE",
"LATITUDE",
"COUNTRY",
"STATE_PROVINCE",
"IDENTIFICATION_DATE",
"DAY",
"MONTH",
"YEAR",
]
)
for result in request["results"]:
result = self.__refact_dict(result)
df = df.append(
{
"SCIENTIFIC_NAME": result["scientificName"],
"LONGITUDE": result["decimalLongitude"],
"LATITUDE": result["decimalLatitude"],
"COUNTRY": result["country"],
"STATE_PROVINCE": result["stateProvince"],
"IDENTIFICATION_DATE": result["eventDate"],
"DAY": result["day"],
"MONTH": result["month"],
"YEAR": result["year"],
},
ignore_index=True,
)
return df
def __refact_dict(self, result: Dict):
"""Refact dict placing None in empty cells"""
columns = result.keys()
desired_columns = [
"scientificName",
"decimalLongitude",
"decimalLatitude",
"country",
"stateProvince",
"eventDate",
"day",
"month",
"year",
"occurrenceRemarks",
]
for d_col in desired_columns:
if d_col not in columns:
result[d_col] = None
return result
def __clean_species_df(self, df: pd.DataFrame):
"""[Cleaning Gbif Data]
Args:
df ([pd.DaraFrame]): [description]
Returns:
[pd.DaraFrame]: [description]
"""
# Double check to certify there is no empty lat/long data
df = df[pd.notnull(df["LATITUDE"])]
df = df[ | pd.notnull(df["LONGITUDE"]) | pandas.notnull |
import argparse
import sys
import os
import numpy as np
import pandas as pd
import matplotlib
import matplotlib.pyplot as plt
import seaborn as sns
import glob
from sklearn import metrics
from scipy.stats import pearsonr, spearmanr
from scipy.optimize import curve_fit
from collections import Counter
import pickle
import pdb
parser = argparse.ArgumentParser(description = '''Visualize and analyze the DockQ scores.''')
#Bench4
parser.add_argument('--bench4_dockq_aa', nargs=1, type= str, default=sys.stdin, help = 'Path to dockq scores for benchmark 4 AF in csv.')
parser.add_argument('--bench4_dockq_RF', nargs=1, type= str, default=sys.stdin, help = 'Path to dockq scores for benchmark 4 from RF in csv.')
parser.add_argument('--plDDT_bench4', nargs=1, type= str, default=sys.stdin, help = 'Path to plDDT metrics in csv.')
#parser.add_argument('--pconsdock_bench4', nargs=1, type= str, default=sys.stdin, help = 'Path to pconsdock metrics in csv.')
#parser.add_argument('--pconsdock_marks', nargs=1, type= str, default=sys.stdin, help = 'Path to pconsdock metrics in csv.')
parser.add_argument('--bench4_kingdom', nargs=1, type= str, default=sys.stdin, help = 'Path to kingdoms for bench4 in csv.')
parser.add_argument('--dssp_bench4', nargs=1, type= str, default=sys.stdin, help = 'Path to dssp annotations for bench4 in csv.')
parser.add_argument('--afdefault_neff_bench4', nargs=1, type= str, default=sys.stdin, help = 'Path to default AF alignments Neff in csv.')
parser.add_argument('--tophits_neff_bench4', nargs=1, type= str, default=sys.stdin, help = 'Path to top hits alignments Neff in csv.')
#Marks positive
parser.add_argument('--marks_dockq_RF', nargs=1, type= str, default=sys.stdin, help = 'Path to dockq scores for marks set RF in csv.')
parser.add_argument('--marks_dockq_AF_bb', nargs=1, type= str, default=sys.stdin, help = 'Path to dockq scores for marks set AF back bone atoms in csv.')
parser.add_argument('--marks_dockq_AF_aa', nargs=1, type= str, default=sys.stdin, help = 'Path to dockq scores for marks set AF all atoms in csv.')
parser.add_argument('--marks_dockq_GRAMM', nargs=1, type= str, default=sys.stdin, help = 'Path to dockq scores for marks set GRAMM in csv.')
parser.add_argument('--marks_dockq_TMfull', nargs=1, type= str, default=sys.stdin, help = 'Path to dockq scores for marks set TMdock in csv.')
parser.add_argument('--marks_dockq_TMint', nargs=1, type= str, default=sys.stdin, help = 'Path to dockq scores for marks set interface TMdock in csv.')
parser.add_argument('--marks_dockq_mdockpp', nargs=1, type= str, default=sys.stdin, help = 'Path to dockq scores for marks set MdockPP in csv.')
parser.add_argument('--plDDT_marks_af', nargs=1, type= str, default=sys.stdin, help = 'Path to plDDT metrics in csv.')
parser.add_argument('--plDDT_marks_fused', nargs=1, type= str, default=sys.stdin, help = 'Path to plDDT metrics in csv.')
parser.add_argument('--dssp_marks', nargs=1, type= str, default=sys.stdin, help = 'Path to dssp metrics in csv.')
parser.add_argument('--ifstats_marks', nargs=1, type= str, default=sys.stdin, help = 'Path to if metrics in csv.')
parser.add_argument('--aln_scores_marks', nargs=1, type= str, default=sys.stdin, help = 'Path to aln scores in csv.')
parser.add_argument('--oxstats_marks', nargs=1, type= str, default=sys.stdin, help = 'Path to statistics over organisms in csv.')
parser.add_argument('--afdefault_neff_marks', nargs=1, type= str, default=sys.stdin, help = 'Path to default AF alignments Neff in csv.')
parser.add_argument('--tophits_neff_marks', nargs=1, type= str, default=sys.stdin, help = 'Path to top hits alignments Neff in csv.')
parser.add_argument('--af_chain_overlap_marks', nargs=1, type= str, default=sys.stdin, help = 'Path to chain overlap for AF a3m in csv.')
#Marks negative
parser.add_argument('--plDDT_marks_negative_af', nargs=1, type= str, default=sys.stdin, help = 'Path to plDDT metrics in csv.')
#Negatome
parser.add_argument('--plDDT_negatome_af', nargs=1, type= str, default=sys.stdin, help = 'Path to plDDT metrics in csv.')
#New set
parser.add_argument('--newset_dockq_AF', nargs=1, type= str, default=sys.stdin, help = 'Path to dockq scores for new set AF in csv.')
parser.add_argument('--plDDT_newset', nargs=1, type= str, default=sys.stdin, help = 'Path to plDDT metrics in csv for newset.')
#Output directory
parser.add_argument('--outdir', nargs=1, type= str, default=sys.stdin, help = 'Path to output directory. Include /in end')
################FUNCTIONS#################
def dockq_box(bench4_dockq, outdir):
'''Plot a boxplot of the dockq score for the different modes
'''
#Plot
fig,ax = plt.subplots(figsize=(24/2.54,12/2.54))
modes = bench4_dockq.columns[1:]
all_modes = []
all_scores = []
all_msas = []
all_model_options = []
accuracies = {}
for mode in modes:
#Frac correct and avg score
fraq_correct = np.argwhere(bench4_dockq[mode].values>=0.23).shape[0]/len(bench4_dockq)
accuracies[mode]=fraq_correct
av = np.average(bench4_dockq[mode].values)
print(mode, np.round(fraq_correct,3),np.round(av,3))
#Save scores
all_scores.extend([*bench4_dockq[mode].values])
mode = '_'.join(mode.split('_')[4:])
mode = mode.split('_')
msa = mode[0]
model = '_'.join(mode[1:-1])
option = mode[-1]
#save
all_modes.extend([msa+'\n'+model+'\n'+option]*len(bench4_dockq))
all_msas.extend([msa]*len(bench4_dockq))
all_model_options.extend([model+' '+option]*len(bench4_dockq))
def correlate_scores(bench4_dockq, outdir):
'''Correlate the scores for all different modeling strategies
'''
modes = ['DockQ_dockqstats_bench4_af2_hhblits_model_1_ens8',
'DockQ_dockqstats_bench4_af2_hhblits_model_1_rec10',
'DockQ_dockqstats_bench4_af2_af2stdmsa_model_1_ens8',
'DockQ_dockqstats_bench4_af2_af2stdmsa_model_1_rec10',
'DockQ_dockqstats_bench4_af2_af2andhhblitsmsa_model_1_ens8',
'DockQ_dockqstats_bench4_af2_af2andhhblitsmsa_model_1_rec10',
'DockQ_dockqstats_bench4_af2_hhblits_model_1_ptm_ens8',
'DockQ_dockqstats_bench4_af2_hhblits_model_1_ptm_rec10',
'DockQ_dockqstats_bench4_af2_af2stdmsa_model_1_ptm_ens8',
'DockQ_dockqstats_bench4_af2_af2stdmsa_model_1_ptm_rec10',
'DockQ_dockqstats_bench4_af2_af2andhhblitsmsa_model_1_ptm_ens8',
'DockQ_dockqstats_bench4_af2_af2andhhblitsmsa_model_1_ptm_rec10']
corr_matrix = np.zeros((len(modes),len(modes)))
for i in range(len(modes)):
scores_i = bench4_dockq[modes[i]].values
for j in range(i+1,len(modes)):
scores_j = bench4_dockq[modes[j]].values
#Correlate
R,p = pearsonr(scores_i,scores_j)
corr_matrix[i,j]=np.round(R,2)
corr_matrix[j,i]=np.round(R,2)
print(modes)
print(corr_matrix)
#Create df
corr_df = pd.DataFrame()
modes = ['_'.join(x.split('_')[4:]) for x in modes]
corr_df['Comparison'] = modes
for i in range(len(modes)):
corr_df[modes[i]]=corr_matrix[i,:]
corr_df.to_csv(outdir+'model_correlations.csv')
def fetch_missing_dockq(marks_dockq_AF_bb,marks_dockq_AF_aa):
'''Fetch missing DockQ scores
'''
ids = ['_'.join(x.split('-')) for x in marks_dockq_AF_aa.complex_id.values]
    #Get missing scores (copy the slice to avoid SettingWithCopyWarning)
    missing = marks_dockq_AF_bb[~marks_dockq_AF_bb.complex_id.isin(ids)].copy()
    ids = [x[:6]+'-'+x[7:] for x in missing.complex_id.values]
    missing['complex_id']=ids
marks_dockq_AF_aa = pd.concat([marks_dockq_AF_aa,missing[marks_dockq_AF_aa.columns]])
return marks_dockq_AF_aa
def pdockq(if_plddt_contacts, dockq_scores, outdir):
#pdockq
fig,ax = plt.subplots(figsize=(12/2.54,12/2.54))
#Create RA
x_ra = []
y_ra = []
y_std = []
y_av_err = []
step = 20
for t in np.arange(0,max(if_plddt_contacts)-step,step):
inds = np.argwhere((if_plddt_contacts>=t)&(if_plddt_contacts<t+step))[:,0]
x_ra.append(t+step/2)
y_ra.append(np.average(dockq_scores[inds]))
y_std.append(np.std(dockq_scores[inds]))
y_av_err.append(np.average(np.absolute(dockq_scores[inds]-y_ra[-1])))
#Do a simple sigmoid fit
def sigmoid(x, L ,x0, k, b):
y = L / (1 + np.exp(-k*(x-x0)))+b
return (y)
xdata = if_plddt_contacts[np.argsort(if_plddt_contacts)]
ydata = dockq_scores[np.argsort(if_plddt_contacts)]
    p0 = [max(ydata), np.median(xdata), 1, min(ydata)]  # this is a mandatory initial guess
popt, pcov = curve_fit(sigmoid, xdata, ydata,p0, method='dogbox')
y = sigmoid(xdata, *popt)
plt.plot(xdata,y,color='r',label='Sigmoidal fit')
#Calc error
print('Sigmoid params:',*popt)
plt.scatter(if_plddt_contacts,dockq_scores,s=1)
#plt.plot(x_ra,y_ra,label='Running average', color='tab:blue')
#plt.fill_between(x_ra,np.array(y_ra)-np.array(y_av_err),np.array(y_ra)+np.array(y_av_err),color='tab:blue',alpha=0.25, label='Average error')
plt.title('pDockQ')
plt.xlabel('IF plDDT⋅log(IF contacts)')
plt.ylabel('DockQ')
plt.legend()
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
plt.tight_layout()
plt.savefig(outdir+'pDockQ.svg',format='svg',dpi=300)
plt.close()
print('Average error for sigmoidal fit:',np.average(np.absolute(y-ydata)))
print('L=',np.round(popt[0],3),'x0=',np.round(popt[1],3) ,'k=',np.round(popt[2],3), 'b=',np.round(popt[3],3))
return popt
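# Illustrative helper sketch (added): apply the sigmoid fitted in pdockq() to new
# interface features; popt is the (L, x0, k, b) tuple returned above.
def _pdockq_from_params(if_plddt_av, n_if_contacts, popt):
    """Illustrative only: pDockQ = L/(1+exp(-k*(x-x0))) + b with x = IF_plDDT*log10(contacts+1)."""
    L, x0, k, b = popt
    x = if_plddt_av * np.log10(n_if_contacts + 1)
    return L / (1 + np.exp(-k * (x - x0))) + b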
def ROC_pred_marks(marks_dockq_AF, plDDT_marks, outdir):
'''Compare the separation in the marks dataset for AF using metrics from the
predicted structures
'''
#Merge dfs
plDDT_marks['complex_id']=plDDT_marks.id1+'-'+plDDT_marks.id2
merged = pd.merge(marks_dockq_AF,plDDT_marks,on=['complex_id'],how='inner')
#Get min of chains
single_chain_plddt = np.min(merged[['ch1_plddt_av_1', 'ch2_plddt_av_1']].values,axis=1)
merged['min_chain_plddt_av_1'] = single_chain_plddt
#Analyze ROC as a function of
plDDT_metrics = ['if_plddt_av', 'min_chain_plddt_av',
'plddt_av', 'num_atoms_in_interface', 'num_res_in_interface']
plDDT_nice_names = {'if_plddt_av':'IF_plDDT', 'min_chain_plddt_av':'Min plDDT per chain',
'plddt_av':'Average plDDT', 'num_atoms_in_interface':'IF_contacts',
'num_res_in_interface':'IF_residues'}
run='1'
dockq_scores = merged['DockQ_dockqstats_marks_af2_af2andhhblitsmsa_model_1_rec10_run'+run].values
correct = np.zeros(len(dockq_scores))
correct[np.argwhere(dockq_scores>=0.23)]=1
#Plot
fig,ax = plt.subplots(figsize=(12/2.54,12/2.54))
    colors = {0:'darkblue',1:'magenta',2:'orange',3:'darkgreen',4:'tab:blue',5:'tab:olive',6:'k'} #'tab:yellow'/'tab:black' are not valid matplotlib colors
for i in range(len(plDDT_metrics)):
plDDT_metric_vals = merged[plDDT_metrics[i]+'_'+run].values
#Create ROC
fpr, tpr, threshold = metrics.roc_curve(correct, plDDT_metric_vals, pos_label=1)
roc_auc = metrics.auc(fpr, tpr)
label = plDDT_metrics[i]
plt.plot(fpr, tpr, label = plDDT_nice_names[label]+': AUC = %0.2f' % roc_auc,color=colors[i])
#Add log(if contacts)*if_plddt_av
if_plddt_contacts = merged['if_plddt_av_1'].values*np.log10(merged['num_atoms_in_interface_1'].values+1)
#Create ROC
fpr, tpr, threshold = metrics.roc_curve(correct, if_plddt_contacts, pos_label=1)
roc_auc = metrics.auc(fpr, tpr)
plt.plot(fpr, tpr, label = 'IF_plDDT⋅log(IF_contacts)'+': AUC = %0.2f' % roc_auc,color='tab:cyan')
#Get pDockQ
def sigmoid(x, L ,x0, k, b):
y = L / (1 + np.exp(-k*(x-x0)))+b
return (y)
sigmoid_params = pdockq(if_plddt_contacts, dockq_scores, outdir)
#Create ROC
fpr, tpr, threshold = metrics.roc_curve(correct, sigmoid(if_plddt_contacts,*sigmoid_params), pos_label=1)
roc_auc = metrics.auc(fpr, tpr)
plt.plot(fpr, tpr, label = 'pDockQ'+': AUC = %0.2f' % roc_auc,color='k',linestyle='--')
plt.plot([0,1],[0,1],linewidth=1,linestyle='--',color='grey')
plt.legend(fontsize=9)
plt.title('ROC as a function of different metrics')
plt.xlabel('FPR')
plt.ylabel('TPR')
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
plt.tight_layout()
plt.savefig(outdir+'ROC_marks.svg',format='svg',dpi=300)
plt.close()
#pDockQ vs DockQ
fig,ax = plt.subplots(figsize=(12/2.54,12/2.54))
plt.scatter(sigmoid(if_plddt_contacts,*sigmoid_params),dockq_scores,s=1)
plt.title('pDockQ vs DockQ')
plt.xlabel('pDockQ')
plt.ylabel('DockQ')
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
plt.tight_layout()
plt.savefig(outdir+'pdockq_vs_dockq.svg',format='svg',dpi=300)
plt.close()
#plot if plddt vs log contacts and color by dockq
fig,ax = plt.subplots(figsize=(12/2.54,12/2.54))
plt.scatter(merged['num_atoms_in_interface_1'].values+1, merged['if_plddt_av_1'].values,c=dockq_scores,s=2)
cbar = plt.colorbar()
cbar.set_label('DockQ')
plt.xscale('log')
plt.ylim([40,100])
plt.title('Interface contacts, plDDT and DockQ')
plt.xlabel('Interface contacts')
plt.ylabel('Average interface plDDT')
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
plt.tight_layout()
plt.savefig(outdir+'if_conctacts_vs_plddt.svg',format='svg',dpi=300)
plt.close()
return sigmoid_params
def score_marks_5runs_paired_af(marks_dockq_AF, plDDT_marks, sigmoid_params, outdir):
'''Analyze the variation in DockQ using 5 identical runs of the same settings
'''
plDDT_marks['complex_id'] = plDDT_marks.id1+'-'+plDDT_marks.id2
merged = pd.merge(marks_dockq_AF,plDDT_marks,on='complex_id',how='inner')
#Get separator
separator1 = merged[['if_plddt_av_1', 'if_plddt_av_2','if_plddt_av_3','if_plddt_av_4','if_plddt_av_5']].values
separator2 = merged[['num_atoms_in_interface_1', 'num_atoms_in_interface_2','num_atoms_in_interface_3','num_atoms_in_interface_4','num_atoms_in_interface_5']].values
separator = separator1*np.log10(separator2+1)
def sigmoid(x, L ,x0, k, b):
y = L / (1 + np.exp(-k*(x-x0)))+b
return (y)
separator = sigmoid(separator, *sigmoid_params)
scores = merged[['DockQ_dockqstats_marks_af2_af2andhhblitsmsa_model_1_rec10_run1','DockQ_dockqstats_marks_af2_af2andhhblitsmsa_model_1_rec10_run2',
'DockQ_dockqstats_marks_af2_af2andhhblitsmsa_model_1_rec10_run3','DockQ_dockqstats_marks_af2_af2andhhblitsmsa_model_1_rec10_run4',
'DockQ_dockqstats_marks_af2_af2andhhblitsmsa_model_1_rec10_run5']].values
#Get max and min scores
max_scores = np.max(scores,axis=1)
min_scores = np.min(scores,axis=1)
#Get success rates per initializations
srs = []
for i in range(scores.shape[1]):
srs.append(np.argwhere(scores[:,i]>=0.23).shape[0]/len(scores))
print('Test set scoring:')
print('Success rate 5 runs top scores',np.argwhere(max_scores>=0.23).shape[0]/len(max_scores))
print('Av diff',np.average(max_scores-min_scores))
print('Std diff',np.std(max_scores-min_scores))
print('Avg and std success rate', np.average(srs),np.std(srs))
#Separate the models using the number of contacts in the interface
max_inds = np.argmax(separator,axis=1)
first_ranked_scores = []
first_ranked_separators = []
#Get max separator scores
for i in range(len(max_inds)):
first_ranked_scores.append(scores[i,max_inds[i]])
first_ranked_separators.append(separator[i,max_inds[i]])
#Convert to array
first_ranked_scores = np.array(first_ranked_scores)
first_ranked_separators = np.array(first_ranked_separators)
#Get success rate
print('Ranking test set success rate using av plDDT*log(if_contacts) in interface',np.argwhere(first_ranked_scores>=0.23).shape[0]/len(first_ranked_scores))
#Get AUC using that success rate
correct = np.zeros(len(first_ranked_scores))
correct[np.argwhere(first_ranked_scores>=0.23)]=1
fpr, tpr, threshold = metrics.roc_curve(correct, first_ranked_separators, pos_label=1)
roc_auc = metrics.auc(fpr, tpr)
print('AUC using the same ranking', roc_auc)
#Plot
fig,ax = plt.subplots(figsize=(12/2.54,12/2.54))
plt.scatter(first_ranked_scores, max_scores,s=3,color='tab:blue',label='Max')
plt.scatter(first_ranked_scores, min_scores,s=3,color='mediumseagreen',label='Min')
plt.title('Model ranking on the test set')
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
plt.plot([0,1],[0,1],color='k',linewidth=1,linestyle='--')
plt.xlabel('DockQ first ranked model')
plt.ylabel('DockQ')
plt.legend()
plt.tight_layout()
plt.savefig(outdir+'DockQ_marks_5runs.svg',format='svg',dpi=300)
plt.close()
#Assign top ranked scores and origin
marks_dockq_AF['top_ranked_model_DockQ_af2']=first_ranked_scores
marks_dockq_AF['top_ranked_pDockQ']=first_ranked_separators
marks_dockq_AF['top_ranked_model_run_af2']=max_inds+1
#Create and save df
roc_df = pd.DataFrame()
roc_df['FPR']=fpr
roc_df['TPR']=tpr
roc_df['FPR*SR']=fpr*(np.argwhere(first_ranked_scores>=0.23).shape[0]/len(first_ranked_scores))
roc_df['TPR*SR']=tpr*np.argwhere(first_ranked_scores>=0.23).shape[0]/len(first_ranked_scores)
roc_df['PPV']=tpr/(tpr+fpr)
roc_df['pDockQ']=threshold
roc_df.to_csv(outdir+'roc_df_af2_marks.csv')
#Select a reduced version of the df
roc_df.loc[np.arange(0,len(roc_df),10)].to_csv(outdir+'roc_df_af2_marks_reduced.csv')
return marks_dockq_AF
def score_marks_5runs_paired_fused(marks_dockq_AF, plDDT_marks, sigmoid_params, outdir):
'''Analyze the variation in DockQ using 5 identical runs of the same settings
'''
plDDT_marks['complex_id'] = plDDT_marks.id1+'-'+plDDT_marks.id2
merged = pd.merge(marks_dockq_AF,plDDT_marks,on='complex_id',how='inner')
#Get separator
separator1 = merged[['if_plddt_av_1', 'if_plddt_av_2','if_plddt_av_3','if_plddt_av_4','if_plddt_av_5']].values
separator2 = merged[['num_atoms_in_interface_1', 'num_atoms_in_interface_2','num_atoms_in_interface_3','num_atoms_in_interface_4','num_atoms_in_interface_5']].values
separator = separator1*np.log10(separator2+1)
def sigmoid(x, L ,x0, k, b):
y = L / (1 + np.exp(-k*(x-x0)))+b
return (y)
separator = sigmoid(separator, *sigmoid_params)
scores = merged[['DockQ_dockqstats_marks_af2_pairedandfused_model_1_rec10_run1','DockQ_dockqstats_marks_af2_pairedandfused_model_1_rec10_run2',
'DockQ_dockqstats_marks_af2_pairedandfused_model_1_rec10_run3','DockQ_dockqstats_marks_af2_pairedandfused_model_1_rec10_run4',
'DockQ_dockqstats_marks_af2_pairedandfused_model_1_rec10_run5']].values
#Get max and min scores
max_scores = np.max(scores,axis=1)
min_scores = np.min(scores,axis=1)
#Get success rates per initializations
srs = []
for i in range(scores.shape[1]):
srs.append(np.argwhere(scores[:,i]>=0.23).shape[0]/len(scores))
print('FUSED test set scoring:')
print('Success rate 5 runs top scores',np.argwhere(max_scores>=0.23).shape[0]/len(max_scores))
print('Av diff',np.average(max_scores-min_scores))
print('Std diff',np.std(max_scores-min_scores))
print('Avg and std success rate', np.average(srs),np.std(srs))
#Separate the models using the number of contacts in the interface
max_inds = np.argmax(separator,axis=1)
first_ranked_scores = []
first_ranked_separators = []
#Get max separator scores
for i in range(len(max_inds)):
first_ranked_scores.append(scores[i,max_inds[i]])
first_ranked_separators.append(separator[i,max_inds[i]])
#Convert to array
first_ranked_scores = np.array(first_ranked_scores)
first_ranked_separators = np.array(first_ranked_separators)
#Get success rate
print('Ranking test set success rate using if_plddt_av and num contacts in interface',np.argwhere(first_ranked_scores>=0.23).shape[0]/len(first_ranked_scores))
#Get AUC using that success rate
correct = np.zeros(len(first_ranked_scores))
correct[np.argwhere(first_ranked_scores>=0.23)]=1
fpr, tpr, threshold = metrics.roc_curve(correct, first_ranked_separators, pos_label=1)
roc_auc = metrics.auc(fpr, tpr)
print('FUSED AUC using the same ranking', roc_auc)
#Assign top ranked scores and origin
marks_dockq_AF['top_ranked_model_DockQ_fused']=first_ranked_scores
marks_dockq_AF['top_ranked_model_run_fused']=max_inds+1
#Create and save df
roc_df = pd.DataFrame()
roc_df['FPR']=fpr
roc_df['TPR']=tpr
roc_df['FPR*SR']=fpr*(np.argwhere(first_ranked_scores>=0.23).shape[0]/len(first_ranked_scores))
roc_df['TPR*SR']=tpr*np.argwhere(first_ranked_scores>=0.23).shape[0]/len(first_ranked_scores)
roc_df['PPV']=tpr/(tpr+fpr)
roc_df['pDockQ']=threshold
roc_df.to_csv(outdir+'roc_df_fused_marks.csv')
#Select a reduced version of the df
roc_df.loc[np.arange(0,len(roc_df),10)].to_csv(outdir+'roc_df_fused_marks_reduced.csv')
return marks_dockq_AF
def marks_box(marks_dockq_AF, marks_dockq_GRAMM, marks_dockq_mdockpp, marks_dockq_TMfull, marks_dockq_TMint, marks_dockq_RF,outdir):
    '''Boxplot of DockQ scores for the Marks (test) set across docking methods
    '''
marks_dockq_TMint = marks_dockq_TMint.dropna()
marks_dockq_TMfull = marks_dockq_TMfull.dropna()
#Get data
rf_scores = marks_dockq_RF.DockQ_dockqstats_marks_RF.values
gramm_scores = marks_dockq_GRAMM[1].values
mdockpp_scores = marks_dockq_mdockpp.DockQ.values
TMfull_scores = marks_dockq_TMfull.dockq.values
TMint_scores = marks_dockq_TMint.dockq.values
paired_scores = marks_dockq_AF.DockQ_dockqstats_marks_af2_hhblitsn2_model_1_rec10.values
af2_std_scores = marks_dockq_AF.DockQ_dockqstats_marks_af2_af2stdmsa_model_1_rec10.values
run1_both_scores= marks_dockq_AF.DockQ_dockqstats_marks_af2_af2andhhblitsmsa_model_1_rec10_run1.values
run1_fused_scores = marks_dockq_AF.DockQ_dockqstats_marks_af2_pairedandfused_model_1_rec10_run1.values
top_paired_af_scores = marks_dockq_AF.top_ranked_model_DockQ_af2.values
top_paired_fused_scores = marks_dockq_AF.top_ranked_model_DockQ_fused.values
data1 = [rf_scores, gramm_scores, mdockpp_scores, TMint_scores, af2_std_scores, paired_scores, top_paired_af_scores, top_paired_fused_scores]
data2 = [run1_both_scores, run1_fused_scores, top_paired_af_scores,top_paired_fused_scores]
all_data = [data1,data2]
xlabels1 = ['RF','GRAMM', 'MDockPP', 'TMdock\nInterfaces', 'AF2', 'Paired', 'AF2+Paired\ntop ranked','Block+Paired\ntop ranked']
xlabels2 = ['AF2+Paired', 'Block+Paired', 'AF2+Paired\ntop ranked', 'Block+Paired\ntop ranked']
all_xlabels = [xlabels1, xlabels2]
#Color
colors = sns.husl_palette(len(xlabels1)+2)
all_colors = [colors[:len(xlabels1)],colors[-len(xlabels2):]]
for i in range(len(all_data)):
#Boxplot
fig,ax = plt.subplots(figsize=(24/2.54,12/2.54))
data = all_data[i] #Get data and xlabel variation
xlabels = all_xlabels[i]
colors = all_colors[i]
#Success rates
srs = []
for j in range(len(data)):
sr = np.argwhere(data[j]>=0.23).shape[0]/len(data[j])
median = np.median(data[j])
print(xlabels[j],'sr:',np.round(sr,3),len(data[j]),median)
#xlabels[j]+='\nSR: '+str(np.round(100*sr,1))+'%'
#xlabels[j]+='\nM: '+str(np.round(median,3))
# Creating plot
#ax.violinplot(data)
bp = ax.boxplot(data, patch_artist = True, notch=True, showfliers=False)
for patch, color in zip(bp['boxes'], colors):
patch.set_facecolor(color)
patch.set_alpha(0.75)
# changing color and linewidth of
# medians
for median in bp['medians']:
median.set(color ='k',linewidth = 3)
# #Add swarm
# for i in range(len(data)):
# # Add some random "jitter" to the x-axis
# x = np.random.normal(i, 0.04, size=len(data[i]))
# plt.plot(x+1, data[i], 'r.', alpha=0.2)
# changing color and linewidth of
# whiskers
for whisker in bp['whiskers']:
whisker.set(color ='grey',
linewidth = 1)
# changing color and linewidth of
# caps
for cap in bp['caps']:
cap.set(color ='grey',
linewidth = 1)
plt.title('DockQ scores for the test set',fontsize=20)
plt.xticks(np.arange(1,len(xlabels)+1),xlabels,fontsize=12)
plt.ylabel('DockQ')
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
plt.tight_layout()
plt.savefig(outdir+'DockQ_box_test'+str(i)+'.svg',format='svg',dpi=300)
plt.close()
def AF_vs_RF_marks(marks_dockq_RF,marks_dockq_AF, outdir):
'''Compare the scores for RF vs AF
'''
merged = pd.merge(marks_dockq_RF,marks_dockq_AF,on='complex_id',how='inner')
print('Number of complexes in merged Marks RF and AF', len(merged))
#Plot
fig,ax = plt.subplots(figsize=(12/2.54,12/2.54))
plt.scatter(merged['DockQ_dockqstats_marks_RF'],merged['DockQ_dockqstats_marks_af2_af2andhhblitsmsa_model_1_rec10_run1'],s=1)
plt.plot([0,1],[0,1],linewidth=1,linestyle='--',color='grey')
#Plot correct cutoff
plt.plot([0.23,0.23],[0,0.23],linewidth=1,linestyle='--',color='k')
plt.plot([0,0.23],[0.23,0.23],linewidth=1,linestyle='--',color='k',label='Success cutoff')
plt.title('RF vs AF2 performance on the test set')
plt.xlabel('RF DockQ')
plt.ylabel('AF DockQ')
plt.legend()
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
plt.tight_layout()
plt.savefig(outdir+'RF_vs_AF_test.svg',format='svg',dpi=300)
#Get num correct
num_correct_RF = np.argwhere(merged['DockQ_dockqstats_marks_RF'].values>=0.23).shape[0]
num_correct_AF = np.argwhere(merged['DockQ_dockqstats_marks_af2_af2andhhblitsmsa_model_1_rec10_run1'].values>=0.23).shape[0]
num_total = len(merged)
print('Success rate RF:',num_correct_RF,'out of',num_total,'|',np.round(100*num_correct_RF/num_total,2),'%')
print('Success rate AF:',num_correct_AF,'out of',num_total,'|',np.round(100*num_correct_AF/num_total,2),'%')
#Get where RF outperforms AF
scores = merged[['DockQ_dockqstats_marks_RF','DockQ_dockqstats_marks_af2_af2andhhblitsmsa_model_1_rec10_run1']].values
rf_pos = scores[np.argwhere(scores[:,0]>=0.23)[:,0],:]
max_scores = np.argmax(rf_pos,axis=1)
print('RF outperform AF', np.argwhere(max_scores==0).shape[0], 'out of',len(rf_pos),'times|',np.argwhere(max_scores==0).shape[0]/len(rf_pos))
def AF_vs_GRAMM_marks(marks_dockq_GRAMM, marks_dockq_AF, outdir):
'''Compare the scores for GRAMM vs AF
'''
marks_dockq_GRAMM = marks_dockq_GRAMM.rename(columns={1: 'DockQ GRAMM'})
marks_dockq_GRAMM['complex_id'] = ['_'.join(x.split('-')) for x in marks_dockq_GRAMM[0]]
merged = pd.merge(marks_dockq_GRAMM,marks_dockq_AF,on='complex_id',how='inner')
print('Number of complexes in merged Marks GRAMM and AF', len(merged))
#Plot
fig,ax = plt.subplots(figsize=(12/2.54,12/2.54))
plt.scatter(merged['DockQ GRAMM'],merged['DockQ_dockqstats_marks_af2_af2andhhblitsmsa_model_1_rec10_run1'],s=1)
plt.plot([0,1],[0,1],linewidth=1,linestyle='--',color='grey')
#Plot correct cutoff
plt.plot([0.23,0.23],[0,0.23],linewidth=1,linestyle='--',color='k')
plt.plot([0,0.23],[0.23,0.23],linewidth=1,linestyle='--',color='k',label='Success cutoff')
plt.title('GRAMM vs AF2 performance on the test set')
plt.xlabel('GRAMM DockQ')
plt.ylabel('AF DockQ')
plt.legend()
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
plt.tight_layout()
plt.savefig(outdir+'GRAMM_vs_AF_test.svg',format='svg',dpi=300)
#Get num correct
num_correct_GRAMM = np.argwhere(merged['DockQ GRAMM'].values>=0.23).shape[0]
num_correct_AF = np.argwhere(merged['DockQ_dockqstats_marks_af2_af2andhhblitsmsa_model_1_rec10_run1'].values>=0.23).shape[0]
num_total = len(merged)
print('Success rate GRAMM:',num_correct_GRAMM,'out of',num_total,'|',np.round(100*num_correct_GRAMM/num_total,2),'%')
print('Success rate AF:',num_correct_AF,'out of',num_total,'|',np.round(100*num_correct_AF/num_total,2),'%')
#Get where GRAMM outperforms AF
scores = merged[['DockQ GRAMM','DockQ_dockqstats_marks_af2_af2andhhblitsmsa_model_1_rec10_run1']].values
GRAMM_pos = scores[np.argwhere(scores[:,0]>=0.23)[:,0],:]
max_scores = np.argmax(GRAMM_pos,axis=1)
print('GRAMM outperform AF', np.argwhere(max_scores==0).shape[0], 'out of',len(GRAMM_pos),'times|',np.argwhere(max_scores==0).shape[0]/len(GRAMM_pos))
def AF_vs_TMint_marks(marks_dockq_TMint, marks_dockq_AF, outdir):
    '''Compare the scores for TMdock (interface) vs AF
    '''
marks_dockq_TMint = marks_dockq_TMint.rename(columns={'dockq': 'DockQ TMint'})
merged = | pd.merge(marks_dockq_TMint,marks_dockq_AF,on='complex_id',how='inner') | pandas.merge |
from datetime import timedelta
from functools import partial
from operator import attrgetter
import dateutil
import numpy as np
import pytest
import pytz
from pandas._libs.tslibs import OutOfBoundsDatetime, conversion
import pandas as pd
from pandas import (
DatetimeIndex, Index, Timestamp, date_range, datetime, offsets,
to_datetime)
from pandas.core.arrays import DatetimeArray, period_array
import pandas.util.testing as tm
class TestDatetimeIndex(object):
@pytest.mark.parametrize('dt_cls', [DatetimeIndex,
DatetimeArray._from_sequence])
def test_freq_validation_with_nat(self, dt_cls):
# GH#11587 make sure we get a useful error message when generate_range
# raises
msg = ("Inferred frequency None from passed values does not conform "
"to passed frequency D")
with pytest.raises(ValueError, match=msg):
dt_cls([pd.NaT, pd.Timestamp('2011-01-01')], freq='D')
with pytest.raises(ValueError, match=msg):
dt_cls([pd.NaT, pd.Timestamp('2011-01-01').value],
freq='D')
def test_categorical_preserves_tz(self):
# GH#18664 retain tz when going DTI-->Categorical-->DTI
# TODO: parametrize over DatetimeIndex/DatetimeArray
# once CategoricalIndex(DTA) works
dti = pd.DatetimeIndex(
[pd.NaT, '2015-01-01', '1999-04-06 15:14:13', '2015-01-01'],
tz='US/Eastern')
ci = pd.CategoricalIndex(dti)
carr = pd.Categorical(dti)
cser = pd.Series(ci)
for obj in [ci, carr, cser]:
result = pd.DatetimeIndex(obj)
tm.assert_index_equal(result, dti)
def test_dti_with_period_data_raises(self):
# GH#23675
data = pd.PeriodIndex(['2016Q1', '2016Q2'], freq='Q')
with pytest.raises(TypeError, match="PeriodDtype data is invalid"):
DatetimeIndex(data)
with pytest.raises(TypeError, match="PeriodDtype data is invalid"):
to_datetime(data)
with pytest.raises(TypeError, match="PeriodDtype data is invalid"):
DatetimeIndex(period_array(data))
with pytest.raises(TypeError, match="PeriodDtype data is invalid"):
to_datetime(period_array(data))
def test_dti_with_timedelta64_data_deprecation(self):
# GH#23675
data = np.array([0], dtype='m8[ns]')
with tm.assert_produces_warning(FutureWarning):
result = DatetimeIndex(data)
assert result[0] == Timestamp('1970-01-01')
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
result = to_datetime(data)
assert result[0] == Timestamp('1970-01-01')
with tm.assert_produces_warning(FutureWarning):
result = DatetimeIndex(pd.TimedeltaIndex(data))
assert result[0] == Timestamp('1970-01-01')
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
result = to_datetime(pd.TimedeltaIndex(data))
assert result[0] == Timestamp('1970-01-01')
def test_construction_caching(self):
df = pd.DataFrame({'dt': pd.date_range('20130101', periods=3),
'dttz': pd.date_range('20130101', periods=3,
tz='US/Eastern'),
'dt_with_null': [pd.Timestamp('20130101'), pd.NaT,
pd.Timestamp('20130103')],
'dtns': pd.date_range('20130101', periods=3,
freq='ns')})
assert df.dttz.dtype.tz.zone == 'US/Eastern'
@pytest.mark.parametrize('kwargs', [
{'tz': 'dtype.tz'},
{'dtype': 'dtype'},
{'dtype': 'dtype', 'tz': 'dtype.tz'}])
def test_construction_with_alt(self, kwargs, tz_aware_fixture):
tz = tz_aware_fixture
i = pd.date_range('20130101', periods=5, freq='H', tz=tz)
kwargs = {key: attrgetter(val)(i) for key, val in kwargs.items()}
result = DatetimeIndex(i, **kwargs)
tm.assert_index_equal(i, result)
@pytest.mark.parametrize('kwargs', [
{'tz': 'dtype.tz'},
{'dtype': 'dtype'},
{'dtype': 'dtype', 'tz': 'dtype.tz'}])
def test_construction_with_alt_tz_localize(self, kwargs, tz_aware_fixture):
tz = tz_aware_fixture
i = pd.date_range('20130101', periods=5, freq='H', tz=tz)
kwargs = {key: attrgetter(val)(i) for key, val in kwargs.items()}
if str(tz) in ('UTC', 'tzutc()'):
warn = None
else:
warn = FutureWarning
with tm.assert_produces_warning(warn, check_stacklevel=False):
result = DatetimeIndex(i.tz_localize(None).asi8, **kwargs)
expected = DatetimeIndex(i, **kwargs)
tm.assert_index_equal(result, expected)
# localize into the provided tz
i2 = DatetimeIndex(i.tz_localize(None).asi8, tz='UTC')
expected = i.tz_localize(None).tz_localize('UTC')
tm.assert_index_equal(i2, expected)
# incompat tz/dtype
pytest.raises(ValueError, lambda: DatetimeIndex(
i.tz_localize(None).asi8, dtype=i.dtype, tz='US/Pacific'))
def test_construction_index_with_mixed_timezones(self):
# gh-11488: no tz results in DatetimeIndex
result = Index([Timestamp('2011-01-01'),
Timestamp('2011-01-02')], name='idx')
exp = DatetimeIndex([Timestamp('2011-01-01'),
Timestamp('2011-01-02')], name='idx')
tm.assert_index_equal(result, exp, exact=True)
assert isinstance(result, DatetimeIndex)
assert result.tz is None
# same tz results in DatetimeIndex
result = Index([Timestamp('2011-01-01 10:00', tz='Asia/Tokyo'),
Timestamp('2011-01-02 10:00', tz='Asia/Tokyo')],
name='idx')
exp = DatetimeIndex(
[Timestamp('2011-01-01 10:00'), Timestamp('2011-01-02 10:00')
], tz='Asia/Tokyo', name='idx')
tm.assert_index_equal(result, exp, exact=True)
assert isinstance(result, DatetimeIndex)
assert result.tz is not None
assert result.tz == exp.tz
# same tz results in DatetimeIndex (DST)
result = Index([Timestamp('2011-01-01 10:00', tz='US/Eastern'),
Timestamp('2011-08-01 10:00', tz='US/Eastern')],
name='idx')
exp = DatetimeIndex([Timestamp('2011-01-01 10:00'),
Timestamp('2011-08-01 10:00')],
tz='US/Eastern', name='idx')
tm.assert_index_equal(result, exp, exact=True)
assert isinstance(result, DatetimeIndex)
assert result.tz is not None
assert result.tz == exp.tz
# Different tz results in Index(dtype=object)
result = Index([Timestamp('2011-01-01 10:00'),
Timestamp('2011-01-02 10:00', tz='US/Eastern')],
name='idx')
exp = Index([Timestamp('2011-01-01 10:00'),
Timestamp('2011-01-02 10:00', tz='US/Eastern')],
dtype='object', name='idx')
tm.assert_index_equal(result, exp, exact=True)
assert not isinstance(result, DatetimeIndex)
result = Index([Timestamp('2011-01-01 10:00', tz='Asia/Tokyo'),
Timestamp('2011-01-02 10:00', tz='US/Eastern')],
name='idx')
exp = Index([Timestamp('2011-01-01 10:00', tz='Asia/Tokyo'),
Timestamp('2011-01-02 10:00', tz='US/Eastern')],
dtype='object', name='idx')
tm.assert_index_equal(result, exp, exact=True)
assert not isinstance(result, DatetimeIndex)
# length = 1
result = Index([Timestamp('2011-01-01')], name='idx')
exp = DatetimeIndex([Timestamp('2011-01-01')], name='idx')
tm.assert_index_equal(result, exp, exact=True)
assert isinstance(result, DatetimeIndex)
assert result.tz is None
# length = 1 with tz
result = Index(
[Timestamp('2011-01-01 10:00', tz='Asia/Tokyo')], name='idx')
exp = DatetimeIndex([Timestamp('2011-01-01 10:00')], tz='Asia/Tokyo',
name='idx')
tm.assert_index_equal(result, exp, exact=True)
assert isinstance(result, DatetimeIndex)
assert result.tz is not None
assert result.tz == exp.tz
def test_construction_index_with_mixed_timezones_with_NaT(self):
# see gh-11488
result = Index([pd.NaT, Timestamp('2011-01-01'),
pd.NaT, Timestamp('2011-01-02')], name='idx')
exp = DatetimeIndex([pd.NaT, Timestamp('2011-01-01'),
pd.NaT, Timestamp('2011-01-02')], name='idx')
tm.assert_index_equal(result, exp, exact=True)
assert isinstance(result, DatetimeIndex)
assert result.tz is None
# Same tz results in DatetimeIndex
result = Index([pd.NaT, Timestamp('2011-01-01 10:00', tz='Asia/Tokyo'),
pd.NaT, Timestamp('2011-01-02 10:00',
tz='Asia/Tokyo')],
name='idx')
exp = DatetimeIndex([pd.NaT, Timestamp('2011-01-01 10:00'),
pd.NaT, Timestamp('2011-01-02 10:00')],
tz='Asia/Tokyo', name='idx')
tm.assert_index_equal(result, exp, exact=True)
assert isinstance(result, DatetimeIndex)
assert result.tz is not None
assert result.tz == exp.tz
# same tz results in DatetimeIndex (DST)
result = Index([Timestamp('2011-01-01 10:00', tz='US/Eastern'),
pd.NaT,
Timestamp('2011-08-01 10:00', tz='US/Eastern')],
name='idx')
exp = DatetimeIndex([Timestamp('2011-01-01 10:00'), pd.NaT,
Timestamp('2011-08-01 10:00')],
tz='US/Eastern', name='idx')
tm.assert_index_equal(result, exp, exact=True)
assert isinstance(result, DatetimeIndex)
assert result.tz is not None
assert result.tz == exp.tz
# different tz results in Index(dtype=object)
result = Index([pd.NaT, Timestamp('2011-01-01 10:00'),
pd.NaT, Timestamp('2011-01-02 10:00',
tz='US/Eastern')],
name='idx')
exp = Index([pd.NaT, Timestamp('2011-01-01 10:00'),
pd.NaT, Timestamp('2011-01-02 10:00', tz='US/Eastern')],
dtype='object', name='idx')
tm.assert_index_equal(result, exp, exact=True)
assert not isinstance(result, DatetimeIndex)
result = Index([pd.NaT, Timestamp('2011-01-01 10:00', tz='Asia/Tokyo'),
pd.NaT, Timestamp('2011-01-02 10:00',
tz='US/Eastern')], name='idx')
exp = Index([pd.NaT, Timestamp('2011-01-01 10:00', tz='Asia/Tokyo'),
pd.NaT, Timestamp('2011-01-02 10:00', tz='US/Eastern')],
dtype='object', name='idx')
tm.assert_index_equal(result, exp, exact=True)
assert not isinstance(result, DatetimeIndex)
# all NaT
result = Index([pd.NaT, pd.NaT], name='idx')
exp = DatetimeIndex([pd.NaT, pd.NaT], name='idx')
tm.assert_index_equal(result, exp, exact=True)
assert isinstance(result, DatetimeIndex)
assert result.tz is None
# all NaT with tz
result = Index([pd.NaT, pd.NaT], tz='Asia/Tokyo', name='idx')
exp = DatetimeIndex([pd.NaT, pd.NaT], tz='Asia/Tokyo', name='idx')
tm.assert_index_equal(result, exp, exact=True)
assert isinstance(result, DatetimeIndex)
assert result.tz is not None
assert result.tz == exp.tz
def test_construction_dti_with_mixed_timezones(self):
# GH 11488 (not changed, added explicit tests)
# no tz results in DatetimeIndex
result = DatetimeIndex(
[Timestamp('2011-01-01'), Timestamp('2011-01-02')], name='idx')
exp = DatetimeIndex(
[Timestamp('2011-01-01'), Timestamp('2011-01-02')], name='idx')
tm.assert_index_equal(result, exp, exact=True)
assert isinstance(result, DatetimeIndex)
# same tz results in DatetimeIndex
result = DatetimeIndex([Timestamp('2011-01-01 10:00', tz='Asia/Tokyo'),
Timestamp('2011-01-02 10:00',
tz='Asia/Tokyo')],
name='idx')
exp = DatetimeIndex([Timestamp('2011-01-01 10:00'),
Timestamp('2011-01-02 10:00')],
tz='Asia/Tokyo', name='idx')
tm.assert_index_equal(result, exp, exact=True)
assert isinstance(result, DatetimeIndex)
# same tz results in DatetimeIndex (DST)
result = DatetimeIndex([Timestamp('2011-01-01 10:00', tz='US/Eastern'),
Timestamp('2011-08-01 10:00',
tz='US/Eastern')],
name='idx')
exp = DatetimeIndex([Timestamp('2011-01-01 10:00'),
Timestamp('2011-08-01 10:00')],
tz='US/Eastern', name='idx')
tm.assert_index_equal(result, exp, exact=True)
assert isinstance(result, DatetimeIndex)
# tz mismatch when coercing to tz-aware raises TypeError/ValueError
with pytest.raises(ValueError):
DatetimeIndex([Timestamp('2011-01-01 10:00', tz='Asia/Tokyo'),
Timestamp('2011-01-02 10:00', tz='US/Eastern')],
name='idx')
msg = 'cannot be converted to datetime64'
with pytest.raises(ValueError, match=msg):
DatetimeIndex([Timestamp('2011-01-01 10:00'),
Timestamp('2011-01-02 10:00', tz='US/Eastern')],
tz='Asia/Tokyo', name='idx')
with pytest.raises(ValueError):
DatetimeIndex([Timestamp('2011-01-01 10:00', tz='Asia/Tokyo'),
Timestamp('2011-01-02 10:00', tz='US/Eastern')],
tz='US/Eastern', name='idx')
with pytest.raises(ValueError, match=msg):
# passing tz should result in DatetimeIndex, then mismatch raises
# TypeError
Index([pd.NaT, Timestamp('2011-01-01 10:00'),
pd.NaT, Timestamp('2011-01-02 10:00', tz='US/Eastern')],
tz='Asia/Tokyo', name='idx')
def test_construction_base_constructor(self):
arr = [pd.Timestamp('2011-01-01'), pd.NaT, pd.Timestamp('2011-01-03')]
tm.assert_index_equal(pd.Index(arr), pd.DatetimeIndex(arr))
tm.assert_index_equal(pd.Index(np.array(arr)),
pd.DatetimeIndex(np.array(arr)))
arr = [np.nan, pd.NaT, pd.Timestamp('2011-01-03')]
tm.assert_index_equal(pd.Index(arr), pd.DatetimeIndex(arr))
tm.assert_index_equal(pd.Index(np.array(arr)),
pd.DatetimeIndex(np.array(arr)))
def test_construction_outofbounds(self):
# GH 13663
dates = [datetime(3000, 1, 1), datetime(4000, 1, 1),
datetime(5000, 1, 1), datetime(6000, 1, 1)]
exp = Index(dates, dtype=object)
# coerces to object
tm.assert_index_equal(Index(dates), exp)
with pytest.raises(OutOfBoundsDatetime):
# can't create DatetimeIndex
DatetimeIndex(dates)
def test_construction_with_ndarray(self):
# GH 5152
dates = [datetime(2013, 10, 7),
datetime(2013, 10, 8),
datetime(2013, 10, 9)]
data = DatetimeIndex(dates, freq=pd.offsets.BDay()).values
result = DatetimeIndex(data, freq=pd.offsets.BDay())
expected = DatetimeIndex(['2013-10-07',
'2013-10-08',
'2013-10-09'],
freq='B')
tm.assert_index_equal(result, expected)
def test_verify_integrity_deprecated(self):
# GH#23919
with tm.assert_produces_warning(FutureWarning):
DatetimeIndex(['1/1/2000'], verify_integrity=False)
def test_range_kwargs_deprecated(self):
# GH#23919
with tm.assert_produces_warning(FutureWarning):
DatetimeIndex(start='1/1/2000', end='1/10/2000', freq='D')
def test_integer_values_and_tz_deprecated(self):
# GH-24559
values = np.array([946684800000000000])
with tm.assert_produces_warning(FutureWarning):
result = DatetimeIndex(values, tz='US/Central')
expected = pd.DatetimeIndex(['2000-01-01T00:00:00'], tz="US/Central")
tm.assert_index_equal(result, expected)
# but UTC is *not* deprecated.
with tm.assert_produces_warning(None):
result = DatetimeIndex(values, tz='UTC')
expected = pd.DatetimeIndex(['2000-01-01T00:00:00'], tz="US/Central")
def test_constructor_coverage(self):
rng = date_range('1/1/2000', periods=10.5)
exp = date_range('1/1/2000', periods=10)
tm.assert_index_equal(rng, exp)
msg = 'periods must be a number, got foo'
with pytest.raises(TypeError, match=msg):
date_range(start='1/1/2000', periods='foo', freq='D')
with pytest.raises(ValueError):
with tm.assert_produces_warning(FutureWarning):
DatetimeIndex(start='1/1/2000', end='1/10/2000')
with pytest.raises(TypeError):
DatetimeIndex('1/1/2000')
# generator expression
gen = (datetime(2000, 1, 1) + timedelta(i) for i in range(10))
result = DatetimeIndex(gen)
expected = DatetimeIndex([datetime(2000, 1, 1) + timedelta(i)
for i in range(10)])
tm.assert_index_equal(result, expected)
# NumPy string array
strings = np.array(['2000-01-01', '2000-01-02', '2000-01-03'])
result = DatetimeIndex(strings)
expected = DatetimeIndex(strings.astype('O'))
tm.assert_index_equal(result, expected)
from_ints = DatetimeIndex(expected.asi8)
tm.assert_index_equal(from_ints, expected)
# string with NaT
strings = np.array(['2000-01-01', '2000-01-02', 'NaT'])
result = DatetimeIndex(strings)
expected = DatetimeIndex(strings.astype('O'))
tm.assert_index_equal(result, expected)
from_ints = DatetimeIndex(expected.asi8)
tm.assert_index_equal(from_ints, expected)
import pandas as pd
import numpy as np
from pathos.multiprocessing import ProcessPool
import multiprocessing
import inspect
from scipy.stats import rankdata
from tuneta.config import *
from tuneta.optimize import Optimize
import pandas_ta as pta
from finta import TA as fta
import talib as tta
import re
from tabulate import tabulate
from tuneta.optimize import col_name
from collections import OrderedDict
class TuneTA():
def __init__(self, n_jobs=multiprocessing.cpu_count() - 1, verbose=False):
self.fitted = []
self.n_jobs = n_jobs
self.verbose = verbose
def fit(self, X, y, trials=5, indicators=['tta'], ranges=[(3, 180)],
spearman=True, weights=None, early_stop=99999, split=None):
"""
Optimize indicator parameters to maximize correlation
:param X: Historical dataset
:param y: Target used to measure correlation. Can be a subset index of X
:param trials: Number of optimization trials per indicator set
:param indicators: List of indicators to optimize
:param ranges: Parameter search space
:param spearman: Perform spearman vs pearson correlation
:param weights: Optional weights sharing the same index as y
:param early_stop: Max number of optimization trials before stopping
:param split: Index cut points defining time periods
"""
self.fitted = [] # List containing each indicator completed study
X.columns = X.columns.str.lower() # columns must be lower case
pool = ProcessPool(nodes=self.n_jobs) # Set parallel cores
# Package level optimization
if 'tta' in indicators:
indicators = indicators + talib_indicators
indicators.remove('tta')
if 'pta' in indicators:
indicators = indicators + pandas_ta_indicators
indicators.remove('pta')
if 'fta' in indicators:
indicators = indicators + finta_indicatrs
indicators.remove('fta')
if 'all' in indicators:
indicators = talib_indicators + pandas_ta_indicators + finta_indicatrs
indicators = list(OrderedDict.fromkeys(indicators))
# Create textual representation of function in Optuna format
# Example: 'tta.RSI(X.close, length=trial.suggest_int(\'timeperiod1\', 2, 1500))'
# Utilizes the signature of the indicator (ie user parameters) if available
# TTA uses help docstrings as signature is not available in C bindings
# Parameters contained in config.py are tuned
# Iterate user defined search space ranges
for low, high in ranges:
if low <= 1:
raise ValueError("Range low must be > 1")
if high >= len(X):
raise ValueError(f"Range high:{high} must be > length of X:{len(X)}")
# Iterate indicators per range
for ind in indicators:
# Index column to optimize if indicator returns dataframe
idx = 0
if ":" in ind:
idx = int(ind.split(":")[1])
ind = ind.split(":")[0]
fn = f"{ind}("
# If TTA indicator, use doc strings for lack of better way to
# get indicator arguments (C binding)
if ind[0:3] == "tta":
usage = eval(f"{ind}.__doc__").split(")")[0].split("(")[1]
params = re.sub('[^0-9a-zA-Z_\s]', '', usage).split()
# Pandas-TA and FinTA both can be inspected for parameters
else:
sig = inspect.signature(eval(ind))
params = sig.parameters.values()
# Format function string
suggest = False
for param in params:
param = re.split(':|=', str(param))[0].strip()
if param == "open_":
param = "open"
if param == "real":
fn += f"X.close, "
elif param == "ohlc":
fn += f"X, "
elif param == "ohlcv":
fn += f"X, "
elif param in tune_series:
fn += f"X.{param}, "
elif param in tune_params:
suggest = True
if param in ['mamode']:
fn += f"{param}=trial.suggest_categorical('{param}', {tune_ta_mm}), "
else:
fn += f"{param}=trial.suggest_int('{param}', {low}, {high}), "
fn += ")"
# Only optimize indicators that contain tunable parameters
if suggest:
self.fitted.append(pool.apipe(Optimize(function=fn, n_trials=trials,
spearman=spearman).fit, X, y, idx=idx, verbose=self.verbose,
weights=weights, early_stop=early_stop, split=split), )
else:
self.fitted.append(pool.apipe(Optimize(function=fn, n_trials=1,
spearman=spearman).fit, X, y, idx=idx, verbose=self.verbose,
weights=weights, early_stop=early_stop, split=split), )
# Blocking wait to retrieve results
# if an item comes back as non-numeric, don't add it
self.fitted = [fit.get() for fit in self.fitted if isinstance(fit.get().res_y_corr,(float,int))]
# Some items might come back as an array
# if they cannot be converted to a float, skip them
for i in self.fitted:
try:
float(i.res_y_corr)
except:
continue
def prune(self, top=2, studies=1):
"""
Select most correlated with target, least intercorrelated
:param top: Selects top x most correlated with target
:param studies: From top x, keep y least intercorrelated
:return:
"""
# Error checking
if top > len(self.fitted) or studies > len(self.fitted):
raise ValueError("Cannot prune because top or studies is >= tuned indicators")
return
if top < studies:
raise ValueError(f"top {top} must be >= studies {studies}")
# Create fitness array that maps to the correlation of each indicator study
fitness = []
for t in self.fitted:
if t.split is None:
fitness.append(t.study.best_trial.value)
else:
fitness.append(sum(t.study.trials[t.study.top_trial].values))
fitness = np.array(fitness)
# Select top x indices with most correlation to target
fitness = fitness.argsort()[::-1][:top] # Get sorted fitness indices of HOF
# Gets best trial feature of each study in HOF
features = []
top_studies = [self.fitted[i] for i in fitness] # Get fitness mapped studies
for study in top_studies:
features.append(study.res_y) # Get indicator values stored from optimization
features = np.array(features) # Features of HOF studies / actual indicator results
# Correlation of HOF features
# Create correlation table of features
eval = np.apply_along_axis(rankdata, 1, features)
with np.errstate(divide='ignore', invalid='ignore'):
correlations = np.abs(np.corrcoef(eval))
np.fill_diagonal(correlations, 0.)
# Iteratively removes least fit individual of most correlated pairs of studies
# IOW, find the most correlated pairs and drop the member least correlated to the target until x studies remain
components = list(range(top))
indices = list(range(top))
while len(components) > studies:
most_correlated = np.unravel_index(np.argmax(correlations), correlations.shape)
worst = max(most_correlated)
components.pop(worst)
indices.remove(worst)
correlations = correlations[:, indices][indices, :]
indices = list(range(len(components)))
# Save only fitted studies (overwriting all studies)
self.fitted = [self.fitted[i] for i in fitness[components]]
def transform(self, X, columns=None):
"""
Given X, create features of fitted studies
:param X: Dataset with features used to create fitted studies
:return:
"""
# Remove trailing identifier in column list if present
if columns is not None:
columns = [re.sub(r'_[0-9]+$', '', s) for s in columns]
X.columns = X.columns.str.lower() # columns must be lower case
pool = ProcessPool(nodes=self.n_jobs) # Number of jobs
self.result = []
# Iterate fitted studies and calculate TA with fitted parameter set
for ind in self.fitted:
# Create field if no columns or is in columns list
if columns is None or ind.res_y.name in columns:
self.result.append(pool.apipe(ind.transform, X))
# Blocking wait for asynchronous results
self.result = [res.get() for res in self.result]
# Combine results into dataframe to return
res = pd.concat(self.result, axis=1)
return res
def report(self, target_corr=True, features_corr=True):
fns = [] # Function names
cor = [] # Target Correlation
moc = [] # Multi-Time Period Correlation
mean_moc = []
std_moc = [] # Multi STD
features = []
for fit in self.fitted:
if fit.split is None:
fns.append(col_name(fit.function, fit.study.best_params))
else:
fns.append(col_name(fit.function, fit.study.top_params))
moc.append(fit.study.trials[fit.study.top_trial].values)
mean_moc.append(np.mean(fit.study.trials[fit.study.top_trial].values))
std_moc.append(np.std(fit.study.trials[fit.study.top_trial].values))
cor.append(np.round(fit.res_y_corr, 6))
features.append(fit.res_y)
if fit.split is None:
fitness = pd.DataFrame(cor, index=fns, columns=['Correlation'])
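# ---------------------------------------------------------------------------
# Editor's usage sketch (not part of the original tuneta module). It assumes a
# lower-case OHLCV CSV on disk and uses a simple next-bar return as the target;
# the indicator strings follow the 'tta.'/'pta.'/'fta.' convention handled by
# fit() above.
if __name__ == "__main__":
    X = pd.read_csv('ohlcv.csv', index_col=0, parse_dates=True)   # open/high/low/close/volume (assumed file)
    y = X['close'].pct_change().shift(-1).fillna(0)               # assumed target definition
    tt = TuneTA(n_jobs=4, verbose=True)
    tt.fit(X, y, trials=10, indicators=['tta.RSI', 'pta.ema', 'fta.SMA'], ranges=[(4, 30)])
    tt.prune(top=3, studies=2)
    features = tt.transform(X)
    print(features.head())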
import unittest
import pandas as pd
from featurefilter import FeatureCorrelationFilter
def test_fit_high_continuous_correlation():
train_df = pd.DataFrame({'A': [0, 1],
'B': [0, 1]})
filter_ = FeatureCorrelationFilter()
train_df = filter_.fit(train_df)
assert filter_.columns_to_drop == ['B']
def test_excluding_target_column():
train_df = pd.DataFrame({'A': [0, 1],
'B': [0, 1],
'Y': [0, 1]})
filter_ = FeatureCorrelationFilter(target_column='Y')
train_df = filter_.fit(train_df)
assert filter_.columns_to_drop == ['B']
def test_high_negative_continuous_correlation():
train_df = pd.DataFrame({'A': [0, 1], 'B': [0, -1], 'Y': [0, 1]})
test_df = pd.DataFrame({'A': [0, 0], 'B': [0, 0], 'Y': [0, 1]})
filter_ = FeatureCorrelationFilter(target_column='Y')
train_df = filter_.fit_transform(train_df)
test_df = filter_.transform(test_df)
assert train_df.equals(pd.DataFrame({'A': [0, 1], 'Y': [0, 1]}))
import streamlit as st
import plotly.express as px
import plotly.graph_objects as go
import pandas as pd
import numpy as np
import io
st.title('Acquisition clients')
# Datasets
df1 = pd.read_csv("./assets/Abandoned_Baskets.csv")
# This script gets nba shot data and creates a raw data set
# Importing required modules
import pandas as pd
import json
import time
import urllib
from nba_api.stats.endpoints import shotchartdetail
from nba_api.stats.static import players
# Directory info for where data will be stored
username = 'macary'
filepath = 'C:/Users/' + username + '/Documents/Data/NBA/'
# Get a list of all players with relevant data in dictionary format
player_dict = players.get_players()
# Create a list of playerIDs
pids = [player_dict[i]['id'] for i in range(len(player_dict))]
# Create a list of seasons
seasons = ['2002-03', '2003-04', '2004-05', '2005-06']
# Initializing a dataframe in which all data is stored
df = pd.DataFrame()
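# Editor's sketch of the retrieval loop (not part of the original script). The
# ShotChartDetail arguments follow the nba_api documentation and should be
# verified against the installed version; the output file name is an assumption.
for season in seasons:
    for pid in pids:
        response = shotchartdetail.ShotChartDetail(team_id=0, player_id=pid,
                                                   season_nullable=season,
                                                   context_measure_simple='FGA')
        shots = response.get_data_frames()[0]
        df = pd.concat([df, shots], ignore_index=True)
        time.sleep(0.6)  # throttle requests to the stats API
df.to_csv(filepath + 'nba_shot_data_2002_2006.csv', index=False)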
from pathlib import Path
import json
import sys
import pandas as pd
def load_json(path):
with open(path) as infile:
return json.load(infile)
if __name__ == "__main__":
DATA_DIR = sys.argv[1]
DATA_FILE = sys.argv[2]
fname = Path(DATA_DIR, DATA_FILE)
raw_data = load_json(fname)
auto_metric_names = [
k for k in raw_data['wikitext']['etm']['metrics']
if not (k.startswith("ratings") or k.startswith("intrusion") or k.endswith("top15") or k.endswith("top5"))
]
auto_metric_names += [k.replace("nytimes", "wikitext") for k in auto_metric_names if "nytimes" in k]
auto_metric_names = sorted(auto_metric_names)
intrusion_rows = []
ratings_rows = []
n_ratings_annotators = len(raw_data['wikitext']['etm']['metrics']['ratings_scores_raw'][0])
n_intrusion_annotators = len(raw_data['wikitext']['etm']['metrics']['intrusion_scores_raw'][0])
n_topics = 50
for dataset in raw_data:
for model in raw_data[dataset]:
metric_data = raw_data[dataset][model]["metrics"]
for topic_idx in range(n_topics):
# automated metrics are at the topic level
auto_metrics = {
k: v[topic_idx] for k, v in metric_data.items()
if not isinstance(v[0], list) and
k in auto_metric_names
}
# build ratings data
for human_idx in range(n_ratings_annotators):
row = {
"dataset": dataset,
"model": model,
"topic_idx": topic_idx,
"human_idx": human_idx,
}
human_metrics = {
k.replace("ratings_", ""): v[topic_idx][human_idx] for k, v in metric_data.items()
if isinstance(v[0], list) and k.startswith("ratings")
}
row.update(**auto_metrics)
row.update(**human_metrics)
ratings_rows.append(row)
# build intrusion data
for human_idx in range(n_intrusion_annotators):
row = {
"dataset": dataset,
"model": model,
"topic_idx": topic_idx,
"human_idx": human_idx,
}
human_metrics = {
k.replace("intrusion_", ""): v[topic_idx][human_idx] for k, v in metric_data.items()
if isinstance(v[0], list) and k.startswith("intrusion")
}
row.update(**auto_metrics)
row.update(**human_metrics)
intrusion_rows.append(row)
ratings = pd.DataFrame(ratings_rows)
intrusions = pd.DataFrame(intrusion_rows)
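    # Editor's sketch (not in the original script): persist the long-format
    # tables next to the input data; the output file names are assumptions.
    # From here, automated metrics can be lined up against human judgments,
    # e.g. ratings.groupby(['dataset', 'model'])['scores_raw'].mean().
    ratings.to_csv(Path(DATA_DIR, "ratings_long.csv"), index=False)
    intrusions.to_csv(Path(DATA_DIR, "intrusions_long.csv"), index=False)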
from msw.model_stacking import FeatureGenerator, getConfigParameters
from sklearn.preprocessing import MinMaxScaler, Imputer, LabelEncoder, OneHotEncoder
from sklearn.preprocessing import StandardScaler
#%%
import pandas as pd
#%%
#
# get parameters
#
CONFIG = getConfigParameters()
#%%
#
# feature set 1
#
print("Preparing Level 0 Feature Set 1")
comment="""
all attributes in train and test data set.
missing values set to -999
"""
fs = FeatureGenerator('raw','KFS01',comment=comment)
# get raw data
X_train, y_train, X_test = fs.getRawData(train_ds='train.csv',
test_ds='test.csv')
##############################################################
# #
# CUSTOMIZE FOR KAGGLE COMPETITION #
# #
##############################################################
X_train.fillna(-999,inplace=True)
X_test.fillna(-999,inplace=True)
########### END OF KAGGLE COMPETITION CUSTOMIZATION #########
fs.saveFeatureSet(X_train, y_train, X_test)
#%%
#
# feature set 2
#
print("Preparing Level 0 Feature Set 2")
comment="""
Only numeric features in train and test data set.
missing values set to -999
"""
fs = FeatureGenerator('raw','KFS02',comment=comment)
# get raw data
X_train, y_train, X_test = fs.getRawData(train_ds='train.csv', test_ds='test.csv')
##############################################################
# #
# CUSTOMIZE FOR KAGGLE COMPETITION #
# #
##############################################################
# find only numberic attributes
numeric_predictors = [x for x in X_train.columns if X_train[x].dtype != 'O']
X_train = X_train.loc[:,numeric_predictors]
X_train.fillna(-999,inplace=True)
X_train.shape
X_test = X_test.loc[:,numeric_predictors]
X_test.fillna(-999,inplace=True)
X_test.shape
########### END OF KAGGLE COMPETITION CUSTOMIZATION #########
fs.saveFeatureSet(X_train, y_train, X_test)
#%%
comment = """
#
# feature set 3 - suitable for neural network
# one-hot encode categorical variables
# scale numeric to [0,1]
#
"""
fs = FeatureGenerator('raw','KFS03',comment=comment)
# get raw data
X_train, y_train, X_test = fs.getRawData(train_ds='train.csv', test_ds='test.csv')
##############################################################
# #
# CUSTOMIZE FOR KAGGLE COMPETITION #
# #
##############################################################
print("Preparing Level 0 Feature Set 3")
# Keep at most this number of the most frequent unique factor levels
TOP_CATEGORICAL_LEVELS = 10
# maximum levels for categorical values
MAX_CATEGORICAL_LEVELS = 100
# Exclude these predictors from the baseline feature set
PREDICTORS_TO_EXCLUDE = []
print('Shape X_train: ',X_train.shape,", Shape X_test:",X_test.shape)
training_rows = X_train.shape[0]
# partition numeric vs categorical predictors
num_predictors = [x for x in X_train.columns if X_train[x].dtype != 'O']
cat_predictors = list(set(X_train.columns) - set(num_predictors))
print('Number of numeric predictors: ',len(num_predictors),', Number of categorical predictors: ',len(cat_predictors))
# one-hot encode categorical predictors
train_encoded_list = []
test_encoded_list = []
for c in cat_predictors:
# get one column of categorical variables
train_cat = X_train[c].copy()
test_cat = X_test[c].copy()
#temporarily combine train and test to get universe of valid values
all_cat = pd.concat([train_cat,test_cat])
all_cat.name = train_cat.name
# determine number of unique levels
number_of_levels = len(all_cat.unique())
print('Predictor: ',c,' levels ',number_of_levels)
if number_of_levels > MAX_CATEGORICAL_LEVELS:
print(" By passing")
continue
# handle situation where number of unique levels exceed threshold
if number_of_levels > TOP_CATEGORICAL_LEVELS:
counts_by_level = all_cat.value_counts()
# get level values for those not in the top ranks
low_count_levels = counts_by_level.index[TOP_CATEGORICAL_LEVELS:]
# eliminate NULL value if present
levels_to_other = [x for x in low_count_levels if len(x)>0]
# set less frequent levels to special valid value
idx = [x in set(levels_to_other)for x in train_cat]
train_cat.loc[idx] = '__OTHER__'
idx = [x in set(levels_to_other)for x in test_cat]
test_cat.loc[idx] = '__OTHER__'
# impute special value for any missing values
idx = [ isinstance(x,float) for x in all_cat]
all_cat.loc[idx] = '__N/A__'
# now hot-one encode categorical variable
lb = LabelEncoder()
ohe = OneHotEncoder(sparse=False)
# training categorical attribute
temp = lb.fit_transform(all_cat)
temp = temp.reshape(-1,1)
temp = ohe.fit_transform(temp)
#generate column names for one-hot encoding
column_names = [all_cat.name + '.' + x for x in lb.classes_]
# split back out to training and test data sets
train_encoded_list.append(pd.DataFrame(temp[:training_rows],columns=column_names))
test_encoded_list.append(pd.DataFrame(temp[training_rows:],columns=column_names))
# flatten out into single dataframe
X_train_cat = pd.concat(train_encoded_list,axis=1)
X_test_cat = pd.concat(test_encoded_list,axis=1)
# for numeric predictors use median for missing values
X_train_num = X_train.loc[:,num_predictors]
X_test_num = X_test.loc[:,num_predictors]
# impute median value for missing values and scale to [0,1]
imp = Imputer(strategy='median')
mms = MinMaxScaler()
# flag missing values
train_isnan = pd.DataFrame(X_train_num.isnull().astype('int'),index=X_train_num.index)
train_isnan.columns = [c+'_isnan' for c in X_train_num.columns]
# impute missing values and scale the training data
X_train_num = pd.DataFrame(mms.fit_transform(imp.fit_transform(X_train_num)),columns=num_predictors)
X_train_num = pd.concat([X_train_num,train_isnan],axis=1)
X_train_num = X_train_num[sorted(X_train_num.columns)]
# flag missing values
test_isnan = pd.DataFrame(X_test_num.isnull().astype('int'),index=X_test_num.index)
test_isnan.columns = [c+'_isnan' for c in X_test_num.columns]
# impute missing values and scale the test data
X_test_num = pd.DataFrame(mms.transform(imp.transform(X_test_num)),columns=num_predictors)
X_test_num = pd.concat([X_test_num,test_isnan],axis=1)
X_test_num = X_test_num[sorted(X_test_num.columns)]
# combine numeric and categorical attributes back to new training and test data set
X_train_new = pd.concat([X_train_num,X_train_cat],axis=1)
X_test_new = pd.concat([X_test_num,X_test_cat],axis=1)
########### END OF KAGGLE COMPETITION CUSTOMIZATION #########
# save new feature set
fs.saveFeatureSet(X_train_new, y_train, X_test_new)
#%%
comment="""
#
# feature set 4 - suitable for neural network
# one-hot encode categorical variables
# Standardize Numeric variables
#
"""
fs = FeatureGenerator('raw','KFS04',comment=comment)
# get raw data
X_train, y_train, X_test = fs.getRawData(train_ds='train.csv', test_ds='test.csv')
##############################################################
# #
# CUSTOMIZE FOR KAGGLE COMPETITION #
# #
##############################################################
# Keep at most this number of the most frequent unique factor levels
TOP_CATEGORICAL_LEVELS = 10
# maximum levels for categorical values
MAX_CATEGORICAL_LEVELS = 100
# Exclude these predictors from the baseline feature set
PREDICTORS_TO_EXCLUDE = []
print("Preparing Level 0 Feature Set 4")
print('Shape X_train: ',X_train.shape,", Shape X_test:",X_test.shape)
training_rows = X_train.shape[0]
# partition numeric vs categorical predictors
num_predictors = [x for x in X_train.columns if X_train[x].dtype != 'O']
cat_predictors = list(set(X_train.columns) - set(num_predictors))
print('Number of numeric predictors: ',len(num_predictors),', Number of categorical predictors: ',len(cat_predictors))
# one-hot encode categorical predictors
train_encoded_list = []
test_encoded_list = []
for c in cat_predictors:
# get one column of categorical variables
train_cat = X_train[c].copy()
test_cat = X_test[c].copy()
#temporarily combine train and test to get universe of valid values
all_cat = pd.concat([train_cat,test_cat])
all_cat.name = train_cat.name
# determine number of unique levels
number_of_levels = len(all_cat.unique())
print('Predictor: ',c,' levels ',number_of_levels)
if number_of_levels > MAX_CATEGORICAL_LEVELS:
print(" By passing")
continue
# handle situation where number of unique levels exceed threshold
if number_of_levels > TOP_CATEGORICAL_LEVELS:
counts_by_level = all_cat.value_counts()
# get level values for those not in the top ranks
low_count_levels = counts_by_level.index[TOP_CATEGORICAL_LEVELS:]
# eliminate NULL value if present
levels_to_other = [x for x in low_count_levels if len(x)>0]
# set less frequent levels to special valid value
idx = [x in set(levels_to_other)for x in train_cat]
train_cat.loc[idx] = '__OTHER__'
idx = [x in set(levels_to_other)for x in test_cat]
test_cat.loc[idx] = '__OTHER__'
# impute special value for any missing values
idx = [ isinstance(x,float) for x in all_cat]
all_cat.loc[idx] = '__N/A__'
# now one-hot encode the categorical variable
lb = LabelEncoder()
ohe = OneHotEncoder(sparse=False)
# training categorical attribute
temp = lb.fit_transform(all_cat)
temp = temp.reshape(-1,1)
temp = ohe.fit_transform(temp)
#generate column names for one-hot encoding
column_names = [all_cat.name + '.' + x for x in lb.classes_]
# split back out to training and test data sets
train_encoded_list.append(pd.DataFrame(temp[:training_rows],columns=column_names))
test_encoded_list.append(pd.DataFrame(temp[training_rows:],columns=column_names))
# flatten out into single dataframe
X_train_cat = pd.concat(train_encoded_list,axis=1)
X_test_cat = pd.concat(test_encoded_list,axis=1)
import pandas as pd
import torch
def prediction(model, images, targets=None):
"""
Makes and stores predictions on one batch of images.
:param model: The model used for prediction.
:param images: The batch of images on which to predict bounding boxes.
:param targets: Optional ground truth labels and bounding boxes.
:return: A Dataframe consisting of predicted and (if supplied) ground truth labels and bounding boxes.
"""
predictions = {"pred_boxes": [],
"pred_scores": [],
"pred_labels": []}
if targets:
predictions["gt_boxes"] = [target["bboxes"].detach().cpu().numpy() for target in targets]
predictions["gt_labels"] = [target["labels"].detach().cpu().numpy() for target in targets]
with torch.no_grad():
det = model(images)
for i in range(images.shape[0]):
pred_det = det[i]
predictions["pred_boxes"].append(pred_det[:, :4].detach().cpu().numpy())
predictions["pred_scores"].append(pred_det[:, 4].detach().cpu().numpy())
predictions["pred_labels"].append(pred_det[:, 5].detach().cpu().numpy().astype(int))
return predictions
def prediction_df(model, test_loader,
device=torch.device("cuda" if torch.cuda.is_available() else "cpu"),
verbose=0):
"""
Creates a Dataframe of predictions from unlabeled test data.
:param model: The model used for predictions.
:param test_loader: Dataloader for unlabeled test data.
:param device: Device to load the model on. CPU or CUDA. Defaults using CUDA if available, otherwise CPU.
:param verbose: If positive prints output every "verbose" steps.
:return: Dataframe of bounding box predictions.
"""
assert verbose >= 0
predictions = []
n = 0
for images in test_loader:
n += 1
images = torch.stack(images).to(device).float()
predictions.append(pd.DataFrame(prediction(model, images)))
if verbose and n % verbose == 0:
print(f"Processed batch {n}.")
return pd.concat(predictions)
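# Editor's usage sketch (not part of the original module). `model` and
# `test_dataset` are assumed to exist elsewhere; the loader must yield lists of
# image tensors because prediction_df stacks them with torch.stack.
#
# from torch.utils.data import DataLoader
# test_loader = DataLoader(test_dataset, batch_size=8,
#                          collate_fn=lambda batch: list(batch))
# model.eval()
# preds = prediction_df(model, test_loader, verbose=10)
# preds.to_csv('predictions.csv', index=False)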
import streamlit as st
import os
import requests
import pandas as pd
import numpy as np
import plotly.express as px
import plotly.graph_objects as go
from datetime import date, timedelta, datetime
PATH = os.path.abspath('')
def get_csv_from_url():
'''This function gets updated COVID-19 data from the OWID website and opens it in a dataframe.'''
url="https://covid.ourworldindata.org/data/owid-covid-data.csv"
df=pd.read_csv(url)
return df
def download_csv():
"""This function downloads updated COVID-19 data from the OWID website (for testing purposes, it will be not used not in the final app)"""
url = 'https://covid.ourworldindata.org/data/owid-covid-data.csv'
r = requests.get(url)
open(PATH + os.sep + 'data/covid_data.csv', 'wb').write(r.content)
df = pd.read_csv(PATH + os.sep + 'data/covid_data.csv')
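# Editor's sketch (not part of the original app): one way the loaders above could
# feed the page. The OWID column names (location, date, new_cases) are taken from
# the public dataset and should be double-checked.
#
# df = get_csv_from_url()
# country = st.selectbox('Country', sorted(df['location'].unique()))
# st.line_chart(df[df['location'] == country].set_index('date')['new_cases'])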
import geopandas as gpd
from shapely.geometry import LineString, Point
from datetime import datetime, timedelta
import pandas as pd
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
def regresa_puntos(s):
if s.size > 1:
return s.tolist()
def get_start_end_distance(line):
start = Point(line.coords[0])
end = Point(line.coords[-1])
return start.distance(end)
def get_trayectories(input_points, threshold=300):
"""
Returns the trajectories aggregated by user, for each day in
4-hour intervals.
**NOTE:** assumes the input points are in 4326 and
returns the trajectories in 32614. This should change and take,
at the very least, the SRID of the output.
It also assumes the data has a fecha_hora_dt column that can be parsed
to datetime with format='%Y-%m-%d %H:%M:%S' and that it is in the
Mexico City (CDMX) time zone. The data must have a Usuario column to
group the trajectories.
Parameters:
input_points (GeoDataFrame(Point)): the input points
threshold (float): minimum separation between start and end to be
considered a trajectory
Returns:
GeoDataFrame indexed by day and interval with the resulting trajectories
"""
tuits = gpd.read_file(input_points)
tuits = tuits.to_crs("EPSG:32614")
tuits['fecha_hora_dt'] = pd.to_datetime(
tuits['fecha_hora_dt'],
format='%Y-%m-%d %H:%M:%S',
utc=True)
tuits.sort_index(inplace=True)
trayectorias = (tuits.groupby([pd.Grouper(key='fecha_hora_dt', freq='1D'),
pd.Grouper(key='fecha_hora_dt', freq='4H', base=2)
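# Editor's usage sketch (not part of the original module); the input file name
# and output layer are assumptions:
#
# trayectorias = get_trayectories('puntos_twitter.geojson', threshold=300)
# trayectorias.to_file('trayectorias.gpkg', driver='GPKG')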
# -*- coding: utf-8 -*-
# run in py3 !!
import os
os.environ["CUDA_VISIBLE_DEVICES"] = "1";
import tensorflow as tf
config = tf.ConfigProto()
# config.gpu_options.per_process_gpu_memory_fraction=0.5
config.gpu_options.allow_growth = True
tf.Session(config=config)
import numpy as np
from sklearn import preprocessing
import tensorflow as tf
import time
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
from sklearn.datasets import load_boston
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_squared_error
import pandas as pd
from keras import backend as K
import keras.layers.convolutional as conv
from keras.layers import merge
from keras.wrappers.scikit_learn import KerasRegressor
from keras import utils
from keras.layers.pooling import MaxPooling1D, MaxPooling2D
from keras.layers import pooling
from keras.models import Sequential, Model
from keras.regularizers import l1, l2
from keras import layers
from keras.layers import Dense, Dropout, Activation, Flatten, Input, Convolution1D, Convolution2D, LSTM
from keras.optimizers import SGD, RMSprop
from keras.layers.normalization import BatchNormalization
from keras import initializers
from keras.callbacks import EarlyStopping
from keras import callbacks
from keras import backend as K
from keras.utils import to_categorical
from keras.callbacks import EarlyStopping, ModelCheckpoint, Callback
from keras.models import Model
from keras import initializers, layers
from keras.optimizers import SGD, Adadelta, Adam
from keras.regularizers import l1, l2
from keras import regularizers
import sys
sys.path.append('.')
from hist_figure import his_figures
if len(sys.argv) > 1:
prefix = sys.argv[1]
else:
prefix = time.time()
DATAPATH = '5fold/'
RESULT_PATH = './results/'
feature_num = 25
batch_num = 2
# batch_size = 32
batch_size = 512
SEQ_LENGTH = 20
STATEFUL = False
scaler = None # tmp, for fit_transform
# id,usage,date,com_date,week,month,year
# com_date,date,id,month,usage,week,year
def get_data(path_to_dataset='df_dh.csv', sequence_length=20, stateful=False, issplit=True):
fold_index = 1
###
dtypes = {'sub': 'float', 'super': 'float', 'error': 'float', 'com_date': 'int', 'week': 'str', 'month': 'str',
'year': 'str', 'numbers': 'int', 'log': 'float', 'id': 'str', 'usage': 'float'}
parse_dates = ['date']
print(path_to_dataset)
df = pd.read_csv(DATAPATH + path_to_dataset, header=0, dtype=dtypes, parse_dates=parse_dates, encoding="utf-8")
# print(path_to_dataset)
print(df.columns)
df = df[df['error'] >= 0]
# df_test = pd.read_csv(DATAPATH+"test"+str(fold_index)+".csv", header = 0, dtype=dtypes, parse_dates=parse_dates,encoding="utf-8")
def helper(x):
split = list(map(int, x.strip('[').strip(']').split(',')))
d = {}
for counter, value in enumerate(split):
k = str(len(split)) + "-" + str(counter)
d[k] = value
return d
# df_train_temp = df_train['week'].apply(helper).apply(pd.Series)
df_week = df['week'].apply(helper).apply(pd.Series).as_matrix() # 7
df_month = df['month'].apply(helper).apply(pd.Series).as_matrix() # 12
df_year = df['year'].apply(helper).apply(pd.Series).as_matrix() # 3
df_empty = df[['super', 'com_date', 'error', 'numbers']].copy()
# print(df_empty)
df_super = df_empty.ix[:, [0]]
df_com_date = df_empty.ix[:, [1]]
df_error = df_empty.ix[:, [2]]
df_numbers = df_empty.ix[:, [3]]
X_train_ = np.column_stack((df_super, df_com_date, df_numbers, df_week, df_month))
Y_train_ = df_error.as_matrix()
ss_x = preprocessing.MaxAbsScaler()
ss_y = preprocessing.MaxAbsScaler()
global scaler
scaler = ss_y
# ss_x = preprocessing.StandardScaler()
array_new = ss_x.fit_transform(df_empty.ix[:, [0]])
df_super = pd.DataFrame(array_new)
array_new = ss_x.fit_transform(df_empty.ix[:, [1]])
df_com_date = pd.DataFrame(array_new)
array_new = ss_x.fit_transform(df_empty.ix[:, [3]])
df_numbers = pd.DataFrame(array_new)
array_new = ss_y.fit_transform(df_empty.ix[:, [2]])
df_error = pd.DataFrame(array_new)
df_week = ss_x.fit_transform(df_week)
df_week = pd.DataFrame(df_week)
df_month = ss_x.fit_transform(df_month)
df_month = pd.DataFrame(df_month)
X_train = np.column_stack((df_super, df_com_date, df_numbers, df_week, df_month))
Y_train = df_error.as_matrix()
print('Xshape:' + str(X_train.shape))
print('Yshape:' + str(Y_train.shape))
y_arr = Y_train.T.tolist()
# print(y_arr)
try:
y_arr = ss_y.inverse_transform(y_arr)
#draw_error_line(y_arr[0], df)
#draw_error_bar(y_arr[0])
except Exception as e:
print(e)
if not issplit:
print('Xshape:' + str(X_train.shape))
print('Yshape:' + str(Y_train.shape))
X_train, X_test, Y_train, Y_test = train_test_split(X_train_, Y_train_, test_size=0.1, shuffle=False)
X_train, X_val, Y_train, Y_val = train_test_split(X_train, Y_train, test_size=0.1, shuffle=False)
return X_train, Y_train, X_test, Y_test, X_val, Y_val
else:
return split_CV(X_train, Y_train, sequence_length=sequence_length, stateful=False)
import datetime
def get_data_single_user(path_to_dataset='df_dh.csv', sequence_length=20, stateful=False, issplit=True):
fold_index = 1
###
dtypes = {'sub': 'float', 'super': 'float', 'error': 'float', 'com_date': 'int', 'week': 'str', 'month': 'str',
'year': 'str', 'numbers': 'int', 'log': 'float', 'id': 'str', 'usage': 'float'}
parse_dates = ['date']
print('$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$' + path_to_dataset)
df = pd.read_csv(DATAPATH + path_to_dataset, header=0, dtype=dtypes, parse_dates=parse_dates, encoding="utf-8")
# print(path_to_dataset)
print(df.columns)
df = df[df['usage'] >= 0]
# df_test = pd.read_csv(DATAPATH+"test"+str(fold_index)+".csv", header = 0, dtype=dtypes, parse_dates=parse_dates,encoding="utf-8")
def helper(x):
split = list(map(int, x.strip('[').strip(']').split(',')))
d = {}
for counter, value in enumerate(split):
k = str(len(split)) + "-" + str(counter)
d[k] = value
return d
# df_train_temp = df_train['week'].apply(helper).apply(pd.Series)
df_week = df['week'].apply(helper).apply(pd.Series).as_matrix() # 7
df_month = df['month'].apply(helper).apply(pd.Series).as_matrix() # 12
df_year = df['year'].apply(helper).apply(pd.Series).as_matrix() # 3
df_empty = df[['com_date', 'usage']].copy()
# print(df_empty)
df_com_date = df_empty.ix[:, [0]]
df_usage = df_empty.ix[:, [1]]
ss_x = preprocessing.MaxAbsScaler()
ss_y = preprocessing.MaxAbsScaler()
global scaler
scaler = ss_y
# ss_x = preprocessing.StandardScaler()
array_new = ss_x.fit_transform(df_empty.ix[:, [0]])
df_com_date = pd.DataFrame(array_new)
array_new = ss_y.fit_transform(df_empty.ix[:, [1]])
df_usage = pd.DataFrame(array_new)
df_week = ss_x.fit_transform(df_week)
df_week = pd.DataFrame(df_week)
# -*- coding: utf-8 -*-
# pylint: disable-msg=E1101,W0612
import nose
import numpy as np
from numpy import nan
import pandas as pd
from distutils.version import LooseVersion
from pandas import (Index, Series, DataFrame, Panel, isnull,
date_range, period_range)
from pandas.core.index import MultiIndex
import pandas.core.common as com
from pandas.compat import range, zip
from pandas import compat
from pandas.util.testing import (assert_series_equal,
assert_frame_equal,
assert_panel_equal,
assert_equal)
import pandas.util.testing as tm
def _skip_if_no_pchip():
try:
from scipy.interpolate import pchip_interpolate # noqa
except ImportError:
raise nose.SkipTest('scipy.interpolate.pchip missing')
# ----------------------------------------------------------------------
# Generic types test cases
class Generic(object):
_multiprocess_can_split_ = True
def setUp(self):
pass
@property
def _ndim(self):
return self._typ._AXIS_LEN
def _axes(self):
""" return the axes for my object typ """
return self._typ._AXIS_ORDERS
def _construct(self, shape, value=None, dtype=None, **kwargs):
""" construct an object for the given shape
if value is specified use that if its a scalar
if value is an array, repeat it as needed """
if isinstance(shape, int):
shape = tuple([shape] * self._ndim)
if value is not None:
if np.isscalar(value):
if value == 'empty':
arr = None
# remove the info axis
kwargs.pop(self._typ._info_axis_name, None)
else:
arr = np.empty(shape, dtype=dtype)
arr.fill(value)
else:
fshape = np.prod(shape)
arr = value.ravel()
new_shape = fshape / arr.shape[0]
if fshape % arr.shape[0] != 0:
raise Exception("invalid value passed in _construct")
arr = np.repeat(arr, new_shape).reshape(shape)
else:
arr = np.random.randn(*shape)
return self._typ(arr, dtype=dtype, **kwargs)
def _compare(self, result, expected):
self._comparator(result, expected)
def test_rename(self):
# single axis
for axis in self._axes():
kwargs = {axis: list('ABCD')}
obj = self._construct(4, **kwargs)
# no values passed
# self.assertRaises(Exception, o.rename(str.lower))
# rename a single axis
result = obj.rename(**{axis: str.lower})
expected = obj.copy()
setattr(expected, axis, list('abcd'))
self._compare(result, expected)
# multiple axes at once
def test_get_numeric_data(self):
n = 4
kwargs = {}
for i in range(self._ndim):
kwargs[self._typ._AXIS_NAMES[i]] = list(range(n))
# get the numeric data
o = self._construct(n, **kwargs)
result = o._get_numeric_data()
self._compare(result, o)
# non-inclusion
result = o._get_bool_data()
expected = self._construct(n, value='empty', **kwargs)
self._compare(result, expected)
# get the bool data
arr = np.array([True, True, False, True])
o = self._construct(n, value=arr, **kwargs)
result = o._get_numeric_data()
self._compare(result, o)
# _get_numeric_data is includes _get_bool_data, so can't test for
# non-inclusion
def test_get_default(self):
# GH 7725
d0 = "a", "b", "c", "d"
d1 = np.arange(4, dtype='int64')
others = "e", 10
for data, index in ((d0, d1), (d1, d0)):
s = Series(data, index=index)
for i, d in zip(index, data):
self.assertEqual(s.get(i), d)
self.assertEqual(s.get(i, d), d)
self.assertEqual(s.get(i, "z"), d)
for other in others:
self.assertEqual(s.get(other, "z"), "z")
self.assertEqual(s.get(other, other), other)
def test_nonzero(self):
# GH 4633
# look at the boolean/nonzero behavior for objects
obj = self._construct(shape=4)
self.assertRaises(ValueError, lambda: bool(obj == 0))
self.assertRaises(ValueError, lambda: bool(obj == 1))
self.assertRaises(ValueError, lambda: bool(obj))
obj = self._construct(shape=4, value=1)
self.assertRaises(ValueError, lambda: bool(obj == 0))
self.assertRaises(ValueError, lambda: bool(obj == 1))
self.assertRaises(ValueError, lambda: bool(obj))
obj = self._construct(shape=4, value=np.nan)
self.assertRaises(ValueError, lambda: bool(obj == 0))
self.assertRaises(ValueError, lambda: bool(obj == 1))
self.assertRaises(ValueError, lambda: bool(obj))
# empty
obj = self._construct(shape=0)
self.assertRaises(ValueError, lambda: bool(obj))
# invalid behaviors
obj1 = self._construct(shape=4, value=1)
obj2 = self._construct(shape=4, value=1)
def f():
if obj1:
com.pprint_thing("this works and shouldn't")
self.assertRaises(ValueError, f)
self.assertRaises(ValueError, lambda: obj1 and obj2)
self.assertRaises(ValueError, lambda: obj1 or obj2)
self.assertRaises(ValueError, lambda: not obj1)
def test_numpy_1_7_compat_numeric_methods(self):
# GH 4435
# numpy in 1.7 tries to pass additional arguments to pandas functions
o = self._construct(shape=4)
for op in ['min', 'max', 'max', 'var', 'std', 'prod', 'sum', 'cumsum',
'cumprod', 'median', 'skew', 'kurt', 'compound', 'cummax',
'cummin', 'all', 'any']:
f = getattr(np, op, None)
if f is not None:
f(o)
def test_downcast(self):
# test close downcasting
o = self._construct(shape=4, value=9, dtype=np.int64)
result = o.copy()
result._data = o._data.downcast(dtypes='infer')
self._compare(result, o)
o = self._construct(shape=4, value=9.)
expected = o.astype(np.int64)
result = o.copy()
result._data = o._data.downcast(dtypes='infer')
self._compare(result, expected)
o = self._construct(shape=4, value=9.5)
result = o.copy()
result._data = o._data.downcast(dtypes='infer')
self._compare(result, o)
# are close
o = self._construct(shape=4, value=9.000000000005)
result = o.copy()
result._data = o._data.downcast(dtypes='infer')
expected = o.astype(np.int64)
self._compare(result, expected)
def test_constructor_compound_dtypes(self):
# GH 5191
# compound dtypes should raise not-implementederror
def f(dtype):
return self._construct(shape=3, dtype=dtype)
self.assertRaises(NotImplementedError, f, [("A", "datetime64[h]"),
("B", "str"),
("C", "int32")])
# these work (though results may be unexpected)
f('int64')
f('float64')
f('M8[ns]')
def check_metadata(self, x, y=None):
for m in x._metadata:
v = getattr(x, m, None)
if y is None:
self.assertIsNone(v)
else:
self.assertEqual(v, getattr(y, m, None))
def test_metadata_propagation(self):
# check that the metadata matches up on the resulting ops
o = self._construct(shape=3)
o.name = 'foo'
o2 = self._construct(shape=3)
o2.name = 'bar'
# TODO
# Once panel can do non-trivial combine operations
# (currently there is an a raise in the Panel arith_ops to prevent
# this, though it actually does work)
# can remove all of these try: except: blocks on the actual operations
# ----------
# preserving
# ----------
# simple ops with scalars
for op in ['__add__', '__sub__', '__truediv__', '__mul__']:
result = getattr(o, op)(1)
self.check_metadata(o, result)
# ops with like
for op in ['__add__', '__sub__', '__truediv__', '__mul__']:
try:
result = getattr(o, op)(o)
self.check_metadata(o, result)
except (ValueError, AttributeError):
pass
# simple boolean
for op in ['__eq__', '__le__', '__ge__']:
v1 = getattr(o, op)(o)
self.check_metadata(o, v1)
try:
self.check_metadata(o, v1 & v1)
except (ValueError):
pass
try:
self.check_metadata(o, v1 | v1)
except (ValueError):
pass
# combine_first
try:
result = o.combine_first(o2)
self.check_metadata(o, result)
except (AttributeError):
pass
# ---------------------------
# non-preserving (by default)
# ---------------------------
# add non-like
try:
result = o + o2
self.check_metadata(result)
except (ValueError, AttributeError):
pass
# simple boolean
for op in ['__eq__', '__le__', '__ge__']:
# this is a name matching op
v1 = getattr(o, op)(o)
v2 = getattr(o, op)(o2)
self.check_metadata(v2)
try:
self.check_metadata(v1 & v2)
except (ValueError):
pass
try:
self.check_metadata(v1 | v2)
except (ValueError):
pass
def test_head_tail(self):
# GH5370
o = self._construct(shape=10)
# check all index types
for index in [tm.makeFloatIndex, tm.makeIntIndex, tm.makeStringIndex,
tm.makeUnicodeIndex, tm.makeDateIndex,
tm.makePeriodIndex]:
axis = o._get_axis_name(0)
setattr(o, axis, index(len(getattr(o, axis))))
# Panel + dims
try:
o.head()
except (NotImplementedError):
raise nose.SkipTest('not implemented on {0}'.format(
o.__class__.__name__))
self._compare(o.head(), o.iloc[:5])
self._compare(o.tail(), o.iloc[-5:])
# 0-len
self._compare(o.head(0), o.iloc[0:0])
self._compare(o.tail(0), o.iloc[0:0])
# bounded
self._compare(o.head(len(o) + 1), o)
self._compare(o.tail(len(o) + 1), o)
# neg index
self._compare(o.head(-3), o.head(7))
self._compare(o.tail(-3), o.tail(7))
def test_sample(self):
# Fixes issue: 2419
o = self._construct(shape=10)
###
# Check behavior of random_state argument
###
# Check for stability when receives seed or random state -- run 10
# times.
for test in range(10):
seed = np.random.randint(0, 100)
self._compare(
o.sample(n=4, random_state=seed), o.sample(n=4,
random_state=seed))
self._compare(
o.sample(frac=0.7, random_state=seed), o.sample(
frac=0.7, random_state=seed))
self._compare(
o.sample(n=4, random_state=np.random.RandomState(test)),
o.sample(n=4, random_state=np.random.RandomState(test)))
self._compare(
o.sample(frac=0.7, random_state=np.random.RandomState(test)),
o.sample(frac=0.7, random_state=np.random.RandomState(test)))
# Check for error when random_state argument invalid.
with tm.assertRaises(ValueError):
o.sample(random_state='astring!')
###
# Check behavior of `frac` and `N`
###
# Giving both frac and N throws error
with tm.assertRaises(ValueError):
o.sample(n=3, frac=0.3)
# Check that raises right error for negative lengths
with tm.assertRaises(ValueError):
o.sample(n=-3)
with tm.assertRaises(ValueError):
o.sample(frac=-0.3)
# Make sure float values of `n` give error
with tm.assertRaises(ValueError):
o.sample(n=3.2)
# Check lengths are right
self.assertTrue(len(o.sample(n=4) == 4))
self.assertTrue(len(o.sample(frac=0.34) == 3))
self.assertTrue(len(o.sample(frac=0.36) == 4))
###
# Check weights
###
# Weight length must be right
with tm.assertRaises(ValueError):
o.sample(n=3, weights=[0, 1])
with tm.assertRaises(ValueError):
bad_weights = [0.5] * 11
o.sample(n=3, weights=bad_weights)
with tm.assertRaises(ValueError):
bad_weight_series = Series([0, 0, 0.2])
o.sample(n=4, weights=bad_weight_series)
# Check won't accept negative weights
with tm.assertRaises(ValueError):
bad_weights = [-0.1] * 10
o.sample(n=3, weights=bad_weights)
# Check inf and -inf throw errors:
with tm.assertRaises(ValueError):
weights_with_inf = [0.1] * 10
weights_with_inf[0] = np.inf
o.sample(n=3, weights=weights_with_inf)
with tm.assertRaises(ValueError):
weights_with_ninf = [0.1] * 10
weights_with_ninf[0] = -np.inf
o.sample(n=3, weights=weights_with_ninf)
# All zeros raises errors
zero_weights = [0] * 10
with tm.assertRaises(ValueError):
o.sample(n=3, weights=zero_weights)
# All missing weights
nan_weights = [np.nan] * 10
with tm.assertRaises(ValueError):
o.sample(n=3, weights=nan_weights)
# A few dataframe test with degenerate weights.
easy_weight_list = [0] * 10
easy_weight_list[5] = 1
df = pd.DataFrame({'col1': range(10, 20),
'col2': range(20, 30),
'colString': ['a'] * 10,
'easyweights': easy_weight_list})
sample1 = df.sample(n=1, weights='easyweights')
assert_frame_equal(sample1, df.iloc[5:6])
# Ensure proper error if string given as weight for Series, panel, or
# DataFrame with axis = 1.
s = Series(range(10))
with tm.assertRaises(ValueError):
s.sample(n=3, weights='weight_column')
panel = pd.Panel(items=[0, 1, 2], major_axis=[2, 3, 4],
minor_axis=[3, 4, 5])
with tm.assertRaises(ValueError):
panel.sample(n=1, weights='weight_column')
with tm.assertRaises(ValueError):
df.sample(n=1, weights='weight_column', axis=1)
# Check weighting key error
with tm.assertRaises(KeyError):
df.sample(n=3, weights='not_a_real_column_name')
# Check np.nan are replaced by zeros.
weights_with_nan = [np.nan] * 10
weights_with_nan[5] = 0.5
self._compare(
o.sample(n=1, axis=0, weights=weights_with_nan), o.iloc[5:6])
# Check None are also replaced by zeros.
weights_with_None = [None] * 10
weights_with_None[5] = 0.5
self._compare(
o.sample(n=1, axis=0, weights=weights_with_None), o.iloc[5:6])
# Check that re-normalizes weights that don't sum to one.
weights_less_than_1 = [0] * 10
weights_less_than_1[0] = 0.5
tm.assert_frame_equal(
df.sample(n=1, weights=weights_less_than_1), df.iloc[:1])
###
# Test axis argument
###
# Test axis argument
df = pd.DataFrame({'col1': range(10), 'col2': ['a'] * 10})
second_column_weight = [0, 1]
assert_frame_equal(
df.sample(n=1, axis=1, weights=second_column_weight), df[['col2']])
# Different axis arg types
assert_frame_equal(df.sample(n=1, axis='columns',
weights=second_column_weight),
df[['col2']])
weight = [0] * 10
weight[5] = 0.5
assert_frame_equal(df.sample(n=1, axis='rows', weights=weight),
df.iloc[5:6])
assert_frame_equal(df.sample(n=1, axis='index', weights=weight),
df.iloc[5:6])
# Check out of range axis values
with tm.assertRaises(ValueError):
df.sample(n=1, axis=2)
with tm.assertRaises(ValueError):
df.sample(n=1, axis='not_a_name')
with tm.assertRaises(ValueError):
s = pd.Series(range(10))
s.sample(n=1, axis=1)
# Test weight length compared to correct axis
with tm.assertRaises(ValueError):
df.sample(n=1, axis=1, weights=[0.5] * 10)
# Check weights with axis = 1
easy_weight_list = [0] * 3
easy_weight_list[2] = 1
df = pd.DataFrame({'col1': range(10, 20),
'col2': range(20, 30),
'colString': ['a'] * 10})
sample1 = df.sample(n=1, axis=1, weights=easy_weight_list)
assert_frame_equal(sample1, df[['colString']])
# Test default axes
p = pd.Panel(items=['a', 'b', 'c'], major_axis=[2, 4, 6],
minor_axis=[1, 3, 5])
assert_panel_equal(
p.sample(n=3, random_state=42), p.sample(n=3, axis=1,
random_state=42))
assert_frame_equal(
df.sample(n=3, random_state=42), df.sample(n=3, axis=0,
random_state=42))
# Test that function aligns weights with frame
df = DataFrame(
{'col1': [5, 6, 7],
'col2': ['a', 'b', 'c'], }, index=[9, 5, 3])
s = Series([1, 0, 0], index=[3, 5, 9])
assert_frame_equal(df.loc[[3]], df.sample(1, weights=s))
# Weights have index values to be dropped because not in
# sampled DataFrame
s2 = Series([0.001, 0, 10000], index=[3, 5, 10])
assert_frame_equal(df.loc[[3]], df.sample(1, weights=s2))
        # Weights have empty values to be filled with zeros
s3 = Series([0.01, 0], index=[3, 5])
assert_frame_equal(df.loc[[3]], df.sample(1, weights=s3))
# No overlap in weight and sampled DataFrame indices
s4 = Series([1, 0], index=[1, 2])
with tm.assertRaises(ValueError):
df.sample(1, weights=s4)
def test_size_compat(self):
# GH8846
# size property should be defined
o = self._construct(shape=10)
self.assertTrue(o.size == np.prod(o.shape))
self.assertTrue(o.size == 10 ** len(o.axes))
def test_split_compat(self):
# xref GH8846
o = self._construct(shape=10)
self.assertTrue(len(np.array_split(o, 5)) == 5)
self.assertTrue(len(np.array_split(o, 2)) == 2)
def test_unexpected_keyword(self): # GH8597
from pandas.util.testing import assertRaisesRegexp
df = DataFrame(np.random.randn(5, 2), columns=['jim', 'joe'])
ca = pd.Categorical([0, 0, 2, 2, 3, np.nan])
ts = df['joe'].copy()
ts[2] = np.nan
with assertRaisesRegexp(TypeError, 'unexpected keyword'):
df.drop('joe', axis=1, in_place=True)
with assertRaisesRegexp(TypeError, 'unexpected keyword'):
df.reindex([1, 0], inplace=True)
with assertRaisesRegexp(TypeError, 'unexpected keyword'):
ca.fillna(0, inplace=True)
with | assertRaisesRegexp(TypeError, 'unexpected keyword') | pandas.util.testing.assertRaisesRegexp |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.11.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # imports
import pandas as pd
import numpy as np
import time
# # Get data
positions = | pd.read_csv("../../../Data/positions.csv") | pandas.read_csv |
import pandas as pd
from sklearn.preprocessing import LabelEncoder
from labels import *
def transformDataHotEncoding(df, labels=None):
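    # Binary object columns are label-encoded in place; object columns with more
    # than two categories are one-hot encoded via get_dummies and the original
    # column is dropped. Non-object columns pass through unchanged.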
    if labels is None:
labels = df.columns
for col in labels:
if df[col].dtypes == "object":
if len(df[col].unique()) == 2:
df[col] = LabelEncoder().fit_transform(df[col])
else:
dummies = pd.get_dummies(df[col], prefix=col)
df = pd.concat([df, dummies], axis=1)
df = df.drop(col, axis=1)
return df
def transformDataLabelEncoding(df, labels=None, mode="auto"):
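    # mode="auto" fits a fresh LabelEncoder on every listed column; mode="manual"
    # delegates to transformTolabel (star-imported from labels), which presumably
    # applies a predefined ordinal mapping.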
    if labels is None:
labels = df.columns
for col in labels:
if mode == "auto":
df[col] = LabelEncoder().fit_transform(df[col])
if mode == "manual":
df[col] = transformTolabel(df[col], col)
return df
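# Usage sketch (illustrative only; the file path and column names are taken from
# the commented-out block below, not re-verified against the actual data):
#   df = pd.read_csv("./dataset/raw_german_credit.csv", sep=",", header=0)
#   df = transformDataHotEncoding(df, labels=["purpose", "housing", "telephone"])
#   df = transformDataLabelEncoding(df, labels=["savings"], mode="auto")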
# Create a csv with transformed variables
if __name__ == "__main__":
# quantitativeLabel = [
# "credit_history",
# "purpose",
# "installment_as_income_perc",
# "personal_status_sex",
# "other_debtors",
# "present_res_since",
# "property",
# "other_installment_plans",
# "housing",
# "credits_this_bank",
# "job",
# "people_under_maintenance",
# "telephone",
# "foreign_worker",
# ]
# quantitativeLabelOrdered = ["account_check_status", "savings", "present_emp_since"]
# df = pd.read_csv("./dataset/raw_german_credit.csv", sep=",", header=0)
# df = transformDataHotEncoding(df, quantitativeLabel)
# df = transformDataLabelEncoding(df, labels=quantitativeLabelOrdered, mode="auto")
# df.to_csv("dataset/processedData.csv", index=False)
df = | pd.read_csv("../dataset/Database_Encodage.csv", sep=",", header=0) | pandas.read_csv |
import pandas as pd
from business_rules.operators import (DataframeType, StringType,
NumericType, BooleanType, SelectType,
SelectMultipleType, GenericType)
from . import TestCase
from decimal import Decimal
import sys
import pandas
class StringOperatorTests(TestCase):
def test_operator_decorator(self):
self.assertTrue(StringType("foo").equal_to.is_operator)
def test_string_equal_to(self):
self.assertTrue(StringType("foo").equal_to("foo"))
self.assertFalse(StringType("foo").equal_to("Foo"))
def test_string_not_equal_to(self):
self.assertTrue(StringType("foo").not_equal_to("Foo"))
self.assertTrue(StringType("foo").not_equal_to("boo"))
self.assertFalse(StringType("foo").not_equal_to("foo"))
def test_string_equal_to_case_insensitive(self):
self.assertTrue(StringType("foo").equal_to_case_insensitive("FOo"))
self.assertTrue(StringType("foo").equal_to_case_insensitive("foo"))
self.assertFalse(StringType("foo").equal_to_case_insensitive("blah"))
def test_string_starts_with(self):
self.assertTrue(StringType("hello").starts_with("he"))
self.assertFalse(StringType("hello").starts_with("hey"))
self.assertFalse(StringType("hello").starts_with("He"))
def test_string_ends_with(self):
self.assertTrue(StringType("hello").ends_with("lo"))
self.assertFalse(StringType("hello").ends_with("boom"))
self.assertFalse(StringType("hello").ends_with("Lo"))
def test_string_contains(self):
self.assertTrue(StringType("hello").contains("ell"))
self.assertTrue(StringType("hello").contains("he"))
self.assertTrue(StringType("hello").contains("lo"))
self.assertFalse(StringType("hello").contains("asdf"))
self.assertFalse(StringType("hello").contains("ElL"))
def test_string_matches_regex(self):
self.assertTrue(StringType("hello").matches_regex(r"^h"))
self.assertFalse(StringType("hello").matches_regex(r"^sh"))
def test_non_empty(self):
self.assertTrue(StringType("hello").non_empty())
self.assertFalse(StringType("").non_empty())
self.assertFalse(StringType(None).non_empty())
class NumericOperatorTests(TestCase):
def test_instantiate(self):
err_string = "foo is not a valid numeric type"
with self.assertRaisesRegexp(AssertionError, err_string):
NumericType("foo")
def test_numeric_type_validates_and_casts_decimal(self):
ten_dec = Decimal(10)
ten_int = 10
ten_float = 10.0
if sys.version_info[0] == 2:
ten_long = long(10)
else:
ten_long = int(10) # long and int are same in python3
ten_var_dec = NumericType(ten_dec) # this should not throw an exception
ten_var_int = NumericType(ten_int)
ten_var_float = NumericType(ten_float)
ten_var_long = NumericType(ten_long)
self.assertTrue(isinstance(ten_var_dec.value, Decimal))
self.assertTrue(isinstance(ten_var_int.value, Decimal))
self.assertTrue(isinstance(ten_var_float.value, Decimal))
self.assertTrue(isinstance(ten_var_long.value, Decimal))
def test_numeric_equal_to(self):
self.assertTrue(NumericType(10).equal_to(10))
self.assertTrue(NumericType(10).equal_to(10.0))
self.assertTrue(NumericType(10).equal_to(10.000001))
self.assertTrue(NumericType(10.000001).equal_to(10))
self.assertTrue(NumericType(Decimal('10.0')).equal_to(10))
self.assertTrue(NumericType(10).equal_to(Decimal('10.0')))
self.assertFalse(NumericType(10).equal_to(10.00001))
self.assertFalse(NumericType(10).equal_to(11))
def test_numeric_not_equal_to(self):
self.assertTrue(NumericType(10).not_equal_to(10.00001))
self.assertTrue(NumericType(10).not_equal_to(11))
self.assertTrue(NumericType(Decimal('10.0')).not_equal_to(Decimal('10.1')))
self.assertFalse(NumericType(10).not_equal_to(10))
self.assertFalse(NumericType(10).not_equal_to(10.0))
self.assertFalse(NumericType(Decimal('10.0')).not_equal_to(Decimal('10.0')))
def test_other_value_not_numeric(self):
error_string = "10 is not a valid numeric type"
with self.assertRaisesRegexp(AssertionError, error_string):
NumericType(10).equal_to("10")
def test_numeric_greater_than(self):
self.assertTrue(NumericType(10).greater_than(1))
self.assertFalse(NumericType(10).greater_than(11))
self.assertTrue(NumericType(10.1).greater_than(10))
self.assertFalse(NumericType(10.000001).greater_than(10))
self.assertTrue(NumericType(10.000002).greater_than(10))
def test_numeric_greater_than_or_equal_to(self):
self.assertTrue(NumericType(10).greater_than_or_equal_to(1))
self.assertFalse(NumericType(10).greater_than_or_equal_to(11))
self.assertTrue(NumericType(10.1).greater_than_or_equal_to(10))
self.assertTrue(NumericType(10.000001).greater_than_or_equal_to(10))
self.assertTrue(NumericType(10.000002).greater_than_or_equal_to(10))
self.assertTrue(NumericType(10).greater_than_or_equal_to(10))
def test_numeric_less_than(self):
self.assertTrue(NumericType(1).less_than(10))
self.assertFalse(NumericType(11).less_than(10))
self.assertTrue(NumericType(10).less_than(10.1))
self.assertFalse(NumericType(10).less_than(10.000001))
self.assertTrue(NumericType(10).less_than(10.000002))
def test_numeric_less_than_or_equal_to(self):
self.assertTrue(NumericType(1).less_than_or_equal_to(10))
self.assertFalse(NumericType(11).less_than_or_equal_to(10))
self.assertTrue(NumericType(10).less_than_or_equal_to(10.1))
self.assertTrue(NumericType(10).less_than_or_equal_to(10.000001))
self.assertTrue(NumericType(10).less_than_or_equal_to(10.000002))
self.assertTrue(NumericType(10).less_than_or_equal_to(10))
class BooleanOperatorTests(TestCase):
def test_instantiate(self):
err_string = "foo is not a valid boolean type"
with self.assertRaisesRegexp(AssertionError, err_string):
BooleanType("foo")
err_string = "None is not a valid boolean type"
with self.assertRaisesRegexp(AssertionError, err_string):
BooleanType(None)
def test_boolean_is_true_and_is_false(self):
self.assertTrue(BooleanType(True).is_true())
self.assertFalse(BooleanType(True).is_false())
self.assertFalse(BooleanType(False).is_true())
self.assertTrue(BooleanType(False).is_false())
class SelectOperatorTests(TestCase):
def test_contains(self):
self.assertTrue(SelectType([1, 2]).contains(2))
self.assertFalse(SelectType([1, 2]).contains(3))
self.assertTrue(SelectType([1, 2, "a"]).contains("A"))
def test_does_not_contain(self):
self.assertTrue(SelectType([1, 2]).does_not_contain(3))
self.assertFalse(SelectType([1, 2]).does_not_contain(2))
self.assertFalse(SelectType([1, 2, "a"]).does_not_contain("A"))
class SelectMultipleOperatorTests(TestCase):
def test_contains_all(self):
self.assertTrue(SelectMultipleType([1, 2]).
contains_all([2, 1]))
self.assertFalse(SelectMultipleType([1, 2]).
contains_all([2, 3]))
self.assertTrue(SelectMultipleType([1, 2, "a"]).
contains_all([2, 1, "A"]))
def test_is_contained_by(self):
self.assertTrue(SelectMultipleType([1, 2]).
is_contained_by([2, 1, 3]))
self.assertFalse(SelectMultipleType([1, 2]).
is_contained_by([2, 3, 4]))
self.assertTrue(SelectMultipleType([1, 2, "a"]).
is_contained_by([2, 1, "A"]))
def test_shares_at_least_one_element_with(self):
self.assertTrue(SelectMultipleType([1, 2]).
shares_at_least_one_element_with([2, 3]))
self.assertFalse(SelectMultipleType([1, 2]).
shares_at_least_one_element_with([4, 3]))
self.assertTrue(SelectMultipleType([1, 2, "a"]).
shares_at_least_one_element_with([4, "A"]))
def test_shares_exactly_one_element_with(self):
self.assertTrue(SelectMultipleType([1, 2]).
shares_exactly_one_element_with([2, 3]))
self.assertFalse(SelectMultipleType([1, 2]).
shares_exactly_one_element_with([4, 3]))
self.assertTrue(SelectMultipleType([1, 2, "a"]).
shares_exactly_one_element_with([4, "A"]))
self.assertFalse(SelectMultipleType([1, 2, 3]).
shares_exactly_one_element_with([2, 3, "a"]))
def test_shares_no_elements_with(self):
self.assertTrue(SelectMultipleType([1, 2]).
shares_no_elements_with([4, 3]))
self.assertFalse(SelectMultipleType([1, 2]).
shares_no_elements_with([2, 3]))
self.assertFalse(SelectMultipleType([1, 2, "a"]).
shares_no_elements_with([4, "A"]))
class DataframeOperatorTests(TestCase):
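    # As exercised below, each DataframeType operator returns an element-wise
    # boolean pandas.Series aligned with the dataframe rows; "column_prefix_map"
    # lets prefixed targets such as "--r1" resolve to real columns like "var1".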
def test_exists(self):
df = pandas.DataFrame.from_dict({
"var1": [1, 2, 4, ],
"var2": [3, 5, 6, ],
})
result: pd.Series = DataframeType({"value": df}).exists({"target": "var1"})
self.assertTrue(result.equals(pd.Series([True, True, True, ])))
result: pd.Series = DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).exists({"target": "--r1"})
self.assertTrue(result.equals(pd.Series([True, True, True, ])))
result: pd.Series = DataframeType({"value": df}).exists({"target": "invalid"})
self.assertTrue(result.equals(pd.Series([False, False, False, ])))
def test_not_exists(self):
df = pandas.DataFrame.from_dict({
"var1": [1, 2, 4, ],
"var2": [3, 5, 6, ]
})
result: pd.Series = DataframeType({"value": df}).not_exists({"target": "invalid"})
self.assertTrue(result.equals(pd.Series([True, True, True, ])))
result: pd.Series = DataframeType({"value": df}).not_exists({"target": "var1"})
self.assertTrue(result.equals(pd.Series([False, False, False, ])))
result: pd.Series = DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).not_exists({"target": "--r1"})
self.assertTrue(result.equals(pd.Series([False, False, False, ])))
def test_equal_to(self):
df = pandas.DataFrame.from_dict({
"var1": [1, 2, 4, "", 7, ],
"var2": [3, 5, 6, "", 2, ],
"var3": [1, 3, 8, "", 7, ],
"var4": ["test", "issue", "one", "", "two", ]
})
self.assertTrue(DataframeType({"value": df}).equal_to({
"target": "var1",
"comparator": ""
}).equals(pandas.Series([False, False, False, False, False, ])))
self.assertTrue(DataframeType({"value": df}).equal_to({
"target": "var1",
"comparator": 2
}).equals(pandas.Series([False, True, False, False, False, ])))
self.assertTrue(DataframeType({"value": df}).equal_to({
"target": "var1",
"comparator": "var3"
}).equals(pandas.Series([True, False, False, False, True, ])))
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).equal_to({
"target": "--r1",
"comparator": "--r3"
}).equals(pandas.Series([True, False, False, False, True, ])))
self.assertTrue(DataframeType({"value": df}).equal_to({
"target": "var1",
"comparator": "var2"
}).equals(pandas.Series([False, False, False, False, False, ])))
self.assertTrue(DataframeType({"value": df}).equal_to({
"target": "var1",
"comparator": 20
}).equals(pandas.Series([False, False, False, False, False, ])))
self.assertTrue(DataframeType({"value": df}).equal_to({
"target": "var4",
"comparator": "var1",
"value_is_literal": True
}).equals(pandas.Series([False, False, False, False, False, ])))
def test_not_equal_to(self):
df = pandas.DataFrame.from_dict({
"var1": [1,2,4],
"var2": [3,5,6],
"var3": [1,3,8],
"var4": [1,2,4]
})
self.assertTrue(DataframeType({"value": df}).not_equal_to({
"target": "var1",
"comparator": "var4"
}).equals(pandas.Series([False, False, False])))
self.assertTrue(DataframeType({"value": df}).not_equal_to({
"target": "var1",
"comparator": "var2"
}).equals(pandas.Series([True, True, True])))
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).not_equal_to({
"target": "--r1",
"comparator": "--r2"
}).equals(pandas.Series([True, True, True])))
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).not_equal_to({
"target": "--r1",
"comparator": 20
}).equals(pandas.Series([True, True, True])))
def test_equal_to_case_insensitive(self):
df = pandas.DataFrame.from_dict({
"var1": ["word", "", "new", "val"],
"var2": ["WORD", "", "test", "VAL"],
"var3": ["LET", "", "GO", "read"]
})
self.assertTrue(DataframeType({"value": df}).equal_to_case_insensitive({
"target": "var1",
"comparator": "NEW"
}).equals(pandas.Series([False, False, True, False])))
self.assertTrue(DataframeType({"value": df}).equal_to_case_insensitive({
"target": "var1",
"comparator": ""
}).equals(pandas.Series([False, False, False, False])))
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).equal_to_case_insensitive({
"target": "--r1",
"comparator": "--r2"
}).equals(pandas.Series([True, False, False, True])))
self.assertTrue(DataframeType({"value": df}).equal_to_case_insensitive({
"target": "var1",
"comparator": "var2"
}).equals(pandas.Series([True, False, False, True])))
self.assertTrue(DataframeType({"value": df}).equal_to_case_insensitive({
"target": "var1",
"comparator": "var3"
}).equals(pandas.Series([False, False, False, False])))
self.assertTrue(DataframeType({"value": df}).equal_to_case_insensitive({
"target": "var1",
"comparator": "var1",
"value_is_literal": True
}).equals(pandas.Series([False, False, False, False])))
def test_not_equal_to_case_insensitive(self):
df = pandas.DataFrame.from_dict({
"var1": ["word", "new", "val"],
"var2": ["WORD", "test", "VAL"],
"var3": ["LET", "GO", "read"],
"var4": ["WORD", "NEW", "VAL"]
})
self.assertTrue(DataframeType({"value": df}).not_equal_to_case_insensitive({
"target": "var1",
"comparator": "var4"
}).equals(pandas.Series([False, False, False])))
self.assertTrue(DataframeType({"value": df}).not_equal_to_case_insensitive({
"target": "var1",
"comparator": "var2"
}).equals(pandas.Series([False, True, False])))
self.assertTrue(DataframeType({"value": df}).not_equal_to_case_insensitive({
"target": "var1",
"comparator": "var1",
"value_is_literal": True
}).equals(pandas.Series([True, True, True])))
def test_less_than(self):
df = pandas.DataFrame.from_dict({
"var1": [1,2,4],
"var2": [3,5,6],
"var3": [1,3,8],
"var4": [1,2,4]
})
self.assertTrue(DataframeType({"value": df}).less_than({
"target": "var1",
"comparator": "var4"
}).equals(pandas.Series([False, False, False])))
self.assertTrue(DataframeType({"value": df}).less_than({
"target": "var1",
"comparator": "var3"
}).equals(pandas.Series([False, True, True])))
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).less_than({
"target": "--r1",
"comparator": "var3"
}).equals(pandas.Series([False, True, True])))
self.assertTrue(DataframeType({"value": df}).less_than({
"target": "var2",
"comparator": 2
}).equals(pandas.Series([False, False, False])))
self.assertTrue(DataframeType({"value": df}).less_than({
"target": "var1",
"comparator": 3
}).equals(pandas.Series([True, True, False])))
another_df = pandas.DataFrame.from_dict(
{
"LBDY": [4, None, None, None, None]
}
)
self.assertTrue(DataframeType({"value": another_df}).less_than({
"target": "LBDY",
"comparator": 5
}).equals(pandas.Series([True, False, False, False, False, ])))
def test_less_than_or_equal_to(self):
df = pandas.DataFrame.from_dict({
"var1": [1,2,4],
"var2": [3,5,6],
"var3": [1,3,8],
"var4": [1,2,4]
})
self.assertTrue(DataframeType({"value": df}).less_than_or_equal_to({
"target": "var1",
"comparator": "var4"
}).equals(pandas.Series([True, True, True])))
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).less_than_or_equal_to({
"target": "--r1",
"comparator": "var4"
}).equals(pandas.Series([True, True, True])))
self.assertTrue(DataframeType({"value": df}).less_than_or_equal_to({
"target": "var2",
"comparator": "var1"
}).equals(pandas.Series([False, False, False])))
self.assertTrue(DataframeType({"value": df}).less_than_or_equal_to({
"target": "var2",
"comparator": 2
}).equals(pandas.Series([False, False, False])))
self.assertTrue(DataframeType({"value": df}).less_than_or_equal_to({
"target": "var2",
"comparator": "var3"
}).equals(pandas.Series([False, False, True])))
another_df = pandas.DataFrame.from_dict(
{
"LBDY": [4, 5, None, None, None]
}
)
self.assertTrue(DataframeType({"value": another_df}).less_than_or_equal_to({
"target": "LBDY",
"comparator": 5
}).equals(pandas.Series([True, True, False, False, False, ])))
def test_greater_than(self):
df = pandas.DataFrame.from_dict({
"var1": [1,2,4],
"var2": [3,5,6],
"var3": [1,3,8],
"var4": [1,2,4]
})
self.assertTrue(DataframeType({"value": df}).greater_than({
"target": "var1",
"comparator": "var4"
}).equals(pandas.Series([False, False, False])))
self.assertTrue(DataframeType({"value": df}).greater_than({
"target": "var1",
"comparator": "var3"
}).equals(pandas.Series([False, False, False])))
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).greater_than({
"target": "var1",
"comparator": "--r3"
}).equals(pandas.Series([False, False, False])))
self.assertTrue(DataframeType({"value": df}).greater_than({
"target": "var2",
"comparator": 2
}).equals(pandas.Series([True, True, True])))
self.assertTrue(DataframeType({"value": df}).greater_than({
"target": "var1",
"comparator": 5000
}).equals(pandas.Series([False, False, False])))
another_df = pandas.DataFrame.from_dict(
{
"LBDY": [4, None, None, None, None]
}
)
self.assertTrue(DataframeType({"value": another_df}).greater_than({
"target": "LBDY",
"comparator": 3
}).equals(pandas.Series([True, False, False, False, False, ])))
def test_greater_than_or_equal_to(self):
df = pandas.DataFrame.from_dict({
"var1": [1,2,4],
"var2": [3,5,6],
"var3": [1,3,8],
"var4": [1,2,4]
})
self.assertTrue(DataframeType({"value": df}).greater_than_or_equal_to({
"target": "var1",
"comparator": "var4"
}).equals(pandas.Series([True, True, True])))
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).greater_than_or_equal_to({
"target": "var1",
"comparator": "--r4"
}).equals(pandas.Series([True, True, True])))
self.assertTrue(DataframeType({"value": df}).greater_than_or_equal_to({
"target": "var2",
"comparator": "var3"
}).equals(pandas.Series([True, True, False])))
self.assertTrue(DataframeType({"value": df}).greater_than_or_equal_to({
"target": "var2",
"comparator": 2
}).equals(pandas.Series([True, True, True])))
another_df = pandas.DataFrame.from_dict(
{
"LBDY": [4, 3, None, None, None]
}
)
self.assertTrue(DataframeType({"value": another_df}).greater_than_or_equal_to({
"target": "LBDY",
"comparator": 3
}).equals(pandas.Series([True, True, False, False, False, ])))
def test_contains(self):
df = pandas.DataFrame.from_dict({
"var1": [1,2,4],
"var2": [3,5,6],
"var3": [1,3,8],
"var4": [1,2,4],
"string_var": ["hj", "word", "c"],
"var5": [[1,3,5],[1,3,5], [1,3,5]]
})
self.assertTrue(DataframeType({"value": df}).contains({
"target": "var1",
"comparator": 2
}).equals(pandas.Series([False, True, False])))
self.assertTrue(DataframeType({"value": df}).contains({
"target": "var1",
"comparator": "var3"
}).equals(pandas.Series([True, False, False])))
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).contains({
"target": "var1",
"comparator": "--r3"
}).equals(pandas.Series([True, False, False])))
self.assertTrue(DataframeType({"value": df}).contains({
"target": "var1",
"comparator": "var2"
}).equals(pandas.Series([False, False, False])))
self.assertTrue(DataframeType({"value": df}).contains({
"target": "string_var",
"comparator": "string_var"
}).equals(pandas.Series([True, True, True])))
self.assertTrue(DataframeType({"value": df}).contains({
"target": "string_var",
"comparator": "string_var",
"value_is_literal": True
}).equals(pandas.Series([False, False, False])))
self.assertTrue(DataframeType({"value": df}).contains({
"target": "var5",
"comparator": "var1"
}).equals(pandas.Series([True, False, False])))
def test_does_not_contain(self):
df = pandas.DataFrame.from_dict({
"var1": [1,2,4],
"var2": [3,5,6],
"var3": [1,3,8],
"var4": [1,2,4],
"string_var": ["hj", "word", "c"],
"var5": [[1,3,5],[1,3,5], [1,3,5]]
})
self.assertTrue(DataframeType({"value": df}).does_not_contain({
"target": "var1",
"comparator": 5
}).equals(pandas.Series([True, True, True])))
self.assertTrue(DataframeType({"value": df}).does_not_contain({
"target": "var1",
"comparator": "var3"
}).equals(pandas.Series([False, True, True])))
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).does_not_contain({
"target": "var1",
"comparator": "--r3"
}).equals(pandas.Series([False, True, True])))
self.assertTrue(DataframeType({"value": df}).does_not_contain({
"target": "var1",
"comparator": "var2"
}).equals(pandas.Series([True, True, True])))
self.assertTrue(DataframeType({"value": df}).does_not_contain({
"target": "string_var",
"comparator": "string_var",
"value_is_literal": True
}).equals(pandas.Series([True, True, True])))
self.assertTrue(DataframeType({"value": df}).does_not_contain({
"target": "string_var",
"comparator": "string_var"
}).equals(pandas.Series([False, False, False])))
self.assertTrue(DataframeType({"value": df}).does_not_contain({
"target": "var5",
"comparator": "var1"
}).equals(pandas.Series([False, True, True])))
def test_contains_case_insensitive(self):
df = pandas.DataFrame.from_dict({
"var1": ["pikachu", "charmander", "squirtle"],
"var2": ["PIKACHU", "CHARIZARD", "BULBASAUR"],
"var3": ["POKEMON", "CHARIZARD", "BULBASAUR"],
"var4": [
["pikachu", "charizard", "bulbasaur"],
["chikorita", "cyndaquil", "totodile"],
["chikorita", "cyndaquil", "totodile"]
]
})
self.assertTrue(DataframeType({"value": df}).contains_case_insensitive({
"target": "var1",
"comparator": "PIKACHU"
}).equals(pandas.Series([True, False, False])))
self.assertTrue(DataframeType({"value": df}).contains_case_insensitive({
"target": "var1",
"comparator": "var2"
}).equals(pandas.Series([True, False, False])))
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).contains_case_insensitive({
"target": "--r1",
"comparator": "--r2"
}).equals(pandas.Series([True, False, False])))
self.assertTrue(DataframeType({"value": df}).contains_case_insensitive({
"target": "var1",
"comparator": "var3"
}).equals(pandas.Series([False, False, False])))
self.assertTrue(DataframeType({"value": df}).contains_case_insensitive({
"target": "var3",
"comparator": "var3"
}).equals(pandas.Series([True, True, True])))
self.assertTrue(DataframeType({"value": df}).contains_case_insensitive({
"target": "var3",
"comparator": "var3",
"value_is_literal": True
}).equals(pandas.Series([False, False, False])))
self.assertTrue(DataframeType({"value": df}).contains_case_insensitive({
"target": "var4",
"comparator": "var2"
}).equals(pandas.Series([True, False, False])))
def test_does_not_contain_case_insensitive(self):
df = pandas.DataFrame.from_dict({
"var1": ["pikachu", "charmander", "squirtle"],
"var2": ["PIKACHU", "CHARIZARD", "BULBASAUR"],
"var3": ["pikachu", "charizard", "bulbasaur"],
"var4": [
["pikachu", "charizard", "bulbasaur"],
["chikorita", "cyndaquil", "totodile"],
["chikorita", "cyndaquil", "totodile"]
]
})
self.assertTrue(DataframeType({"value": df}).does_not_contain_case_insensitive({
"target": "var1",
"comparator": "IVYSAUR"
}).equals(pandas.Series([True, True, True])))
self.assertTrue(DataframeType({"value": df}).does_not_contain_case_insensitive({
"target": "var3",
"comparator": "var2"
}).equals(pandas.Series([False, False, False])))
self.assertTrue(DataframeType({"value": df}).does_not_contain_case_insensitive({
"target": "var3",
"comparator": "var3",
"value_is_literal": True
}).equals(pandas.Series([True, True, True])))
self.assertTrue(DataframeType({"value": df}).does_not_contain_case_insensitive({
"target": "var3",
"comparator": "var3"
}).equals(pandas.Series([False, False, False])))
self.assertTrue(DataframeType({"value": df}).does_not_contain_case_insensitive({
"target": "var4",
"comparator": "var2"
}).equals(pandas.Series([False, True, True])))
def test_is_contained_by(self):
df = pandas.DataFrame.from_dict({
"var1": [1,2,4],
"var2": [3,5,6],
"var3": [1,3,8],
"var4": [1,2,4],
"var5": [[1,2,3], [1,2], [17]]
})
self.assertTrue(DataframeType({"value": df}).is_contained_by({
"target": "var1",
"comparator": [4,5,6]
}).equals(pandas.Series([False, False, True])))
self.assertTrue(DataframeType({"value": df}).is_contained_by({
"target": "var1",
"comparator": "var3"
}).equals(pandas.Series([True, False, False])))
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).is_contained_by({
"target": "var1",
"comparator": "--r3"
}).equals(pandas.Series([True, False, False])))
self.assertTrue(DataframeType({"value": df}).is_contained_by({
"target": "var1",
"comparator": [9, 10, 11]
}).equals(pandas.Series([False, False, False])))
self.assertTrue(DataframeType({"value": df}).is_contained_by({
"target": "var1",
"comparator": "var2"
}).equals(pandas.Series([False, False, False])))
self.assertTrue(DataframeType({"value": df}).is_contained_by({
"target": "var1",
"comparator": "var5"
}).equals(pandas.Series([True, True, False])))
def test_is_not_contained_by(self):
df = pandas.DataFrame.from_dict({
"var1": [1,2,4],
"var2": [3,5,6],
"var3": [1,3,8],
"var4": [1,2,4],
"var5": [[1,2,3], [1,2], [17]]
})
self.assertTrue(DataframeType({"value": df}).is_not_contained_by({
"target": "var1",
"comparator": "var3"
}).equals(pandas.Series([False, True, True])))
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).is_not_contained_by({
"target": "var1",
"comparator": "--r3"
}).equals(pandas.Series([False, True, True])))
self.assertTrue(DataframeType({"value": df}).is_not_contained_by({
"target": "var1",
"comparator": [9, 10, 11]
}).equals(pandas.Series([True, True, True])))
self.assertTrue(DataframeType({"value": df}).is_not_contained_by({
"target": "var1",
"comparator": "var1"
}).equals(pandas.Series([False, False, False])))
self.assertTrue(DataframeType({"value": df}).is_not_contained_by({
"target": "var1",
"comparator": "var5"
}).equals(pandas.Series([False, False, True])))
def test_is_contained_by_case_insensitive(self):
df = pandas.DataFrame.from_dict({
"var1": ["WORD", "test"],
"var2": ["word", "TEST"],
"var3": ["another", "item"],
"var4": [set(["word"]), set(["test"])]
})
self.assertTrue(DataframeType({"value": df}).is_contained_by_case_insensitive({
"target": "var1",
"comparator": ["word", "TEST"]
}).equals(pandas.Series([True, True])))
self.assertTrue(DataframeType({"value": df}).is_contained_by_case_insensitive({
"target": "var1",
"comparator": "var1"
}).equals(pandas.Series([True, True])))
self.assertTrue(DataframeType({"value": df}).is_contained_by_case_insensitive({
"target": "var1",
"comparator": "var3"
}).equals(pandas.Series([False, False])))
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).is_contained_by_case_insensitive({
"target": "var1",
"comparator": "--r3"
}).equals(pandas.Series([False, False])))
self.assertTrue(DataframeType({"value": df}).is_contained_by_case_insensitive({
"target": "var1",
"comparator": "var4"
}).equals(pandas.Series([True, True])))
self.assertTrue(DataframeType({"value": df}).is_contained_by_case_insensitive({
"target": "var3",
"comparator": "var4"
}).equals(pandas.Series([False, False])))
def test_is_not_contained_by_case_insensitive(self):
df = pandas.DataFrame.from_dict({
"var1": ["WORD", "test"],
"var2": ["word", "TEST"],
"var3": ["another", "item"],
"var4": [set(["word"]), set(["test"])]
})
self.assertTrue(DataframeType({"value": df}).is_not_contained_by_case_insensitive({
"target": "var1",
"comparator": ["word", "TEST"]
}).equals(pandas.Series([False, False])))
self.assertTrue(DataframeType({"value": df}).is_not_contained_by_case_insensitive({
"target": "var1",
"comparator": "var3"
}).equals(pandas.Series([True, True])))
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).is_not_contained_by_case_insensitive({
"target": "var1",
"comparator": "--r3"
}).equals(pandas.Series([True, True])))
self.assertTrue(DataframeType({"value": df}).is_not_contained_by_case_insensitive({
"target": "var1",
"comparator": "var4"
}).equals(pandas.Series([False, False])))
self.assertTrue(DataframeType({"value": df}).is_not_contained_by_case_insensitive({
"target": "var3",
"comparator": "var4"
}).equals(pandas.Series([True, True])))
def test_prefix_matches_regex(self):
df = pandas.DataFrame.from_dict({
"var1": ["WORD", "test"],
"var2": ["word", "TEST"],
"var3": ["another", "item"]
})
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).prefix_matches_regex({
"target": "--r2",
"comparator": "w.*",
"prefix": 2
}).equals(pandas.Series([True, False])))
self.assertTrue(DataframeType({"value": df}).prefix_matches_regex({
"target": "var2",
"comparator": "[0-9].*",
"prefix": 2
}).equals(pandas.Series([False, False])))
def test_suffix_matches_regex(self):
df = pandas.DataFrame.from_dict({
"var1": ["WORD", "test"],
"var2": ["word", "TEST"],
"var3": ["another", "item"]
})
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).suffix_matches_regex({
"target": "--r1",
"comparator": "es.*",
"suffix": 3
}).equals(pandas.Series([False, True])))
self.assertTrue(DataframeType({"value": df}).suffix_matches_regex({
"target": "var1",
"comparator": "[0-9].*",
"suffix": 3
}).equals(pandas.Series([False, False])))
    def test_not_prefix_matches_regex(self):
df = pandas.DataFrame.from_dict({
"var1": ["WORD", "test"],
"var2": ["word", "TEST"],
"var3": ["another", "item"]
})
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).not_prefix_matches_regex({
"target": "--r1",
"comparator": ".*",
"prefix": 2
}).equals(pandas.Series([False, False])))
self.assertTrue(DataframeType({"value": df}).not_prefix_matches_regex({
"target": "var2",
"comparator": "[0-9].*",
"prefix": 2
}).equals(pandas.Series([True, True])))
def test_not_suffix_matches_regex(self):
df = pandas.DataFrame.from_dict({
"var1": ["WORD", "test"],
"var2": ["word", "TEST"],
"var3": ["another", "item"]
})
self.assertTrue(DataframeType({"value": df}).not_suffix_matches_regex({
"target": "var1",
"comparator": ".*",
"suffix": 3
}).equals(pandas.Series([False, False])))
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).not_suffix_matches_regex({
"target": "--r1",
"comparator": "[0-9].*",
"suffix": 3
}).equals(pandas.Series([True, True])))
    def test_matches_regex(self):
df = pandas.DataFrame.from_dict({
"var1": ["WORD", "test"],
"var2": ["word", "TEST"],
"var3": ["another", "item"]
})
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).matches_regex({
"target": "--r1",
"comparator": ".*",
}).equals(pandas.Series([True, True])))
self.assertTrue(DataframeType({"value": df}).matches_regex({
"target": "var2",
"comparator": "[0-9].*",
}).equals(pandas.Series([False, False])))
def test_not_matches_regex(self):
df = pandas.DataFrame.from_dict({
"var1": ["WORD", "test"],
"var2": ["word", "TEST"],
"var3": ["another", "item"]
})
self.assertTrue(DataframeType({"value": df}).not_matches_regex({
"target": "var1",
"comparator": ".*",
}).equals(pandas.Series([False, False])))
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).not_matches_regex({
"target": "--r1",
"comparator": "[0-9].*",
}).equals(pandas.Series([True, True])))
def test_starts_with(self):
df = pandas.DataFrame.from_dict({
"var1": ["WORD", "test"],
"var2": ["word", "TEST"],
"var3": ["another", "item"]
})
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).starts_with({
"target": "--r1",
"comparator": "WO",
}).equals(pandas.Series([True, False])))
self.assertTrue(DataframeType({"value": df}).starts_with({
"target": "var2",
"comparator": "ABC",
}).equals(pandas.Series([False, False])))
def test_ends_with(self):
df = pandas.DataFrame.from_dict({
"var1": ["WORD", "test"],
"var2": ["word", "TEST"],
"var3": ["another", "item"]
})
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).ends_with({
"target": "--r1",
"comparator": "abc",
}).equals(pandas.Series([False, False])))
self.assertTrue(DataframeType({"value": df}).ends_with({
"target": "var1",
"comparator": "est",
}).equals(pandas.Series([False, True])))
def test_has_equal_length(self):
df = pandas.DataFrame.from_dict(
{
"var_1": ['test', 'value']
}
)
df_operator = DataframeType({"value": df, "column_prefix_map": {"--": "va"}})
result = df_operator.has_equal_length({"target": "--r_1", "comparator": 4})
self.assertTrue(result.equals(pandas.Series([True, False])))
def test_has_not_equal_length(self):
df = pandas.DataFrame.from_dict(
{
"var_1": ['test', 'value']
}
)
df_operator = DataframeType({"value": df, "column_prefix_map": {"--": "va"}})
result = df_operator.has_not_equal_length({"target": "--r_1", "comparator": 4})
self.assertTrue(result.equals(pandas.Series([False, True])))
def test_longer_than(self):
df = pandas.DataFrame.from_dict(
{
"var_1": ['test', 'value']
}
)
df_operator = DataframeType({"value": df, "column_prefix_map": {"--": "va"}})
self.assertTrue(df_operator.longer_than({"target": "--r_1", "comparator": 3}).equals(pandas.Series([True, True])))
def test_longer_than_or_equal_to(self):
df = pandas.DataFrame.from_dict(
{
"var_1": ['test', 'alex']
}
)
df_operator = DataframeType({"value": df, "column_prefix_map": {"--": "va"}})
self.assertTrue(df_operator.longer_than_or_equal_to({"target": "--r_1", "comparator": 3}).equals(pandas.Series([True, True])))
self.assertTrue(df_operator.longer_than_or_equal_to({"target": "var_1", "comparator": 4}).equals(pandas.Series([True, True])))
def test_shorter_than(self):
df = pandas.DataFrame.from_dict(
{
"var_1": ['test', 'val']
}
)
df_operator = DataframeType({"value": df, "column_prefix_map": {"--": "va"}})
self.assertTrue(df_operator.shorter_than({"target": "--r_1", "comparator": 5}).equals(pandas.Series([True, True])))
def test_shorter_than_or_equal_to(self):
df = pandas.DataFrame.from_dict(
{
"var_1": ['test', 'alex']
}
)
df_operator = DataframeType({"value": df, "column_prefix_map": {"--": "va"}})
self.assertTrue(df_operator.shorter_than_or_equal_to({"target": "--r_1", "comparator": 5}).equals(pandas.Series([True, True])))
self.assertTrue(df_operator.shorter_than_or_equal_to({"target": "var_1", "comparator": 4}).equals(pandas.Series([True, True])))
def test_contains_all(self):
df = pandas.DataFrame.from_dict(
{
"var1": ['test', 'value', 'word'],
"var2": ["test", "value", "test"]
}
)
self.assertTrue(DataframeType({"value": df}).contains_all({
"target": "var1",
"comparator": "var2",
}))
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).contains_all({
"target": "--r1",
"comparator": "--r2",
}))
self.assertFalse(DataframeType({"value": df}).contains_all({
"target": "var2",
"comparator": "var1",
}))
self.assertTrue(DataframeType({"value": df}).contains_all({
"target": "var2",
"comparator": ["test", "value"],
}))
def test_not_contains_all(self):
df = pandas.DataFrame.from_dict(
{
"var1": ['test', 'value', 'word'],
"var2": ["test", "value", "test"]
}
)
self.assertTrue(DataframeType({"value": df}).contains_all({
"target": "var1",
"comparator": "var2",
}))
self.assertFalse(DataframeType({"value": df}).contains_all({
"target": "var2",
"comparator": "var1",
}))
self.assertTrue(DataframeType({"value": df}).contains_all({
"target": "var2",
"comparator": ["test", "value"],
}))
def test_invalid_date(self):
df = pandas.DataFrame.from_dict(
{
"var1": ['2021', '2021', '2021', '2021', '2099'],
"var2": ["2099", "2022", "2034", "90999", "20999"],
"var3": ["1997-07", "1997-07-16", "1997-07-16T19:20:30.45+01:00", "1997-07-16T19:20:30+01:00", "1997-07-16T19:20+01:00"],
}
)
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).invalid_date({"target": "--r1"})
.equals(pandas.Series([False, False, False, False, False])))
self.assertTrue(DataframeType({"value": df}).invalid_date({"target": "var3"})
.equals(pandas.Series([False, False, False, False, False])))
self.assertTrue(DataframeType({"value": df}).invalid_date({"target": "var2"})
.equals(pandas.Series([False, False, False, True, True])))
def test_date_equal_to(self):
df = pandas.DataFrame.from_dict(
{
"var1": ['2021', '2021', '2021', '2021', '2021'],
"var2": ["2099", "2022", "2034", "90999", "20999"],
"var3": ["1997-07", "1997-07-16", "1997-07-16T19:20:30.45+01:00", "1997-07-16T19:20:30+01:00", "1997-07-16T19:20+01:00"],
"var4": ["1997-07", "1997-07-16", "1997-07-16T19:20:30.45+01:00", "1997-07-16T19:20:30+01:00", "1997-07-16T19:20+01:00"],
"var5": ["1997-08", "1997-08-16", "1997-08-16T19:20:30.45+01:00", "1997-08-16T19:20:30+01:00", "1997-08-16T19:20+01:00"],
"var6": ["1998-08", "1998-08-11", "1998-08-17T20:21:31.46+01:00", "1998-08-17T20:21:31+01:00", "1998-08-17T20:21+01:00"],
"var7": ["", None, "", "", ""]
}
)
self.assertTrue(DataframeType({"value": df}).date_equal_to({"target": "var1", "comparator": '2021'})
.equals(pandas.Series([True, True, True, True, True])))
self.assertTrue(DataframeType({"value": df}).date_equal_to({"target": "var3", "comparator": "1997-07-16T19:20:30.45+01:00"})
.equals(pandas.Series([False, False, True, False, False])))
self.assertTrue(DataframeType({"value": df}).date_equal_to({"target": "var3", "comparator": "var4"})
.equals(pandas.Series([True, True, True, True, True])))
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).date_equal_to({"target": "--r3", "comparator": "--r4", "date_component": "year"})
.equals(pandas.Series([True, True, True, True, True])))
self.assertTrue(DataframeType({"value": df}).date_equal_to({"target": "var3", "comparator": "var5", "date_component": "hour"})
.equals(pandas.Series([True, True, True, True, True])))
self.assertTrue(DataframeType({"value": df}).date_equal_to({"target": "var3", "comparator": "var5", "date_component": "minute"})
.equals(pandas.Series([True, True, True, True, True])))
self.assertTrue(DataframeType({"value": df}).date_equal_to({"target": "var3", "comparator": "var5", "date_component": "second"})
.equals(pandas.Series([True, True, True, True, True])))
self.assertTrue(DataframeType({"value": df}).date_equal_to({"target": "var3", "comparator": "var5", "date_component": "microsecond"})
.equals(pandas.Series([True, True, True, True, True])))
self.assertTrue(DataframeType({"value": df}).date_equal_to({"target": "var3", "comparator": "var5", "date_component": "year"})
.equals(pandas.Series([True, True, True, True, True])))
self.assertTrue(DataframeType({"value": df}).date_equal_to({"target": "var3", "comparator": "var5", "date_component": "month"})
.equals(pandas.Series([False, False, False, False, False])))
self.assertTrue(DataframeType({"value": df}).date_equal_to({"target": "var3", "comparator": "var6", "date_component": "year"})
.equals(pandas.Series([False, False, False, False, False])))
self.assertTrue(DataframeType({"value": df}).date_equal_to({"target": "var3", "comparator": "var6", "date_component": "month"})
.equals(pandas.Series([False, False, False, False, False])))
self.assertTrue(DataframeType({"value": df}).date_equal_to({"target": "var3", "comparator": "var7", "date_component": "month"})
.equals(pandas.Series([False, False, False, False, False])))
self.assertTrue(DataframeType({"value": df}).date_equal_to({"target": "var7", "comparator": "var7", "date_component": "month"})
.equals(pandas.Series([False, False, False, False, False])))
def test_date_not_equal_to(self):
df = pandas.DataFrame.from_dict(
{
"var1": ['2021', '2021', '2021', '2021', '2021'],
"var2": ["2099", "2022", "2034", "90999", "20999"],
"var3": ["1997-07", "1997-07-16", "1997-07-16T19:20:30.45+01:00", "1997-07-16T19:20:30+01:00", "1997-07-16T19:20+01:00"],
"var4": ["1997-07", "1997-07-16", "1997-07-16T19:20:30.45+01:00", "1997-07-16T19:20:30+01:00", "1997-07-16T19:20+01:00"],
"var5": ["1997-08", "1997-08-16", "1997-08-16T19:20:30.45+01:00", "1997-08-16T19:20:30+01:00", "1997-08-16T19:20+01:00"],
"var6": ["1998-08", "1998-08-11", "1998-08-17T20:21:31.46+01:00", "1998-08-17T20:21:31+01:00", "1998-08-17T20:21+01:00"],
"var7": ["", None, "", "", ""]
}
)
self.assertTrue(DataframeType({"value": df}).date_not_equal_to({"target": "var1", "comparator": '2022'})
.equals(pandas.Series([True, True, True, True, True])))
self.assertTrue(DataframeType({"value": df}).date_not_equal_to({"target": "var3", "comparator": "1998-07-16T19:20:30.45+01:00"})
.equals(pandas.Series([True, True, True, True, True])))
self.assertTrue(DataframeType({"value": df}).date_not_equal_to({"target": "var3", "comparator": "var4"})
.equals(pandas.Series([False, False, False, False, False])))
self.assertTrue(DataframeType({"value": df}).date_not_equal_to({"target": "var3", "comparator": "var4", "date_component": "year"})
.equals(pandas.Series([False, False, False, False, False])))
self.assertTrue(DataframeType({"value": df}).date_not_equal_to({"target": "var3", "comparator": "var6", "date_component": "hour"})
.equals(pandas.Series([False, False, True, True, True])))
self.assertTrue(DataframeType({"value": df}).date_not_equal_to({"target": "var3", "comparator": "var7", "date_component": "month"})
.equals(pandas.Series([False, False, False, False, False])))
self.assertTrue(DataframeType({"value": df}).date_not_equal_to({"target": "var7", "comparator": "var7", "date_component": "month"})
.equals(pandas.Series([False, False, False, False, False])))
def test_date_less_than(self):
df = pandas.DataFrame.from_dict(
{
"var1": ['2021', '2021', '2021', '2021', '2021'],
"var2": ["2099", "2022", "2034", "90999", "20999"],
"var3": ["1997-07", "1997-07-16", "1997-07-16T19:20:30.45+01:00", "1997-07-16T19:20:30+01:00", "1997-07-16T19:20+01:00"],
"var4": ["1997-07", "1997-07-16", "1997-07-16T19:20:30.45+01:00", "1997-07-16T19:20:30+01:00", "1997-07-16T19:20+01:00"],
"var5": ["1997-08", "1997-08-16", "1997-08-16T19:20:30.45+01:00", "1997-08-16T19:20:30+01:00", "1997-08-16T19:20+01:00"],
"var6": ["1998-08", "1998-08-11", "1998-08-17T20:21:31.46+01:00", "1998-08-17T20:21:31+01:00", "1998-08-17T20:21+01:00"],
"var7": ["", None, "", "", ""]
}
)
self.assertTrue(DataframeType({"value": df}).date_less_than({"target": "var1", "comparator": '2022'})
.equals(pandas.Series([True, True, True, True, True])))
self.assertTrue(DataframeType({"value": df}).date_less_than({"target": "var3", "comparator": "1998-07-16T19:20:30.45+01:00"})
.equals(pandas.Series([True, True, True, True, True])))
self.assertTrue(DataframeType({"value": df}).date_less_than({"target": "var3", "comparator": "var4"})
.equals(pandas.Series([False, False, False, False, False])))
self.assertTrue(DataframeType({"value": df}).date_less_than({"target": "var3", "comparator": "var4", "date_component": "year"})
.equals(pandas.Series([False, False, False, False, False])))
self.assertTrue(DataframeType({"value": df}).date_less_than({"target": "var3", "comparator": "var6", "date_component": "hour"})
.equals(pandas.Series([False, False, True, True, True])))
self.assertTrue(DataframeType({"value": df}).date_less_than({"target": "var3", "comparator": "var7", "date_component": "month"})
.equals(pandas.Series([False, False, False, False, False])))
self.assertTrue(DataframeType({"value": df}).date_less_than({"target": "var7", "comparator": "var7", "date_component": "month"})
.equals(pandas.Series([False, False, False, False, False])))
def test_date_greater_than(self):
df = pandas.DataFrame.from_dict(
{
"var1": ['2021', '2021', '2021', '2021', '2021'],
"var2": ["2099", "2022", "2034", "90999", "20999"],
"var3": ["1997-07", "1997-07-16", "1997-07-16T19:20:30.45+01:00", "1997-07-16T19:20:30+01:00", "1997-07-16T19:20+01:00"],
"var4": ["1997-07", "1997-07-16", "1997-07-16T19:20:30.45+01:00", "1997-07-16T19:20:30+01:00", "1997-07-16T19:20+01:00"],
"var5": ["1997-08", "1997-08-16", "1997-08-16T19:20:30.45+01:00", "1997-08-16T19:20:30+01:00", "1997-08-16T19:20+01:00"],
"var6": ["1998-08", "1998-08-11", "1998-08-17T20:21:31.46+01:00", "1998-08-17T20:21:31+01:00", "1998-08-17T20:21+01:00"],
"var7": ["", None, "", "", ""]
}
)
self.assertTrue(DataframeType({"value": df}).date_greater_than({"target": "var1", "comparator": '2020'})
.equals(pandas.Series([True, True, True, True, True])))
self.assertTrue(DataframeType({"value": df}).date_greater_than({"target": "var3", "comparator": "1996-07-16T19:20:30.45+01:00"})
.equals(pandas.Series([True, True, True, True, True])))
self.assertTrue(DataframeType({"value": df}).date_greater_than({"target": "var3", "comparator": "var4"})
.equals(pandas.Series([False, False, False, False, False])))
self.assertTrue(DataframeType({"value": df}).date_greater_than({"target": "var3", "comparator": "var4", "date_component": "year"})
.equals(pandas.Series([False, False, False, False, False])))
self.assertTrue(DataframeType({"value": df}).date_greater_than({"target": "var6", "comparator": "var3", "date_component": "hour"})
.equals(pandas.Series([False, False, True, True, True])))
self.assertTrue(DataframeType({"value": df}).date_greater_than({"target": "var3", "comparator": "var7", "date_component": "month"})
.equals(pandas.Series([False, False, False, False, False])))
self.assertTrue(DataframeType({"value": df}).date_greater_than({"target": "var7", "comparator": "var7", "date_component": "month"})
.equals(pandas.Series([False, False, False, False, False])))
def test_date_greater_than_or_equal_to(self):
df = pandas.DataFrame.from_dict(
{
"var1": ['2021', '2021', '2021', '2021', '2021'],
"var2": ["2099", "2022", "2034", "90999", "20999"],
"var3": ["1997-07", "1997-07-16", "1997-07-16T19:20:30.45+01:00", "1997-07-16T19:20:30+01:00", "1997-07-16T19:20+01:00"],
"var4": ["1997-07", "1997-07-16", "1997-07-16T19:20:30.45+01:00", "1997-07-16T19:20:30+01:00", "1997-07-16T19:20+01:00"],
"var5": ["1997-08", "1997-08-16", "1997-08-16T19:20:30.45+01:00", "1997-08-16T19:20:30+01:00", "1997-08-16T19:20+01:00"],
"var6": ["1998-08", "1998-08-11", "1998-08-17T20:21:31.46+01:00", "1998-08-17T20:21:31+01:00", "1998-08-17T20:21+01:00"],
"var7": ["", None, "", "", ""]
}
)
self.assertTrue(DataframeType({"value": df}).date_greater_than_or_equal_to({"target": "var1", "comparator": '2020'})
.equals(pandas.Series([True, True, True, True, True])))
self.assertTrue(DataframeType({"value": df}).date_greater_than_or_equal_to({"target": "var1", "comparator": '2023'})
.equals(pandas.Series([False, False, False, False, False])))
self.assertTrue(DataframeType({"value": df}).date_greater_than_or_equal_to({"target": "var3", "comparator": "1996-07-16T19:20:30.45+01:00"})
.equals(pandas.Series([True, True, True, True, True])))
self.assertTrue(DataframeType({"value": df}).date_greater_than_or_equal_to({"target": "var3", "comparator": "var4"})
.equals(pandas.Series([True, True, True, True, True])))
self.assertTrue(DataframeType({"value": df}).date_greater_than_or_equal_to({"target": "var3", "comparator": "var4", "date_component": "year"})
.equals(pandas.Series([True, True, True, True, True])))
self.assertTrue(DataframeType({"value": df}).date_greater_than_or_equal_to({"target": "var6", "comparator": "var3", "date_component": "hour"})
.equals(pandas.Series([True, True, True, True, True])))
self.assertTrue(DataframeType({"value": df}).date_greater_than_or_equal_to({"target": "var3", "comparator": "var7", "date_component": "month"})
.equals(pandas.Series([False, False, False, False, False])))
self.assertTrue(DataframeType({"value": df}).date_greater_than_or_equal_to({"target": "var7", "comparator": "var7", "date_component": "month"})
.equals(pandas.Series([False, False, False, False, False])))
def test_date_less_than_or_equal_to(self):
df = pandas.DataFrame.from_dict(
{
"var1": ['2021', '2021', '2021', '2021', '2021'],
"var2": ["2099", "2022", "2034", "90999", "20999"],
"var3": ["1997-07", "1997-07-16", "1997-07-16T19:20:30.45+01:00", "1997-07-16T19:20:30+01:00", "1997-07-16T19:20+01:00"],
"var4": ["1997-07", "1997-07-16", "1997-07-16T19:20:30.45+01:00", "1997-07-16T19:20:30+01:00", "1997-07-16T19:20+01:00"],
"var5": ["1997-08", "1997-08-16", "1997-08-16T19:20:30.45+01:00", "1997-08-16T19:20:30+01:00", "1997-08-16T19:20+01:00"],
"var6": ["1998-08", "1998-08-11", "1998-08-17T20:21:31.46+01:00", "1998-08-17T20:21:31+01:00", "1998-08-17T20:21+01:00"],
"var7": ["", None, "", "", ""]
}
)
self.assertTrue(DataframeType({"value": df}).date_less_than_or_equal_to({"target": "var1", "comparator": '2022'})
.equals(pandas.Series([True, True, True, True, True])))
self.assertTrue(DataframeType({"value": df}).date_less_than_or_equal_to({"target": "var1", "comparator": '2020'})
.equals(pandas.Series([False, False, False, False, False])))
self.assertTrue(DataframeType({"value": df}).date_less_than_or_equal_to({"target": "var3", "comparator": "1998-07-16T19:20:30.45+01:00"})
.equals(pandas.Series([True, True, True, True, True])))
self.assertTrue(DataframeType({"value": df}).date_less_than_or_equal_to({"target": "var3", "comparator": "var4"})
.equals(pandas.Series([True, True, True, True, True])))
self.assertTrue(DataframeType({"value": df}).date_less_than_or_equal_to({"target": "var3", "comparator": "var4", "date_component": "year"})
.equals(pandas.Series([True, True, True, True, True])))
self.assertTrue(DataframeType({"value": df}).date_less_than_or_equal_to({"target": "var6", "comparator": "var3", "date_component": "hour"})
.equals(pandas.Series([True, True, False, False, False])))
self.assertTrue(DataframeType({"value": df}).date_less_than_or_equal_to({"target": "var3", "comparator": "var7", "date_component": "month"})
.equals(pandas.Series([False, False, False, False, False])))
self.assertTrue(DataframeType({"value": df}).date_less_than_or_equal_to({"target": "var7", "comparator": "var7", "date_component": "month"})
.equals(pandas.Series([False, False, False, False, False])))
def test_is_incomplete_date(self):
df = pandas.DataFrame.from_dict(
{
"var1": [ '2021', '2021', '2099'],
"var2": [ "1997-07-16", "1997-07-16T19:20:30+01:00", "1997-07-16T19:20+01:00"],
}
)
self.assertTrue(DataframeType({"value": df}).is_incomplete_date({"target" : "var1"})
.equals(pandas.Series([True, True, True])))
self.assertTrue(DataframeType({"value": df}).is_incomplete_date({"target" : "var2"})
.equals(pandas.Series([False, False, False])))
def test_is_complete_date(self):
df = pandas.DataFrame.from_dict(
{
"var1": ["2021", "2021", "2099"],
"var2": ["1997-07-16", "1997-07-16T19:20:30+01:00", "1997-07-16T19:20+01:00"],
}
)
self.assertTrue(DataframeType({"value": df}).is_complete_date({"target": "var1"})
.equals(pandas.Series([False, False, False])))
self.assertTrue(DataframeType({"value": df}).is_complete_date({"target": "var2"})
.equals(pandas.Series([True, True, True])))
def test_is_unique_set(self):
df = pandas.DataFrame.from_dict( {"ARM": ["PLACEBO", "PLACEBO", "A", "A"], "TAE": [1,1,1,2], "LAE": [1,2,1,2], "ARF": [1,2,3,4]})
self.assertTrue(DataframeType({"value": df}).is_unique_set({"target" : "ARM", "comparator": "LAE"})
.equals( | pandas.Series([True, True, True, True]) | pandas.Series |
"""
Tests for scalar Timedelta arithmetic ops
"""
from datetime import datetime, timedelta
import operator
import numpy as np
import pytest
import pandas as pd
from pandas import NaT, Timedelta, Timestamp, offsets
import pandas._testing as tm
from pandas.core import ops
class TestTimedeltaAdditionSubtraction:
"""
Tests for Timedelta methods:
__add__, __radd__,
__sub__, __rsub__
"""
@pytest.mark.parametrize(
"ten_seconds",
[
Timedelta(10, unit="s"),
timedelta(seconds=10),
np.timedelta64(10, "s"),
np.timedelta64(10000000000, "ns"),
offsets.Second(10),
],
)
def test_td_add_sub_ten_seconds(self, ten_seconds):
# GH#6808
base = Timestamp("20130101 09:01:12.123456")
expected_add = Timestamp("20130101 09:01:22.123456")
expected_sub = Timestamp("20130101 09:01:02.123456")
result = base + ten_seconds
assert result == expected_add
result = base - ten_seconds
assert result == expected_sub
@pytest.mark.parametrize(
"one_day_ten_secs",
[
Timedelta("1 day, 00:00:10"),
Timedelta("1 days, 00:00:10"),
timedelta(days=1, seconds=10),
np.timedelta64(1, "D") + np.timedelta64(10, "s"),
offsets.Day() + offsets.Second(10),
],
)
def test_td_add_sub_one_day_ten_seconds(self, one_day_ten_secs):
# GH#6808
base = Timestamp("20130102 09:01:12.123456")
expected_add = Timestamp("20130103 09:01:22.123456")
expected_sub = Timestamp("20130101 09:01:02.123456")
result = base + one_day_ten_secs
assert result == expected_add
result = base - one_day_ten_secs
assert result == expected_sub
@pytest.mark.parametrize("op", [operator.add, ops.radd])
def test_td_add_datetimelike_scalar(self, op):
# GH#19738
td = Timedelta(10, unit="d")
result = op(td, datetime(2016, 1, 1))
if op is operator.add:
# datetime + Timedelta does _not_ call Timedelta.__radd__,
# so we get a datetime back instead of a Timestamp
assert isinstance(result, Timestamp)
assert result == Timestamp(2016, 1, 11)
result = op(td, Timestamp("2018-01-12 18:09"))
assert isinstance(result, Timestamp)
assert result == Timestamp("2018-01-22 18:09")
result = op(td, np.datetime64("2018-01-12"))
assert isinstance(result, Timestamp)
assert result == Timestamp("2018-01-22")
result = op(td, NaT)
assert result is NaT
@pytest.mark.parametrize("op", [operator.add, ops.radd])
def test_td_add_td(self, op):
td = Timedelta(10, unit="d")
result = op(td, Timedelta(days=10))
assert isinstance(result, Timedelta)
assert result == Timedelta(days=20)
@pytest.mark.parametrize("op", [operator.add, ops.radd])
def test_td_add_pytimedelta(self, op):
td = Timedelta(10, unit="d")
result = op(td, timedelta(days=9))
assert isinstance(result, Timedelta)
assert result == Timedelta(days=19)
@pytest.mark.parametrize("op", [operator.add, ops.radd])
def test_td_add_timedelta64(self, op):
td = Timedelta(10, unit="d")
result = op(td, np.timedelta64(-4, "D"))
assert isinstance(result, Timedelta)
assert result == Timedelta(days=6)
@pytest.mark.parametrize("op", [operator.add, ops.radd])
def test_td_add_offset(self, op):
td = Timedelta(10, unit="d")
result = op(td, offsets.Hour(6))
assert isinstance(result, Timedelta)
assert result == Timedelta(days=10, hours=6)
def test_td_sub_td(self):
td = Timedelta(10, unit="d")
expected = Timedelta(0, unit="ns")
result = td - td
assert isinstance(result, Timedelta)
assert result == expected
def test_td_sub_pytimedelta(self):
td = Timedelta(10, unit="d")
expected = Timedelta(0, unit="ns")
result = td - td.to_pytimedelta()
assert isinstance(result, Timedelta)
assert result == expected
result = td.to_pytimedelta() - td
assert isinstance(result, Timedelta)
assert result == expected
def test_td_sub_timedelta64(self):
td = Timedelta(10, unit="d")
expected = Timedelta(0, unit="ns")
result = td - td.to_timedelta64()
assert isinstance(result, Timedelta)
assert result == expected
result = td.to_timedelta64() - td
assert isinstance(result, Timedelta)
assert result == expected
def test_td_sub_nat(self):
# In this context pd.NaT is treated as timedelta-like
td = Timedelta(10, unit="d")
result = td - NaT
assert result is NaT
def test_td_sub_td64_nat(self):
td = Timedelta(10, unit="d")
td_nat = np.timedelta64("NaT")
result = td - td_nat
assert result is NaT
result = td_nat - td
assert result is NaT
def test_td_sub_offset(self):
        td = Timedelta(10, unit="d")
        # completing the truncated test body: subtracting an offset should also return a Timedelta
        result = td - offsets.Hour(1)
        assert isinstance(result, Timedelta)
        assert result == Timedelta(239, unit="h")
# -*- coding: utf-8 -*-
"""
Created on Thu Feb 10 21:19:29 2022
@author: sarathbabu.karunanit
"""
import pandas as pd
from itertools import combinations,product
class large_product_affinity:
def __init__(self,__df,support):
self.__df=__df
self.support=support
tran,prod=self.__df.columns.values
self.__df=self.__df.rename(columns={tran:'tran',prod:'prod'})
self.total=self.__df.tran.nunique()
def __mutual_support(self,x,y):
return x&y
def __cllc(self,x):
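        # Standard association-rule metrics, computed from the support columns built earlier:
        #   confidence = support(antecedent U consequent) / support(antecedent)
        #   lift       = confidence / support(consequent)
        #   leverage   = support(antecedent U consequent) - support(antecedent) * support(consequent)
        #   conviction = (1 - support(consequent)) / (1 - confidence)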
x['confidence']=x['support']/x['antecedent_support']
x['lift']=x['confidence']/x['consequent_support']
x['leverage']=x['support']-(x['antecedent_support']*x['consequent_support'])
x['conviction']=(1-x['consequent_support'])/(1-x['confidence'])
return x
def __post_processing(self):
self.__mba=self.__cllc(self.__mba)
self.__mba.drop(['antecedent_tran','consequent_tran','support_tran'],axis=1,inplace=True)
self.__mba=self.__mba.sort_values(by=['confidence','lift'],ascending=False,ignore_index=True)
def __individual_support(self):
self.__df=self.__df.groupby(['prod']).agg(tran=pd.NamedAgg(column='tran',aggfunc=lambda x:set(list(x))),\
support=pd.NamedAgg(column='tran',aggfunc=lambda x:len(set(list(x)))/self.total)).reset_index()
self.__df=self.__df.loc[self.__df['support']>=self.support]
if len(self.__df)>0:
self.__df=self.__df.sort_values(by=['support'],ascending=False)
self.__flag=True
else:
print('No Products available for the specified support')
self.__flag=False
def __perm_merge(self):
self.__df_comb=pd.merge(self.__df_comb,self.__df,on=['consequent'],how='inner')
self.__df_perm=self.__df_comb.copy()
self.__df_perm.columns=[i.replace('antecedent','consequent') if 'antecedent' in i else i.replace('consequent','antecedent') \
for i in self.__df_comb.columns.values]
self.__df_perm=self.__df_perm[self.__df_comb.columns.values]
self.__df_comb=pd.concat([self.__df_comb,self.__df_perm],axis=0,ignore_index=True)
self.__df_comb['support_tran']=self.__df_comb.apply(lambda x:self.__mutual_support(x['antecedent_tran'],x['consequent_tran']),axis=1)
self.__df_comb['support']=self.__df_comb['support_tran'].apply(lambda x:len(x)/self.total)
self.__df_comb=self.__df_comb.loc[self.__df_comb['support']>=self.support]
self.__df_comb=self.__df_comb.sort_values(by=['support'],ascending=False,ignore_index=True)
def __perm_merge_two(self):
self.__df_comb_two=pd.merge(self.__df_comb_two,self.__df_comb_pair,on=['consequent'],how='inner')
self.__df_perm=self.__df_comb_two.copy()
self.__df_perm.columns=[i.replace('antecedent','consequent') if 'antecedent' in i else i.replace('consequent','antecedent') \
for i in self.__df_comb_two.columns.values]
self.__df_perm=self.__df_perm[self.__df_comb_two.columns.values]
self.__df_comb_two=pd.concat([self.__df_comb_two,self.__df_perm],axis=0,ignore_index=True)
self.__df_comb_two['support_tran']=self.__df_comb_two.apply(lambda x:self.__mutual_support(x['antecedent_tran'],x['consequent_tran']),axis=1)
self.__df_comb_two['support']=self.__df_comb_two['support_tran'].apply(lambda x:len(x)/self.total)
self.__df_comb_two=self.__df_comb_two.loc[self.__df_comb_two['support']>=self.support]
self.__df_comb_two=self.__df_comb_two.sort_values(by=['support'],ascending=False,ignore_index=True)
def __perm_merge_three(self):
self.__df_balance=pd.merge(self.__df_balance,self.__mba_copy,on=['consequent'],how='inner')
self.__df_perm=self.__df_balance.copy()
self.__df_perm.columns=[i.replace('antecedent','consequent') if 'antecedent' in i else i.replace('consequent','antecedent') \
for i in self.__df_balance.columns.values]
self.__df_perm=self.__df_perm[self.__df_balance.columns.values]
self.__df_balance=pd.concat([self.__df_balance,self.__df_perm],axis=0,ignore_index=True)
self.__df_balance['support_tran']=self.__df_balance.apply(lambda x:self.__mutual_support(x['antecedent_tran'],x['consequent_tran']),axis=1)
self.__df_balance['support']=self.__df_balance['support_tran'].apply(lambda x:len(x)/self.total)
self.__df_balance=self.__df_balance.loc[self.__df_balance['support']>=self.support]
self.__df_balance=self.__df_balance.sort_values(by=['support'],ascending=False,ignore_index=True)
def __two_combinations(self):
self.__individual_support()
if self.__flag:
self.__df_comb=pd.DataFrame(list(combinations(self.__df['prod'].unique(),2)),columns=['antecedent','consequent'])
self.__df=self.__df.rename(columns={'prod':'antecedent','tran':'antecedent_tran','support':'antecedent_support'})
self.__df_comb=pd.merge(self.__df_comb,self.__df,on=['antecedent'],how='inner')
self.__df.columns=[i.replace('antecedent','consequent') for i in self.__df.columns.values]
self.__perm_merge()
if len(self.__df_comb)>0:
self.__mba=self.__df_comb.copy()
#self.post_processing()
return self.__mba
else:
print('No Two Products available for the specified support')
self.__flag=False
self.__df_one=self.__df.copy()
self.__df_one.columns=['Product','Product_tran','Support']
return self.__df_one.reset_index(drop=True)
def __two_two_combinations(self):
self.__df_comb_two=pd.DataFrame([(i,j) for i,j in list(combinations(self.__df_comb['antecedent'].unique(),2)) \
if len(set(i.split(','))&set(j.split(',')))==0],columns=['antecedent','consequent'])
self.__df_comb_two=pd.merge(self.__df_comb,self.__df_comb_two,on=['antecedent'],how='inner')
self.__df_comb_pair=self.__df_comb.copy()
self.__df_comb_pair.columns=[i.replace('antecedent','consequent') for i in self.__df_comb_pair.columns.values]
if len(self.__df_comb_two)>0:
self.__perm_merge_two()
def __three_two_combinations(self):
self.__mba_copy=self.__mba.copy()
self.__mba_copy['ant_len']=self.__mba_copy['antecedent'].apply(lambda x:len(x.split(',')))
self.__mba_copy=self.__mba_copy.loc[self.__mba_copy['consequent_support']>=self.__mba_copy['antecedent_support']].\
sort_values(by=['consequent_support'])
self.__mba_copy=self.__mba_copy.loc[self.__mba_copy['ant_len']>1,['antecedent','antecedent_tran','antecedent_support']]
self.__mba_copy['antecedent']=self.__mba_copy['antecedent'].apply(lambda x:",".join(sorted(x.split(','))))
self.__mba_copy=self.__mba_copy.drop_duplicates(subset=['antecedent'],keep='first')
self.__df_balance=pd.DataFrame([(i,j) for i,j in list(product(self.__df_comb['antecedent'].unique(),self.__mba_copy['antecedent'].unique())) \
if len(set(i.split(','))&set(j.split(',')))==0],columns=['antecedent','consequent'])
self.__df_balance=pd.merge(self.__df_comb,self.__df_balance,on=['antecedent'],how='inner')
self.__mba_copy.columns=[i.replace('antecedent','consequent') for i in self.__mba_copy.columns.values]
if len(self.__df_balance)>0:
self.__perm_merge_three()
def calc(self):
self.__two_combinations()
if self.__flag:
while len(self.__df_comb)>0:
self.__df_comb=self.__df_comb.loc[self.__df_comb['consequent_support']>=self.__df_comb['antecedent_support']].\
sort_values(by=['consequent_support'])
self.__df_comb['antecedent']=self.__df_comb['antecedent']+','+self.__df_comb['consequent']
self.__df_comb['antecedent']=self.__df_comb['antecedent'].apply(lambda x:",".join(sorted(x.split(','))))
self.__df_comb=self.__df_comb.drop_duplicates(subset=['antecedent'],keep='first')
self.__df_comb=self.__df_comb[['antecedent','support_tran','support']]
self.__df_comb.columns=['antecedent','antecedent_tran','antecedent_support']
self.__two_two_combinations()
self.__three_two_combinations()
self.__df_comb_three=pd.DataFrame([(i,j) for i,j in list(product(self.__df_comb['antecedent'],self.__df['consequent'])) if j not in i],\
columns=['antecedent','consequent'])
self.__df_comb=pd.merge(self.__df_comb,self.__df_comb_three,on=['antecedent'])
self.__perm_merge()
self.__mba=pd.concat([self.__mba,self.__df_comb],axis=0,ignore_index=True)
                self.__mba=pd.concat([self.__mba,self.__df_comb_two],axis=0,ignore_index=True)
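# Illustrative usage sketch (not part of the original module; the column names below are placeholders --
# the class only assumes the first column holds the transaction id and the second the product):
#   baskets = pd.DataFrame({'invoice': [1, 1, 2, 2, 3],
#                           'item': ['bread', 'milk', 'bread', 'butter', 'milk']})
#   rules = large_product_affinity(baskets, support=0.3).calc()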
import sys
import os
import torch
import numpy as np
import torch_geometric.datasets
import pyximport
from torch_geometric.data import InMemoryDataset, download_url
import pandas as pd
from sklearn import preprocessing
pyximport.install(setup_args={'include_dirs': np.get_include()})
import os.path as osp
from torch_geometric.data import Data
import time
from torch_geometric.utils import add_self_loops, negative_sampling
from torch_geometric.data import Dataset
from functools import lru_cache
import copy
from fairseq.data import (
NestedDictionaryDataset,
NumSamplesDataset,
)
import json
import pathlib
from pathlib import Path
BASE = Path(os.path.realpath(__file__)).parent
GLOBAL_ROOT = str(BASE / 'graphormer_repo' / 'graphormer')
sys.path.insert(1, (GLOBAL_ROOT))
from data.wrapper import preprocess_item
import datetime
def find_part(hour):
if hour < 11:
part = 1
elif (hour > 11) & (hour < 20):
part = 2
else:
part = 3
return part
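# Examples of the mapping above (illustrative): find_part(9) -> 1, find_part(15) -> 2,
# find_part(23) -> 3; note that hour == 11 falls through to the last branch and returns 3.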
def prepare_raw_dataset_edge(dataset_name):
if dataset_name == 'abakan':
raw_data = pd.read_csv('datasets/abakan/raw/abakan_full_routes_final_weather_L_NaN_filtered_FIXED.csv')
all_roads_graph = list(pd.read_pickle('datasets/abakan/raw/all_roads_graph.pickle').to_networkx().edges())
all_nodes = pd.read_pickle('datasets/abakan/raw/clear_nodes.pkl')
init = pd.read_csv('datasets/abakan/raw/graph_abakan_init.csv')
elif dataset_name == 'omsk':
raw_data = pd.read_csv('datasets/omsk/raw/omsk_full_routes_final_weather_L_NaN_filtered_FIXED.csv')
all_roads_graph = list(pd.read_pickle('datasets/omsk/raw/all_roads_graph.pickle').to_networkx().edges())
# all_nodes = pd.read_pickle('datasets/omsk/raw/clear_nodes.pkl')
init = pd.read_csv('datasets/omsk/raw/graph_omsk_init.csv')
all_roads_dataset = pd.DataFrame()
all_edge_list = [list((all_roads_graph)[i]) for i in range(0,len( (all_roads_graph)))]
all_roads_dataset['edge_id']= range(0,len(init['edge_id'].unique()))
all_roads_dataset['speed'] = ' 1'
all_roads_dataset['length'] = ' 1'
all_roads_dataset[' start_point_part'] = init['quartal_id'] / len(init['quartal_id'].unique())
all_roads_dataset['finish_point_part'] = init['quartal_id'] / len(init['quartal_id'].unique())
    all_roads_dataset_edges = pd.DataFrame()
# -*- coding: utf-8 -*-
"""
Created on Sep/2020
Builds JSON output from the sqlite table
@author: github rictom/rede-cnpj
2020-11-25 - If a table already exists, it seems to slow down pandas pd.to_sql.
Do not CREATE TABLE or create an index beforehand for a table that will be created or modified by pandas.
"""
import os, sys, glob
import time, copy, re, string, unicodedata, collections
import pandas as pd, sqlalchemy
from fnmatch import fnmatch
'''
from sqlalchemy.pool import StaticPool
engine = create_engine('sqlite://',
connect_args={'check_same_thread':False},
poolclass=StaticPool)
'''
import config
try:
camDbSqlite = config.config['BASE']['base_receita']
except:
sys.exit('o arquivo sqlite não foi localizado. Veja o caminho da base no arquivo de configuracao rede.ini está correto.')
camDBSqliteFTS = config.config['BASE'].get('base_receita_fulltext','')
caminhoDBLinks = config.config['BASE'].get('base_links', '')
caminhoDBEnderecoNormalizado = config.config['BASE'].get('base_endereco_normalizado', '')
#logAtivo = True if config['rede']['logAtivo']=='1' else False #log the queried cnpjs
logAtivo = config.config['ETC'].getboolean('logativo',False) #log the queried cnpjs
# ligacaoSocioFilial = True if config['rede']['ligacaoSocioFilial']=='1' else False #include partner-to-branch links
ligacaoSocioFilial = config.config['ETC'].getboolean('ligacao_socio_filial',False) #include partner-to-branch (socio-filial) links
class DicionariosCodigos():
def __init__(self):
dfaux = pd.read_csv(r"tabelas/tabela-de-qualificacao-do-socio-representante.csv", sep=';')
self.dicQualificacao_socio = pd.Series(dfaux.descricao.values,index=dfaux.codigo).to_dict()
dfaux = pd.read_csv(r"tabelas/DominiosMotivoSituaoCadastral.csv", sep=';', encoding='latin1', dtype=str)
self.dicMotivoSituacao = pd.Series(dfaux['Descrição'].values, index=dfaux['Código']).to_dict()
dfaux = pd.read_excel(r"tabelas/cnae.xlsx", sheet_name='codigo-grupo-classe-descr')
self.dicCnae = pd.Series(dfaux['descricao'].values, index=dfaux['codigo']).to_dict()
self.dicSituacaoCadastral = {'01':'Nula', '02':'Ativa', '03':'Suspensa', '04':'Inapta', '08':'Baixada'}
#self.dicSituacaoCadastral = {'1':'Nula', '2':'Ativa', '3':'Suspensa', '4':'Inapta', '8':'Baixada'}
self.dicPorteEmpresa = {'00':'Não informado', '01':'Micro empresa', '03':'Empresa de pequeno porte', '05':'Demais (Médio ou Grande porte)'}
dfaux = pd.read_csv(r"tabelas/natureza_juridica.csv", sep=';', encoding='latin1', dtype=str)
self.dicNaturezaJuridica = pd.Series(dfaux['natureza_juridica'].values, index=dfaux['codigo']).to_dict()
gdic = DicionariosCodigos()
dfaux=None
gTableIndex = 0
gEngineExecutionOptions = {"sqlite_raw_colnames": True, 'pool_size':1} #poll_size=1 força usar só uma conexão??
kCaractereSeparadorLimite = '@'
#decorator para medir tempo de execução de função
def timeit(method):
def timed(*args, **kw):
ts = time.time()
result = method(*args, **kw)
te = time.time()
if 'log_time' in kw:
name = kw.get('log_name', method.__name__.upper())
kw['log_time'][name] = int((te - ts) * 1000)
else:
print ('%r %2.2f ms' % \
(method.__name__, (te - ts) * 1000))
return result
return timed
def apagaTabelasTemporarias():
con = sqlalchemy.create_engine(f"sqlite:///{camDbSqlite}", execution_options=gEngineExecutionOptions)
con.execute('DROP TABLE if exists tmp_cnpjs')
con.execute('DROP TABLE if exists tmp_cpfnomes')
con.execute('DROP TABLE if exists tmp_ids')
con.execute('DROP TABLE if exists tmp_socios')
con.execute('DROP TABLE if exists tmp_busca_nome')
con = None
apagaTabelasTemporarias() #drop the temp tables when the module is loaded
def buscaPorNome(nomeIn, limite=10): #the name must be complete. With 'TESTE', a random item is picked
    '''camDBSqliteFTS is the database with a full-text-search index; it is fast with MATCH but slow with =,
    which is why the query has to run against camDbSqlite when MATCH is not used
    '''
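    # Sketch (assumption -- the actual schema of the FTS database is not defined in this file) of how
    # the *_search tables queried with MATCH below could be declared as SQLite FTS5 virtual tables:
    #   CREATE VIRTUAL TABLE empresas_search USING fts5(razao_social);
    #   CREATE VIRTUAL TABLE socios_search USING fts5(nome_socio);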
    #strip accents
nomeIn = nomeIn.strip().upper()
nomeMatch = ''
try:
limite = int(limite)
except:
limite = 0
# print('limite', limite)
limite = min(limite,100) if limite else 10
if ('*' in nomeIn) or ('?' in nomeIn) or ('"' in nomeIn):
nomeMatchInicial = nomeIn.strip()
nomeMatch = nomeMatchInicial
nomeMatchInicial = nomeMatchInicial.replace('"','') #para usar com fnmatch
        if nomeMatch.startswith('*'): #sqlite MATCH does not accept * at the beginning
            nomeMatch = nomeMatch[1:].strip()
        if '?' in nomeMatch: #? is not accepted by sqlite MATCH, but it can be used with fnmatch
nomeMatch = nomeMatch.replace('?', '*')
if camDBSqliteFTS:
confts = sqlalchemy.create_engine(f"sqlite:///{camDBSqliteFTS}", execution_options=gEngineExecutionOptions)
con = sqlalchemy.create_engine(f"sqlite:///{camDbSqlite}", execution_options=gEngineExecutionOptions)
nome = ''.join(x for x in unicodedata.normalize('NFKD', nomeIn) if x in string.printable).upper()
cjs, cps = set(), set()
#if (' ' not in nome) and (nome not in ('TESTE',)): #só busca nome
# return cjs, cps
# print('nomeMatch', nomeMatch)
# print('nome',nome)
    #fetch CPFs
    if nomeMatch:
        if not camDBSqliteFTS: #there is no FTS table, so no MATCH query can be made
#con = None
return set(), set()
queryfts = f'''
SELECT DISTINCT nome_socio as nome
FROM socios_search
where nome_socio match \'{nomeMatch}\'
limit {limite*20}
'''
df_busca_nomesPF = pd.read_sql(queryfts, confts, index_col=None)
df_busca_nomesPF.to_sql('tmp_busca_nomePF', con, if_exists='replace', index=None)
query = f'''
SELECT distinct cnpj_cpf_socio, nome_socio
from tmp_busca_nomePF tn
left join socios ts on tn.nome=ts.nome_socio
where cnpj_cpf_socio not null and nome_socio<>"" and length(cnpj_cpf_socio)=11
limit {limite*2}
'''
        #note 26/4/2021: strictly speaking length(cnpj_cpf_socio)=11 would not be necessary; the problem is that the database has wrong partner names when the partner is a company
elif nomeIn=='TESTE':
query = 'select cnpj_cpf_socio, nome_socio from socios where rowid > (abs(random()) % (select (select max(rowid) from socios)+1)) LIMIT 1;'
else:
query = f'''
SELECT distinct cnpj_cpf_socio, nome_socio
FROM socios
where nome_socio=\'{nome}\'
limit {limite}
'''
#nomeMatch = nomeMatch.replace('"','')
# print('query', query)
contagemRegistros = 0
for r in con.execute(query):
if contagemRegistros>=limite:
break
if nomeMatch:
if not fnmatch(r.nome_socio, nomeMatchInicial):
continue
if len(r.cnpj_cpf_socio)==14:
cjs.add(r.cnpj_cpf_socio)
elif len(r.cnpj_cpf_socio)==11:
cps.add((r.cnpj_cpf_socio, r.nome_socio))
contagemRegistros += 1
if nome=='TESTE':
print('##TESTE com identificador aleatorio:', cjs, cps)
con = None
return cjs, cps
    #fetch CNPJs
if nomeMatch:
queryfts = f'''
SELECT DISTINCT razao_social as nome
FROM empresas_search
where razao_social match \'{nomeMatch}\'
limit {limite*20}
'''
df_busca_nomesPJ = pd.read_sql(queryfts, confts, index_col=None)
df_busca_nomesPJ.to_sql('tmp_busca_nomePJ', con, if_exists='replace', index=None)
query = f'''
SELECT te.cnpj, t.razao_social
from tmp_busca_nomePJ tn
inner join empresas t on tn.nome = t.razao_social
left join estabelecimento te on te.cnpj_basico=t.cnpj_basico --inner join fica lento??
limit {limite*2}
'''
else:
        # to search by razao_social, the column must be indexed
query = f'''
SELECT te.cnpj, razao_social
FROM empresas t
inner join estabelecimento te on te.cnpj_basico=t.cnpj_basico
where t.razao_social=\'{nome}\'
limit {limite}
'''
for r in con.execute(query):
if contagemRegistros>=limite:
break
if nomeMatch:
if not fnmatch(r.razao_social, nomeMatchInicial):
continue
cjs.add(r.cnpj)
contagemRegistros +=1
con = None
return cjs, cps
#.def buscaPorNome(
def busca_cnpj(cnpj_basico, limiteIn):
con = sqlalchemy.create_engine(f"sqlite:///{camDbSqlite}", execution_options=gEngineExecutionOptions)
try:
limite = int(limiteIn)
except ValueError:
limite = 0
    if limite>0: #limit the number of branches (filiais)
query = f'''
SELECT te.cnpj
FROM empresas t
inner join estabelecimento te on te.cnpj_basico=t.cnpj_basico
where t.cnpj_basico=\'{cnpj_basico}\'
order by te.matriz_filial, te.cnpj_ordem
limit {limite+1}
'''
    elif limite<0: #return all branches and the head office (matriz)
query = f'''
SELECT te.cnpj
FROM empresas t
left join estabelecimento te on te.cnpj_basico=t.cnpj_basico
where t.cnpj_basico=\'{cnpj_basico}\'
order by te.matriz_filial, te.cnpj_ordem
'''
    else: #no limit given, head office (matriz) only
query = f'''
SELECT te.cnpj
FROM empresas t
inner join estabelecimento te on te.cnpj_basico=t.cnpj_basico
where t.cnpj_basico=\'{cnpj_basico}\' and te.matriz_filial is '1'
'''
r = con.execute(query).fetchall()
return {k[0] for k in r}
def busca_cpf(cpfin):
    '''since the database does not hold partners' full CPFs, search only by the middle digits'''
cpf = '***' + cpfin[3:9] + '**'
con = sqlalchemy.create_engine(f"sqlite:///{camDbSqlite}", execution_options=gEngineExecutionOptions)
query = f'''
SELECT distinct cnpj_cpf_socio, nome_socio
FROM socios
where cnpj_cpf_socio=\'{cpf}\'
limit 100
'''
lista = []
for c, n in con.execute(query).fetchall():
lista.append((c,n))
return lista
def separaEntrada(cpfcnpjIn='', listaIds=None):
cnpjs = set()
cpfnomes = set()
    outrosIdentificadores = set() #other identifiers, such as EN_ (assuming two characters plus an underscore)
if cpfcnpjIn:
lista = cpfcnpjIn.split(';')
lista = [i.strip() for i in lista if i.strip()]
else:
lista = listaIds
for i in lista:
if i.startswith('PJ_'):
cnpjs.add(i[3:])
elif i.startswith('PF_'):
cpfcnpjnome = i[3:]
cpf = cpfcnpjnome[:11]
nome = cpfcnpjnome[12:]
cpfnomes.add((cpf,nome))
elif i.startswith('PE_'):
cpfcnpjnome = i[3:]
nome = cpfcnpjnome
cpf = ''
cpfnomes.add((cpf,nome))
elif len(i)>3 and i[2]=='_':
outrosIdentificadores.add(i)
else:
limite = 0
if kCaractereSeparadorLimite in i:
i, limite = kCaractereSeparadorLimite.join(i.split(kCaractereSeparadorLimite)[0:-1]).strip(), i.split(kCaractereSeparadorLimite)[-1]
if not limite:
limite=-1
soDigitos = ''.join(re.findall('\d', str(i)))
if len(soDigitos)==14:
cnpjs.add(soDigitos)
elif len(soDigitos)==8:
scnpj_aux = busca_cnpj(soDigitos, limite)
if scnpj_aux:
cnpjs.update(scnpj_aux)
elif len(soDigitos)==11:
lcpfs = busca_cpf(soDigitos)
if lcpfs:
cpfnomes.update(set(lcpfs))
elif re.search('\*\*\*\d\d\d\d\d\d\*\*',str(i)):
lcpfs = set(busca_cpf(str(i)))
if lcpfs:
cpfnomes.update(set(lcpfs))
pass #fazer verificação por CPF??
elif not soDigitos and i.strip():
cnpjsaux, cpfnomesaux = buscaPorNome(i, limite=limite)
if cnpjsaux:
cnpjs.update(cnpjsaux)
if cpfnomesaux:
cpfnomes.update(cpfnomesaux)
return cnpjs, cpfnomes, outrosIdentificadores
#.def separaEntrada
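# Example of the input string accepted by separaEntrada (the values below are illustrative only):
#   separaEntrada('12345678000195;***123456**;ACME COMERCIO@5')
# Items are separated by ';'. A 14-digit item is a CNPJ, an 8-digit item is a CNPJ root (all of its
# establishments are looked up), an 11-digit item or a masked ***dddddd** item is a partner CPF, an
# item without digits triggers a name search, and '@N' appended to an item limits its result count.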
def jsonRede(cpfcnpjIn, camada=1 ):
if cpfcnpjIn:
return camadasRede(cpfcnpjIn = cpfcnpjIn, camada=camada, bjson=True)
else:
return {'no': [], 'ligacao':[]}
#.def jsonRede
dtype_tmp_ids={'identificador':sqlalchemy.types.VARCHAR,
'grupo':sqlalchemy.types.VARCHAR,
'camada':sqlalchemy.types.INTEGER }
dtype_tmp_cnpjs={'cnpj':sqlalchemy.types.VARCHAR,
'grupo':sqlalchemy.types.VARCHAR,
'camada':sqlalchemy.types.INTEGER }
dtype_tmp_cpfnomes={'cpf':sqlalchemy.types.VARCHAR,
'nome':sqlalchemy.types.VARCHAR,
'grupo':sqlalchemy.types.VARCHAR,
'camada':sqlalchemy.types.INTEGER }
def criaTabelasTmpParaCamadas(con, cpfcnpjIn='', listaIds=None, grupo=''):
#con = sqlalchemy.create_engine(f"sqlite:///{camDbSqlite}", execution_options={"sqlite_raw_colnames": True})
global gTable
#con.execute('DROP TABLE IF EXISTS tmp_cnpjs;') #xx
#con.execute('DROP TABLE IF EXISTS tmp_cpfnomes;') #xx
apagaTabelasTemporarias()
# con.execute('''
# CREATE TEMP TABLE tmp_cnpjs (
# cnpj TEXT,
# grupo TEXT,
# camada INTEGER
# )''')
# con.execute('''
# CREATE TEMP TABLE tmp_cpfnomes (
# cpf TEXT,
# nome TEXT,
# grupo TEXT,
# camada INTEGER
# )''')
if cpfcnpjIn:
cnpjs, cpfnomes, outrosIdentificadores = separaEntrada(cpfcnpjIn=cpfcnpjIn)
else:
cnpjs, cpfnomes, outrosIdentificadores = separaEntrada(listaIds=listaIds)
camadasIds = {}
ids = set(['PJ_'+c for c in cnpjs])
ids.update(set(['PF_'+cpf+'-'+nome for cpf,nome in cpfnomes if cpf]))
ids.update(set(['PE_'+nome for cpf,nome in cpfnomes if not cpf]))
ids.update(outrosIdentificadores)
#con.execute('DROP TABLE IF EXISTS tmp_ids;') #xx
# con.execute('''
# CREATE TEMP TABLE tmp_ids (
# identificador TEXT,
# grupo TEXT
# camada INTEGER
# )''')
dftmptable = pd.DataFrame({'identificador' : list(ids)})
dftmptable['camada'] = 0
dftmptable['grupo'] = grupo
#con.execute('DELETE FROM tmp_ids')
#dftmptable.set_index('identificador', inplace=True)
dftmptable.to_sql('tmp_ids', con=con, if_exists='replace', index=False, dtype=dtype_tmp_ids)
    #this index makes the search slow!
#con.execute('CREATE INDEX ix_tmp_ids_index ON tmp_ids ("identificador")')
camadasIds = {i:0 for i in ids}
# for cnpj in cnpjs:
# camadasIds[cnpj]=0
# for cpf,nome in cpfnomes:
# camadasIds[(cpf, nome)] = 0;
for outros in outrosIdentificadores:
camadasIds[outros]=0
dftmptable = pd.DataFrame({'cnpj' : list(cnpjs)})
dftmptable['grupo'] = grupo
dftmptable['camada'] = 0
#con.execute('DELETE FROM tmp_cnpjs')
dftmptable.to_sql('tmp_cnpjs', con=con, if_exists='replace', index=False, dtype=dtype_tmp_cnpjs)
#dftmptable.to_sql('tmp_cnpjs', con=con, if_exists='replace', index=False, dtype=dtype_tmp_cnpjs)
dftmptable = pd.DataFrame(list(cpfnomes), columns=['cpf', 'nome'])
dftmptable['grupo'] = grupo
dftmptable['camada'] = 0
#con.execute('DELETE FROM tmp_cpfnomes')
dftmptable.to_sql('tmp_cpfnomes', con=con, if_exists='replace', index=False, dtype=dtype_tmp_cpfnomes)
return camadasIds, cnpjs, cpfnomes #, ids
#.def criaTabelasTmpParaCamadas
def cnpj2id(cnpj):
return 'PJ_' + cnpj
def cpfnome2id(cpf,nome):
if cpf!='':
return 'PF_'+cpf+'-'+nome
else:
return 'PE_'+nome
def id2cpfnome(id):
if id.startswith('PF_'):
return id[3:14], id[15:]
if id.startswith('PE_'):
return '', id[3:]
def id2cnpj(id):
return id[3:]
@timeit
def camadasRede(cpfcnpjIn='', listaIds=None, camada=1, grupo='', bjson=True):
    # using SQL
    #if cpfcnpjIn=='', uses data from the tmp_cnpjs and tmp_cpfnomes tables; there will be no camada=0
    #if fromTmpTable=False, expects cpfcnpjIn='cpf-nome;cnpj;nome...'
    #if fromTmpTable=True, ignores cpfcnpjIn and takes the data from tmp_cnpjs and tmp_cpfnomes
#print('INICIANDO-------------------------')
#print(f'camadasRede ({camada})-{cpfcnpjIn}-inicio: ' + time.ctime() + ' ', end='')
mensagem = {'lateral':'', 'popup':'', 'confirmar':''}
#con=sqlite3.connect(camDbSqlite)
con = sqlalchemy.create_engine(f"sqlite:///{camDbSqlite}", execution_options=gEngineExecutionOptions)
'''
https://stackoverflow.com/questions/17497614/sqlalchemy-core-connection-context-manager
from sqlalchemy import create_engine
engine = create_engine("sqlite:///:memory:")
with engine.connect() as conn:
print(conn.closed)
print(conn.closed)'''
grupo = str(grupo)
nosaux = []
#nosids = set()
ligacoes = []
#setOrigDest = set()
camadasIds, cnpjs, cpfnomes = criaTabelasTmpParaCamadas(con, cpfcnpjIn=cpfcnpjIn, listaIds=listaIds, grupo=grupo)
# if cpfcnpjIn:
# camadasIds = criaTabelasTmpParaCamadas(con, cpfcnpjIn=cpfcnpjIn, grupo=grupo, listaCpfCnpjs=listaCpfCnpjs)
# else:
# camadasIds = {}
#cnpjs=set() #precisa adicionar os cnpjs que não tem sócios
#cpfnomes = set()
    dicRazaoSocial = {} #exceptional case: a cnpj that appears as a partner in the socios table but has no record in the empresas table
for cam in range(camada):
# query_indices = '''
# CREATE unique INDEX ix_tmp_cnpjs ON tmp_cnpjs (cnpj);
# CREATE unique INDEX ix_tmp_cpfnomes ON tmp_cpfnomes (cpf, nome);
# CREATE INDEX ix_tmp_cpfnomes_cpf ON tmp_cpfnomes (cpf);
# CREATE INDEX ix_tmp_cpfnomes_nome ON tmp_cpfnomes (nome);
# CREATE unique INDEX ix_tmp_ids ON tmp_ids (identificador);
# '''
whereMatriz = ''
if bjson and not ligacaoSocioFilial:
if cam==-1:
whereMatriz = ''
else:
                # to verify: this may not be correct, the branch's layer (camada) needs to be checked
whereMatriz = '''
WHERE substr(t.cnpj,9,4)="0001"
'''
#AND (length(cnpj_cpf_socio)<>14 OR substr(cnpj_cpf_socio, 9, 4)="0001")
query = f'''
DROP TABLE if exists tmp_socios;
CREATE TABLE tmp_socios AS
SELECT DISTINCT
* From (
SELECT t.cnpj, t.cnpj_cpf_socio, t.nome_socio, sq.qualificacao_socio as cod_qualificacao
FROM socios t
INNER JOIN tmp_cnpjs tl ON tl.cnpj = t.cnpj
left join socio_qualificacao sq ON sq.codigo=t.qualificacao_socio
UNION
SELECT t.cnpj, t.cnpj_cpf_socio, t.nome_socio, sq.qualificacao_socio as cod_qualificacao
FROM socios t
INNER JOIN tmp_cnpjs tl ON tl.cnpj = t.cnpj_cpf_socio
left join socio_qualificacao sq ON sq.codigo=t.qualificacao_socio
{whereMatriz}
UNION
SELECT t.cnpj, t.cnpj_cpf_socio, t.nome_socio, sq.qualificacao_socio as cod_qualificacao
FROM socios t
INNER JOIN tmp_cpfnomes tn ON tn.nome= t.nome_socio AND tn.cpf=t.cnpj_cpf_socio
left join socio_qualificacao sq ON sq.codigo=t.qualificacao_socio
{whereMatriz}
) as taux
;
Insert INTO tmp_socios (cnpj, cnpj_cpf_socio, nome_socio, cod_qualificacao)
select tm.cnpj, tp.cnpj as cnpj_cpf_socio, "" as nome_socio, "filial" as cod_qualificacao
from estabelecimento t
inner join tmp_cnpjs tp on tp.cnpj=t.cnpj
left join estabelecimento tm on tm.cnpj_basico=t.cnpj_basico and tm.cnpj<>tp.cnpj
where tm.matriz_filial is "1" --is é mais rapido que igual (igual é muito lento);
Insert INTO tmp_cnpjs (cnpj, grupo, camada)
select distinct ts.cnpj, "{grupo}" as grupo, {cam+1} as camada
From tmp_socios ts
left join tmp_cnpjs tc on tc.cnpj = ts.cnpj
where tc.cnpj is NULL;
Insert INTO tmp_cnpjs (cnpj, grupo, camada)
select distinct cnpj_cpf_socio as cnpj,"{grupo}" as grupo, {cam+1} as camada
From tmp_socios ts
left join tmp_cnpjs tc on tc.cnpj = ts.cnpj_cpf_socio
where (tc.cnpj is NULL) AND (length(cnpj_cpf_socio)=14);
Insert INTO tmp_cpfnomes (cpf, nome, grupo, camada)
select distinct cnpj_cpf_socio as cpf, nome_socio as nome, "{grupo}" as grupo, {cam+1} as camada
From tmp_socios ts
left join tmp_cpfnomes tcn on tcn.cpf = ts.cnpj_cpf_socio and tcn.nome = ts.nome_socio
where tcn.cpf is NULL AND tcn.nome is NULL and length(cnpj_cpf_socio)<>14;
Insert INTO tmp_ids (identificador, grupo, camada)
select distinct "PJ_" || t.cnpj as identificador, t.grupo, t.camada
From tmp_cnpjs t
left join tmp_ids on tmp_ids.identificador = ("PJ_" || t.cnpj)
where tmp_ids.identificador is NULL;
Insert INTO tmp_ids (identificador, grupo, camada)
select distinct "PF_" || t.cpf || "-" || t.nome as identificador, t.grupo, t.camada
From tmp_cpfnomes t
left join tmp_ids on tmp_ids.identificador = ("PF_" || t.cpf || "-" || t.nome)
where t.cpf<>"" and tmp_ids.identificador is NULL;
Insert INTO tmp_ids (identificador, grupo, camada)
select distinct "PE_" || t.nome as identificador, t.grupo, t.camada
From tmp_cpfnomes t
left join tmp_ids on tmp_ids.identificador = ("PE_" || t.nome)
where t.cpf = "" and tmp_ids.identificador is NULL;
'''
for sql in query.split(';'):
con.execute(sql)
#.for cam in range(camada):
if camada==0:
        #workaround: with camada 0 the tmp_socios table is not dropped, so it would pick up data from a previous query.
query0 = '''
CREATE TABLE tmp_socios AS
SELECT t.cnpj, t.cnpj_cpf_socio, t.nome_socio, sq.qualificacao_socio as cod_qualificacao
FROM socios t
left join socio_qualificacao sq ON sq.codigo=t.qualificacao_socio
limit 0
'''
con.execute(query0)
queryLertmp = '''
Select *
from tmp_ids
where substr(identificador,1,3)='PF_' or substr(identificador,1,3)='PE_'
'''
for k in con.execute(queryLertmp):
kid = k['identificador']
if kid[:3]=='PF_':
_, descricao = id2cpfnome(kid) #kid[15:]
else: #'PE_'
descricao = '(EMPRESA SÓCIA NO EXTERIOR)'
no = {'id': kid, 'descricao':descricao,
'camada': k['camada'],
'situacao_ativa': True,
#'empresa_situacao': 0, 'empresa_matriz': 1, 'empresa_cod_natureza': 0,
'logradouro':'',
'municipio': '', 'uf': ''}
camadasIds[kid] = k['camada']
nosaux.append(copy.deepcopy(no))
querySocios = '''
select *
from tmp_socios
'''
for k in con.execute(querySocios):
ksocio = k['cnpj_cpf_socio']
if len(ksocio)==14:
destino = cnpj2id(ksocio) #'PJ_'+ ksocio
else:
destino = cpfnome2id(ksocio,k['nome_socio']) # 'PF_'+ksocio+'-'+k['nome_socio']
ligacao = {"origem":cnpj2id(k['cnpj']), #'PJ_'+k['cnpj'],
"destino":destino,
"cor": "silver", #"cor":"gray",
"camada":0,
"tipoDescricao":'sócio',
"label":k['cod_qualificacao']} #gdic.dicQualificacao_socio.get(int(k['cod_qualificacao']),'').strip()}
ligacoes.append(copy.deepcopy(ligacao))
if logAtivo or not bjson:
conlog = sqlalchemy.create_engine(f"sqlite:///{camDbSqlite}", execution_options=gEngineExecutionOptions)
conlog.execute('create table if not exists log_cnpjs (cnpj text, grupo text, camada text)')
conlog.execute('''insert into log_cnpjs
select * from tmp_cnpjs; ''')
conlog.execute('create table if not exists log_cpfnomes (cpf text, nome text, grupo text, camada text);')
conlog.execute('''insert into log_cpfnomes
select cpf, nome, grupo, cast(camada as int) from tmp_cpfnomes; ''')
conlog = None
if not bjson:
con = None
return len(camadasIds)
for k in con.execute('Select * from tmp_cnpjs'):
kcnpj = k['cnpj']
cnpjs.add(kcnpj)
#camadasIds[kcnpj] = k['camada']
camadasIds[cnpj2id(kcnpj)] = k['camada']
nos = dadosDosNosCNPJs(con=con, cnpjs=cnpjs, nosaux=nosaux, dicRazaoSocial=dicRazaoSocial, camadasIds=camadasIds)
    #camada=0: only fetch the data
    if camada>0:
        #this address call was making the routine slow. It became OK after removing the prior table creation.
#jsonEnderecos = camadaLink(cpfcnpjIn='',conCNPJ=con, camada=1, grupo=grupo, listaIds=list(camadasIds.keys()), tipoLink='endereco')
ids = set() #{'PJ_' + icnpj for icnpj in cnpjs}
for item in camadasIds.keys():
prefixo = ''
            try: #if it is a cpfnome it is a tuple and the prefix cannot be sliced.
prefixo = item[:3]
if prefixo[2]=='_' and prefixo!='PF_':
ids.add(item)
except:
continue
jsonEnderecos = camadaLink(cpfcnpjIn='',conCNPJ=con, camada=1, grupo=grupo, listaIds=ids, tipoLink='endereco')
#nos.extend([copy.deepcopy(item) for item in jsonEnderecos['no'] if item['id'] not in camadasIds])
#nos.extend([item for item in jsonEnderecos['no'] if item['id'] not in camadasIds]) #ISTO é mais lento que for???
for item in jsonEnderecos['no']:
if item['id'] not in camadasIds:
nos.append(item)
ligacoes.extend(jsonEnderecos['ligacao'])
#print(' jsonenderecos-fim: ' + ' '.join(str(time.ctime()).split()[3:]))
textoJson={'no': nos, 'ligacao':ligacoes, 'mensagem':mensagem}
con = None
#print(listaIds)
#print(textoJson)
#print(' fim: ' + time.ctime())
#print(' fim: ' + ' '.join(str(time.ctime()).split()[3:]))
return textoJson
#.def camadasRede
def dadosDosNosCNPJs(con, cnpjs, nosaux, dicRazaoSocial, camadasIds):
dftmptable = pd.DataFrame({'cnpj' : list(cnpjs)})
dftmptable['grupo'] = ''
dftmptable['camada'] = 0
#con.execute('DELETE FROM tmp_cnpjs')
dftmptable.to_sql('tmp_cnpjsdados', con=con, if_exists='replace', index=False, dtype=dtype_tmp_cnpjs)
# query = '''
# SELECT t.cnpj, razao_social, situacao, matriz_filial,
# tipo_logradouro, logradouro, numero, complemento, bairro,
# municipio, uf, cod_nat_juridica
# FROM empresas t
# INNER JOIN tmp_cnpjsdados tp on tp.cnpj=t.cnpj
# ''' #pode haver empresas fora da base de teste
query = '''
SELECT tt.cnpj, te.razao_social, tt.situacao_cadastral as situacao, tt.matriz_filial,
tt.tipo_logradouro, tt.logradouro, tt.numero, tt.complemento, tt.bairro,
ifnull(tm.municipio,tt.nome_cidade_exterior) as municipio, tt.uf as uf, te.natureza_juridica as cod_nat_juridica
from tmp_cnpjsdados tp
inner join estabelecimento tt on tt.cnpj = tp.cnpj
left join empresas te on te.cnpj_basico = tt.cnpj_basico --trocar por inner join deixa a consulta lenta...
left join municipio tm on tm.cod_municipio=tt.municipio
    ''' #some companies may not be present in the test database
setCNPJsRecuperados = set()
for k in con.execute(query):
listalogradouro = [j.strip() for j in [k['logradouro'].strip(), k['numero'], k['complemento'].strip(';'), k['bairro']] if j.strip()]
logradouro = ', '.join(listalogradouro)
no = {'id': cnpj2id(k['cnpj']), 'descricao': k['razao_social'],
'camada': camadasIds[cnpj2id(k['cnpj'])], 'tipo':0, 'situacao_ativa': int(k['situacao'])==2,
'logradouro': f'''{k['tipo_logradouro']} {logradouro}''',
'municipio': k['municipio'], 'uf': k['uf'], 'cod_nat_juridica':k['cod_nat_juridica']
}
nosaux.append(copy.deepcopy(no))
setCNPJsRecuperados.add(k['cnpj'])
    #handles an exceptional case with the test database: a cnpj that is a partner has no record in the empresas table
diffCnpj = cnpjs.difference(setCNPJsRecuperados)
for cnpj in diffCnpj:
no = {'id': cnpj2id(cnpj), 'descricao': dicRazaoSocial.get(cnpj, 'NÃO FOI LOCALIZADO NA BASE'),
'camada': camadasIds[cnpj2id(cnpj)], 'tipo':0, 'situacao_ativa': True,
'logradouro': '',
'municipio': '', 'uf': '', 'cod_nat_juridica':''
}
nosaux.append(copy.deepcopy(no))
    #adjust the nodes, adding labels
    nosaux=ajustaLabelIcone(nosaux)
    nos = nosaux #nosaux[::-1] #reversed, so that nodes from lower layers are inserted later and end up in front
    nos.sort(key=lambda n: n['camada'], reverse=True) #reverse the order, because the last icons will appear in front. In practice this may not be useful.
con.execute('DROP TABLE if exists tmp_cnpjsdados ')
return nos
#.def dadosDosNosCNPJs
@timeit
def camadaLink(cpfcnpjIn='', conCNPJ=None, camada=1, numeroItens=15, valorMinimo=0, valorMaximo=0, grupo='', bjson=True, listaIds=None, tipoLink='link'):
    #if cpfcnpjIn=='', uses data from the tmp_cnpjs and tmp_cpfnomes tables; there will be no camada=0
    #if fromTmpTable=False, expects cpfcnpjIn='cpf-nome;cnpj;nome...'
    #if fromTmpTable=True, ignores cpfcnpjIn and takes the data from tmp_cnpjs and tmp_cpfnomes
    #if numeroItens=0 or <0, there is no limit
#print('INICIANDO-------------------------')
#print(f'camadasLink ({camada})-{cpfcnpjIn}-inicio: ' + time.ctime() + ' ', end='')
mensagem = {'lateral':'', 'popup':'', 'confirmar':''}
if tipoLink=='endereco':
if not caminhoDBEnderecoNormalizado:
#mensagem['popup'] = 'Não há tabela de enderecos configurada.'
return {'no': [], 'ligacao':[], 'mensagem': mensagem}
if tipoLink=='link':
if not caminhoDBLinks:
mensagem['popup'] = 'Não há tabela de links configurada.'
return {'no': [], 'ligacao':[], 'mensagem': mensagem}
con = sqlalchemy.create_engine(f"sqlite:///{caminhoDBLinks}",execution_options=gEngineExecutionOptions)
tabela = 'links'
bValorInteiro = False
query = f''' SELECT * From (
SELECT t.id1, t.id2, t.descricao, t.valor
FROM {tabela} t
INNER JOIN tmp_ids tl
ON tl.identificador = t.id1
UNION
SELECT t.id1, t.id2, t.descricao, t.valor
FROM {tabela} t
INNER JOIN tmp_ids tl
ON tl.identificador = t.id2
) ORDER by valor DESC
'''
elif tipoLink=='endereco':
con = sqlalchemy.create_engine(f"sqlite:///{caminhoDBEnderecoNormalizado}", execution_options=gEngineExecutionOptions)
#tabela = 'link_endereco'
tabela = 'link_ete'
valorMinimo=0
valorMaximo=0
numeroItens=0
bValorInteiro = True
# query = f''' SELECT t.id1, t.id2, t.descricao, t.valor
# FROM {tabela} t
# INNER JOIN tmp_ids tl ON tl.identificador = t.id1
# UNION
# SELECT t.id1, t.id2, t.descricao, t.valor
# FROM {tabela} t
# INNER JOIN tmp_ids tl ON tl.identificador = t.id2
# '''
query = f''' SELECT distinct t.id1, t.id2, t.descricao, t.valor
FROM tmp_ids tl
INNER JOIN {tabela} t ON tl.identificador = t.id1
UNION
SELECT t.id1, t.id2, t.descricao, t.valor
FROM tmp_ids tl
INNER JOIN {tabela} t ON tl.identificador = t.id2
'''
else:
print('tipoLink indefinido')
grupo = str(grupo)
nosaux = []
#nosids = set()
ligacoes = []
setLigacoes = set()
camadasIds, cnpjs, cpfnomes = criaTabelasTmpParaCamadas(con, cpfcnpjIn=cpfcnpjIn, listaIds=listaIds, grupo=grupo)
#print( 'nosids', nosids )
cnpjsInicial = copy.copy(cnpjs)
    dicRazaoSocial = {} #exceptional case: a cnpj that appears as a partner in the socios table but has no record in the empresas table
limite = numeroItens #15
#passo = numeroItens*2 #15
#cnt1 = collections.Counter() #contadores de links para o id1 e id2
#cnt2 = collections.Counter()
cntlink = collections.Counter()
for cam in range(camada):
        #in sqlite, the ORDER BY is applied after the UNION.
        #ligacoes = [] #would have to be reset on each loop
        #orig_destAnt = ()
        #the method would have to change; a separate query would be needed for each input
for k in con.execute(query + ' LIMIT ' + str(limite) if limite else query):
if not(k['id1']) or not(k['id2']):
print('####link invalido!!!', k['id1'], k['id2'], k['descricao'], k['valor'])
                continue #in case the table is inconsistent
            #limit the number of links per item
if numeroItens>0:
if cntlink[k['id1']]>numeroItens or cntlink[k['id2']]>numeroItens:
continue
if valorMinimo:
if k['valor']<valorMinimo:
continue
if valorMaximo:
if valorMaximo < k['valor']:
continue
cntlink[k['id1']] += 1
cntlink[k['id2']] += 1
#nosids.add(k['id1'])
#nosids.add(k['id2'])
if k['id1'] not in camadasIds:
camadasIds[k['id1']] = cam+1
if k['id2'] not in camadasIds:
camadasIds[k['id2']] = cam+1
            #in this case there should be no repeated link, but a check is needed if the links become generalized
# if orig_destAnt == ('PJ_'+k['cnpj'], destino):
# print('XXXXXXXXXXXXXX repetiu ligacao', orig_destAnt)
# orig_destAnt = ('PJ_'+k['cnpj'], destino)
if (k['id1'], k['id2']) not in setLigacoes: #cam+1==camada and bjson: #só pega dados na última camada
ligacao = {"origem":k['id1'], "destino":k['id2'],
"cor": "silver" if tipoLink=='endereco' else "gold", #"cor":"gray",
"camada":cam+1, "tipoDescricao":'link',"label":k['descricao'] + ':' + ajustaValor(k['valor'], bValorInteiro)}
ligacoes.append(copy.deepcopy(ligacao))
setLigacoes.add((k['id1'], k['id2']))
else:
print('####ligacao repetida. A implementar')
#.for k in con.execute(query):
listaProximaCamada = [item for item in camadasIds if camadasIds[item]>cam]
dftmptable = pd.DataFrame({'identificador' : listaProximaCamada})
dftmptable['grupo'] = grupo
dftmptable['camada'] = dftmptable['identificador'].apply(lambda x: camadasIds[x])
#dftmptable['camada'] = dftmptable['cnpj'].map(camadasIds)
#con.execute('DELETE from tmp_ids;')
#dftmptable.set_index('identificador', inplace=True)
dftmptable.to_sql('tmp_ids', con=con, if_exists='replace', index=False, dtype=dtype_tmp_ids)
        #curiously, this index makes the search slow!!!!
#con.execute('CREATE INDEX ix_tmp_ids_index ON tmp_ids ("identificador")')
limite = limite * numeroItens * 2
# if logAtivo or not bjson:
# conlog = sqlalchemy.create_engine(f"sqlite:///{camDbSqlite}", execution_options={"sqlite_raw_colnames": True})
# conlog.execute('create table if not exists log_cnpjs (cnpj text, grupo text, camada text)')
# conlog.execute('''insert into log_cnpjs
# select * from tmp_cnpjs; ''')
# conlog.execute('create table if not exists log_cpfnomes (cpf text, nome text, grupo text, camada text);')
# conlog.execute('''insert into log_cpfnomes
# select cpf, nome, grupo, cast(camada as int) from tmp_cpfnomes; ''')
# conlog = None
# if not bjson:
# print('camadasRede-fim: ' + time.ctime())
# return len(camadasIds)
#cnpjs = set([c[3:] for c in setOrigDest if c.startswith('PJ_')])
#print('nosids', nosids)
for c in camadasIds:
if c.startswith('PJ_'):
cnpjs.add(c[3:])
else:
if c.startswith('PF_'):
nome = c[15:] #supõe 'PF_12345678901-nome'
no = {'id': c, 'descricao':nome,
'camada': camadasIds[c],
'situacao_ativa': True,
'logradouro':'',
'municipio': '', 'uf': ''}
else: #elif c.startswith('EN_'):
no = {'id': c, 'descricao':'',
'camada': camadasIds[c],
'situacao_ativa': True,
'logradouro':'',
'municipio': '', 'uf': ''}
nosaux.append(copy.deepcopy(no))
# for c in cnpjs:
# camadasIds[c] = camadasIds['PJ_'+c]
if conCNPJ:
conCNPJaux =conCNPJ
else:
conCNPJaux = sqlalchemy.create_engine(f"sqlite:///{camDbSqlite}", execution_options=gEngineExecutionOptions)
cnpjs = cnpjs.difference(cnpjsInicial)
nos = dadosDosNosCNPJs(conCNPJaux, cnpjs, nosaux, dicRazaoSocial, camadasIds)
textoJson={'no': nos, 'ligacao':ligacoes, 'mensagem':mensagem}
con = None
if conCNPJ:
conCNPJaux = None
#print(' fim: ' + time.ctime())
#print('camadaLink fim: ' + ' '.join(str(time.ctime()).split()[3:]))
return textoJson
#.def camadaLink
def apagaLog():
con = sqlalchemy.create_engine(f"sqlite:///{camDbSqlite}", execution_options=gEngineExecutionOptions)
con.execute('DROP TABLE IF EXISTS log_cnpjs;')
con.execute('DROP TABLE IF EXISTS log_cpfnomes;')
con = None
def jsonDados(cpfcnpjIn):
#print('INICIANDO-------------------------')
    #cnpj data for the "Dados" popup
#print('jsonDados-inicio: ' + time.ctime())
con = sqlalchemy.create_engine(f"sqlite:///{camDbSqlite}",execution_options=gEngineExecutionOptions)
cnpjs, cpfnomes, outrosIdentificadores = separaEntrada(cpfcnpjIn)
dftmptable = pd.DataFrame({'cnpj' : list(cnpjs)})
dftmptable['grupo']=''
dftmptable['camada']=0
dftmptable.to_sql('tmp_cnpjs1', con=con, if_exists='replace', index=False, dtype=sqlalchemy.types.VARCHAR)
# query = '''
# SELECT *
# FROM empresas t
# INNER JOIN tmp_cnpjs1 tp on tp.cnpj=t.cnpj
# '''
query = '''
select t.*, te.*, ifnull(tm.municipio,t.nome_cidade_exterior) as municipio_texto, tsimples.opcao_mei
from estabelecimento t
inner join tmp_cnpjs1 tp on tp.cnpj=t.cnpj
left join empresas te on te.cnpj_basico=t.cnpj_basico
left join municipio tm on tm.cod_municipio=t.municipio
left join simples tsimples on tsimples.cnpj_basico=t.cnpj_basico
'''
for k in con.execute(query):
d = dict(k)
# capital = d['capital_social']/100 #capital social vem multiplicado por 100
# capital = f"{capital:,.2f}".replace(',','@').replace('.',',').replace('@','.')
# listalogradouro = [k.strip() for k in [d['logradouro'].strip(), d['numero'], d['complemento'].strip(';'), d['bairro']] if k.strip()]
# logradouro = ', '.join(listalogradouro)
# d['cnpj'] = f"{d['cnpj']} - {'Matriz' if d['matriz_filial']=='1' else 'Filial'}"
# d['data_inicio_ativ'] = ajustaData(d['data_inicio_ativ'])
# d['situacao'] = f"{d['situacao']} - {gdic.dicSituacaoCadastral.get(d['situacao'],'')}"
# d['data_situacao'] = ajustaData(d['data_situacao'])
# d['motivo_situacao'] = f"{d['motivo_situacao']}-{gdic.dicMotivoSituacao.get(int(d['motivo_situacao']),'')}"
# d['cod_nat_juridica'] = f"{d['cod_nat_juridica']}-{gdic.dicNaturezaJuridica.get(d['cod_nat_juridica'],'')}"
# d['cnae_fiscal'] = f"{d['cnae_fiscal']}-{gdic.dicCnae.get(int(d['cnae_fiscal']),'')}"
# d['porte'] = f"{d['porte']}-{gdic.dicPorteEmpresa.get(d['porte'],'')}"
# d['endereco'] = f"{d['tipo_logradouro']} {logradouro}"
# d['capital_social'] = capital
        capital = d['capital_social'] #capital social comes multiplied by 100
capital = f"{capital:,.2f}".replace(',','@').replace('.',',').replace('@','.')
listalogradouro = [k.strip() for k in [d['logradouro'].strip(), d['numero'], d['complemento'].strip(';'), d['bairro']] if k.strip()]
logradouro = ', '.join(listalogradouro)
d['cnpj'] = f"{d['cnpj']} - {'Matriz' if d['matriz_filial']=='1' else 'Filial'}"
d['data_inicio_atividades'] = ajustaData(d['data_inicio_atividades'])
d['situacao_cadastral'] = f"{d['situacao_cadastral']} - {gdic.dicSituacaoCadastral.get(d['situacao_cadastral'],'')}"
d['data_situacao_cadastral'] = ajustaData(d['data_situacao_cadastral'])
if d['motivo_situacao_cadastral']=='0':
d['motivo_situacao_cadastral'] = ''
else:
d['motivo_situacao_cadastral'] = f"{d['motivo_situacao_cadastral']}-{gdic.dicMotivoSituacao.get(d['motivo_situacao_cadastral'],'')}"
d['natureza_juridica'] = f"{d['natureza_juridica']}-{gdic.dicNaturezaJuridica.get(d['natureza_juridica'],'')}"
d['cnae_fiscal'] = f"{d['cnae_fiscal']}-{gdic.dicCnae.get(int(d['cnae_fiscal']),'')}"
d['porte_empresa'] = f"{d['porte_empresa']}-{gdic.dicPorteEmpresa.get(d['porte_empresa'],'')}"
d['endereco'] = f"{d['tipo_logradouro']} {logradouro}"
d['capital_social'] = capital
d['municipio'] = d['municipio_texto']
d['opcao_mei'] = d['opcao_mei'] if d['opcao_mei'] else ''
        break #only take the first record
else:
d = None
con = None
#print('jsonDados-fim: ' + time.ctime())
return d
#.def jsonDados
def ajustaValor(valor, tipoInteiro=False):
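    # Display formatting examples (values follow from the rules below):
    #   ajustaValor(2500000)     -> '2,5 MI'
    #   ajustaValor(1234)        -> '1,2 mil'
    #   ajustaValor(12.5)        -> '12,50'
    #   ajustaValor(1234, True)  -> '1234'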
if not valor:
return ''
if tipoInteiro:
return '{:.0f}'.format(valor)
if valor>=10000000.0:
v = '{:.0f}'.format(valor/1000000).replace('.',',') + ' MI'
elif valor>=1000000.0:
v = '{:.1f}'.format(valor/1000000).replace('.',',') + ' MI'
elif valor>=10000.0:
v = '{:.0f}'.format(valor/1000).replace('.',',') + ' mil'
elif valor>=1000.0:
v = '{:.1f}'.format(valor/1000).replace('.',',') + ' mil'
else:
v = '{:.2f}'.format(valor).replace('.',',')
return v
def ajustaData(d): #yyyymmdd
if d:
return d[-2:]+'/' + d[4:6] + '/' + d[:4]
else:
return ''
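# Example (illustrative): ajustaData('20210316') returns '16/03/2021'.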
def dadosParaExportar(dados):
#print('dadosParaExportar-inicio: ' + time.ctime())
con = sqlalchemy.create_engine(f"sqlite:///{camDbSqlite}", execution_options=gEngineExecutionOptions)
sids = set()
for item in dados['no']:
sids.add(item['id'])
listaCpfCnpjs = list(sids)
criaTabelasTmpParaCamadas(con, listaIds=listaCpfCnpjs, grupo='')
querysocios = '''
SELECT * from
(SELECT t.cnpj, te.razao_social, t.cnpj_cpf_socio, t.nome_socio, sq.qualificacao_socio as cod_qualificacao
FROM socios t
INNER JOIN tmp_cnpjs tl ON tl.cnpj = t.cnpj
left join estabelecimento tt on tt.cnpj=t.cnpj
LEFT JOIN empresas te on te.cnpj_basico=tt.cnpj_basico
left join socio_qualificacao sq ON sq.codigo=t.qualificacao_socio
UNION
SELECT t.cnpj, te.razao_social, t.cnpj_cpf_socio, t.nome_socio, sq.qualificacao_socio as cod_qualificacao
FROM socios t
INNER JOIN tmp_cnpjs tl ON tl.cnpj = t.cnpj_cpf_socio
left join estabelecimento tt on tt.cnpj=t.cnpj
LEFT JOIN empresas te on te.cnpj_basico=tt.cnpj_basico
left join socio_qualificacao sq ON sq.codigo=t.qualificacao_socio
UNION
SELECT t.cnpj, te.razao_social, t.cnpj_cpf_socio, t.nome_socio, sq.qualificacao_socio as cod_qualificacao
FROM socios t
INNER JOIN tmp_cpfnomes tn ON tn.nome= t.nome_socio AND tn.cpf=t.cnpj_cpf_socio
left join estabelecimento tt on tt.cnpj=t.cnpj
LEFT JOIN empresas te on te.cnpj_basico=tt.cnpj_basico
left join socio_qualificacao sq ON sq.codigo=t.qualificacao_socio
)
ORDER BY nome_socio
'''
queryempresas = '''
SELECT te.*, tm.municipio as municipio_, tm.uf as uf_, tt.*
FROM tmp_cnpjs tp
left join estabelecimento tt on tt.cnpj=tp.cnpj
LEFT JOIN empresas te on te.cnpj_basico=tt.cnpj_basico
left join municipio tm on tm.cod_municipio=tt.municipio
'''
from io import BytesIO
output = BytesIO()
writer = pd.ExcelWriter(output, engine='xlsxwriter')
#workbook = writer.book
dfe=pd.read_sql_query(queryempresas, con)
dfe['capital_social'] = dfe['capital_social'].apply(lambda capital: f"{capital/100:,.2f}".replace(',','@').replace('.',',').replace('@','.'))
dfe['matriz_filial'] = dfe['matriz_filial'].apply(lambda x:'Matriz' if x=='1' else 'Filial')
dfe['data_inicio_atividades'] = dfe['data_inicio_atividades'].apply(ajustaData)
dfe['situacao_cadastral'] = dfe['situacao_cadastral'].apply(lambda x: gdic.dicSituacaoCadastral.get(x,'') if x else '')
dfe['data_situacao_cadastral'] = dfe['data_situacao_cadastral'].apply(ajustaData)
dfe['motivo_situacao_cadastral'] = dfe['motivo_situacao_cadastral'].apply(lambda x: x + '-' + gdic.dicMotivoSituacao.get(x,'') if x else '')
    dfe['natureza_juridica'] = dfe['natureza_juridica'].apply(lambda x: x + '-' + gdic.dicNaturezaJuridica.get(x,'') if x else '')
dfe['cnae_fiscal'] = dfe['cnae_fiscal'].apply(lambda x: x +'-'+ gdic.dicCnae.get(int(x),'') if x else '')
dfe['porte_empresa'] = dfe['porte_empresa'].apply(lambda x: x+'-' + gdic.dicPorteEmpresa.get(x,'') if x else '')
dfe.to_excel(writer, startrow = 0, merge_cells = False, sheet_name = "Empresas", index=False)
dfs=pd.read_sql_query(querysocios, con)
#xxx dfs['cod_qualificacao'] = dfs['cod_qualificacao'].apply(lambda x:x + '-' + gdic.dicQualificacao_socio.get(int(x),''))
dfs.to_excel(writer, startrow = 0, merge_cells = False, sheet_name = "Socios", index=False)
#dfin = pd.DataFrame(listaCpfCnpjs, columns=['cpfcnpj'])
dfin = | pd.DataFrame.from_dict(dados['no']) | pandas.DataFrame.from_dict |
# -*- coding: utf-8 -*-
""" test function application """
import pytest
from string import ascii_lowercase
from pandas import (date_range, Timestamp,
Index, MultiIndex, DataFrame, Series)
from pandas.util.testing import assert_frame_equal, assert_series_equal
from pandas.compat import product as cart_product
import numpy as np
import pandas.util.testing as tm
import pandas as pd
from .common import MixIn
# describe
# --------------------------------
class TestDescribe(MixIn):
def test_apply_describe_bug(self):
grouped = self.mframe.groupby(level='first')
grouped.describe() # it works!
def test_series_describe_multikey(self):
ts = tm.makeTimeSeries()
grouped = ts.groupby([lambda x: x.year, lambda x: x.month])
result = grouped.describe()
assert_series_equal(result['mean'], grouped.mean(), check_names=False)
assert_series_equal(result['std'], grouped.std(), check_names=False)
assert_series_equal(result['min'], grouped.min(), check_names=False)
def test_series_describe_single(self):
ts = tm.makeTimeSeries()
grouped = ts.groupby(lambda x: x.month)
result = grouped.apply(lambda x: x.describe())
expected = grouped.describe().stack()
assert_series_equal(result, expected)
def test_series_index_name(self):
grouped = self.df.loc[:, ['C']].groupby(self.df['A'])
result = grouped.agg(lambda x: x.mean())
assert result.index.name == 'A'
def test_frame_describe_multikey(self):
grouped = self.tsframe.groupby([lambda x: x.year, lambda x: x.month])
result = grouped.describe()
desc_groups = []
for col in self.tsframe:
group = grouped[col].describe()
# GH 17464 - Remove duplicate MultiIndex levels
group_col = pd.MultiIndex(
levels=[[col], group.columns],
labels=[[0] * len(group.columns), range(len(group.columns))])
group = pd.DataFrame(group.values,
columns=group_col,
index=group.index)
desc_groups.append(group)
expected = pd.concat(desc_groups, axis=1)
tm.assert_frame_equal(result, expected)
groupedT = self.tsframe.groupby({'A': 0, 'B': 0,
'C': 1, 'D': 1}, axis=1)
result = groupedT.describe()
expected = self.tsframe.describe().T
expected.index = pd.MultiIndex(
levels=[[0, 1], expected.index],
labels=[[0, 0, 1, 1], range(len(expected.index))])
tm.assert_frame_equal(result, expected)
def test_frame_describe_tupleindex(self):
# GH 14848 - regression from 0.19.0 to 0.19.1
df1 = DataFrame({'x': [1, 2, 3, 4, 5] * 3,
'y': [10, 20, 30, 40, 50] * 3,
'z': [100, 200, 300, 400, 500] * 3})
df1['k'] = [(0, 0, 1), (0, 1, 0), (1, 0, 0)] * 5
df2 = df1.rename(columns={'k': 'key'})
pytest.raises(ValueError, lambda: df1.groupby('k').describe())
pytest.raises(ValueError, lambda: df2.groupby('key').describe())
def test_frame_describe_unstacked_format(self):
# GH 4792
prices = {pd.Timestamp('2011-01-06 10:59:05', tz=None): 24990,
pd.Timestamp('2011-01-06 12:43:33', tz=None): 25499,
pd.Timestamp('2011-01-06 12:54:09', tz=None): 25499}
volumes = {pd.Timestamp('2011-01-06 10:59:05', tz=None): 1500000000,
pd.Timestamp('2011-01-06 12:43:33', tz=None): 5000000000,
pd.Timestamp('2011-01-06 12:54:09', tz=None): 100000000}
df = pd.DataFrame({'PRICE': prices,
'VOLUME': volumes})
result = df.groupby('PRICE').VOLUME.describe()
data = [df[df.PRICE == 24990].VOLUME.describe().values.tolist(),
df[df.PRICE == 25499].VOLUME.describe().values.tolist()]
expected = pd.DataFrame(data,
index=pd.Index([24990, 25499], name='PRICE'),
columns=['count', 'mean', 'std', 'min',
'25%', '50%', '75%', 'max'])
tm.assert_frame_equal(result, expected)
# nunique
# --------------------------------
class TestNUnique(MixIn):
def test_series_groupby_nunique(self):
def check_nunique(df, keys, as_index=True):
for sort, dropna in cart_product((False, True), repeat=2):
gr = df.groupby(keys, as_index=as_index, sort=sort)
left = gr['julie'].nunique(dropna=dropna)
gr = df.groupby(keys, as_index=as_index, sort=sort)
right = gr['julie'].apply(Series.nunique, dropna=dropna)
if not as_index:
right = right.reset_index(drop=True)
assert_series_equal(left, right, check_names=False)
days = date_range('2015-08-23', periods=10)
for n, m in cart_product(10 ** np.arange(2, 6), (10, 100, 1000)):
frame = DataFrame({
'jim': np.random.choice(
list(ascii_lowercase), n),
'joe': np.random.choice(days, n),
'julie': np.random.randint(0, m, n)
})
check_nunique(frame, ['jim'])
check_nunique(frame, ['jim', 'joe'])
frame.loc[1::17, 'jim'] = None
frame.loc[3::37, 'joe'] = None
frame.loc[7::19, 'julie'] = None
frame.loc[8::19, 'julie'] = None
frame.loc[9::19, 'julie'] = None
check_nunique(frame, ['jim'])
check_nunique(frame, ['jim', 'joe'])
check_nunique(frame, ['jim'], as_index=False)
check_nunique(frame, ['jim', 'joe'], as_index=False)
def test_nunique(self):
df = DataFrame({
'A': list('abbacc'),
'B': list('abxacc'),
'C': list('abbacx'),
})
expected = DataFrame({'A': [1] * 3, 'B': [1, 2, 1], 'C': [1, 1, 2]})
result = df.groupby('A', as_index=False).nunique()
tm.assert_frame_equal(result, expected)
# as_index
expected.index = list('abc')
expected.index.name = 'A'
result = df.groupby('A').nunique()
tm.assert_frame_equal(result, expected)
# with na
result = df.replace({'x': None}).groupby('A').nunique(dropna=False)
tm.assert_frame_equal(result, expected)
# dropna
expected = DataFrame({'A': [1] * 3, 'B': [1] * 3, 'C': [1] * 3},
index=list('abc'))
expected.index.name = 'A'
result = df.replace({'x': None}).groupby('A').nunique()
tm.assert_frame_equal(result, expected)
def test_nunique_with_object(self):
# GH 11077
data = pd.DataFrame(
[[100, 1, 'Alice'],
[200, 2, 'Bob'],
[300, 3, 'Charlie'],
[-400, 4, 'Dan'],
[500, 5, 'Edith']],
columns=['amount', 'id', 'name']
)
result = data.groupby(['id', 'amount'])['name'].nunique()
index = MultiIndex.from_arrays([data.id, data.amount])
import pandas as pd
pd.set_option('display.max_columns', None)
pd.set_option('display.max_rows', None)
import numpy as np
import matplotlib.pyplot as plt
from itertools import combinations
import sys
FILEPATH='resources/logging-202009181900.csv'
def extract_data_frames(filepath) -> pd.DataFrame:
return pd.read_csv(filepath)
def extract_metrics(df:pd.DataFrame, service_name:str, training:bool, k) -> pd.DataFrame:
if 'nginx' in service_name:
return extract_gw_metrics(df, service_name, training)
prefix = 'training' if training else 'production'
df = df.filter([
f'{prefix}_metric.cpu',
f'{prefix}_metric.memory',
f'{prefix}_metric.throughput',
f'{prefix}_metric.process_time',
f'{prefix}_metric.errors',
f'{prefix}_metric.objective',
f'best_config.{service_name}-configsmarttuning.MONGO_CONNECTIONS_PER_HOST',
]).copy(deep=True).rename(columns={
f'{prefix}_metric.cpu': 'cpu',
f'{prefix}_metric.memory': 'memory',
f'{prefix}_metric.throughput': 'throughput',
f'{prefix}_metric.process_time': 'process_time',
f'{prefix}_metric.errors': 'errors',
f'{prefix}_metric.objective': 'objective',
f'best_config.{service_name}-configsmarttuning.MONGO_CONNECTIONS_PER_HOST': 'config'
})[k::2].apply(pd.to_numeric, errors='coerce').dropna().reset_index(drop=True)
df['memory'] /= 1024**2
return df
def extract_gw_metrics(df:pd.DataFrame, service_name:str, training:bool) -> pd.DataFrame:
suffix = 'train' if training else 'prod'
df = df.filter([
f'overall_metrics_{suffix}.cpu',
f'overall_metrics_{suffix}.memory',
f'overall_metrics_{suffix}.throughput',
f'overall_metrics_{suffix}.process_time',
f'overall_metrics_{suffix}.errors',
f'overall_metrics_{suffix}.objective',
]).copy(deep=True).rename(columns={
f'overall_metrics_{suffix}.cpu': 'cpu',
f'overall_metrics_{suffix}.memory': 'memory',
f'overall_metrics_{suffix}.throughput': 'throughput',
f'overall_metrics_{suffix}.process_time': 'process_time',
f'overall_metrics_{suffix}.errors': 'errors',
f'overall_metrics_{suffix}.objective': 'objective',
})[::2].apply(pd.to_numeric, errors='coerce').dropna().reset_index(drop=True)
df['memory'] /= 1024 ** 2
return df
def label_point(x, y, val, ax:plt.Axes):
a = pd.concat({'x': x, 'y': y, 'val': val}, axis=1)
#!/usr/bin/env python
### Up to date as of 10/2019 ###
'''Section 0: Import python libraries
This code has a number of dependencies, listed below.
They can be installed using the virtual environment "slab23"
that is set up using the script 'library/setup3env.sh'.
Additional functions are housed in file 'slab2functions.py'
and imported below.
There are some additional dependencies used by the function file
that do not need to be installed separately.
'''
# stdlib imports
from datetime import datetime
import os.path
import argparse
import numpy as np
from pandas import DataFrame
import pandas as pd
import warnings
import slab2functions as s2f
import math
import mapio.gmt as gmt
from functools import partial
from multiprocess import Pool
import loops as loops
from scipy import ndimage
import psutil
import cProfile
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
def main(args):
'''Section 1: Setup
In this section:
(1) Identify necessary input files
(2) Load parameters from '[slab]input.par'
(3) Define optional boxes for PDF/print testing
(4) Define output file names
(5) Gathering optional arguments, setting defaults
(6) Define search ellipsoid parameters
(7) Define Average active source profiles
(8) Define reference model (Slab1.0 and/or slab guides)
(9) Define Trench Locations
(10) Open and modify input dataset
(11) Calculate seismogenic zone thickness
(12) Record variable parameters used for this model
(13) Define search grid
(14) Identify tomography datasets
(15) Initialize arrays for Section 2 '''
print('Start Section 1 of 7: Setup')
print(' Loading inputs...')
''' ------ (1) Identify necessary input files ------ '''
trenches = 'library/misc/trenches_usgs_2017_depths.csv'
agesFile = 'library/misc/interp_age.3.2g.nc'
ageerrorsFile = 'library/misc/interp_ageerror.3.2g.nc'
polygonFile = 'library/misc/slab_polygons.txt'
addFile = 'library/misc/addagain.csv'
parFile = args.parFile
pd.options.mode.chained_assignment = None
warnings.filterwarnings("ignore", message="invalid value encountered in less")
warnings.filterwarnings("ignore", message="invalid value encountered in true_divide")
warnings.filterwarnings("ignore", message="invalid value encountered in greater")
warnings.filterwarnings("ignore", message="invalid value encountered in double_scalars")
''' ------ (2) Load parameters from '[slab]input.par' ------'''
for line in open(parFile):
plist = line.split()
if len(plist)>2:
if plist[0] == 'inFile':
inFile = plist[2]
if plist[0] == 'use_box':
use_box = plist[2]
if plist[0] == 'latmin':
latmin = np.float64(plist[2])
if plist[0] == 'latmax':
latmax = np.float64(plist[2])
if plist[0] == 'lonmin':
lonmin = np.float64(plist[2])
if plist[0] == 'lonmax':
lonmax = np.float64(plist[2])
if plist[0] == 'slab':
slab = plist[2]
if plist[0] == 'grid':
grid = np.float64(plist[2])
if plist[0] == 'radius1':
radius1 = np.float64(plist[2])
if plist[0] == 'radius2':
radius2 = np.float64(plist[2])
if plist[0] == 'sdr':
sdr = np.float64(plist[2])
if plist[0] == 'ddr':
ddr = np.float64(plist[2])
if plist[0] == 'taper':
taper = np.float64(plist[2])
if plist[0] == 'T':
T = np.float64(plist[2])
if plist[0] == 'node':
node = np.float64(plist[2])
if plist[0] == 'filt':
filt = np.float64(plist[2])
if plist[0] == 'maxdist':
maxdist = np.float64(plist[2])
if plist[0] == 'minunc':
minunc = np.float64(plist[2])
if plist[0] == 'mindip':
mindip = np.float64(plist[2])
if plist[0] == 'minstk':
minstk = np.float64(plist[2])
if plist[0] == 'maxthickness':
maxthickness = np.float64(plist[2])
if plist[0] == 'seismo_thick':
seismo_thick = np.float64(plist[2])
if plist[0] == 'dipthresh':
dipthresh = np.float64(plist[2])
if plist[0] == 'fracS':
fracS = np.float64(plist[2])
if plist[0] == 'kdeg':
kdeg = np.float64(plist[2])
if plist[0] == 'knot_no':
knot_no = np.float64(plist[2])
if plist[0] == 'rbfs':
rbfs = np.float64(plist[2])
# loop through to find latest slab input file if specified
polyname = slab
if slab == 'kur' or slab == 'izu':
polyname = 'jap'
if inFile == 'latest':
yearmax = 0
monthmax = 0
for filename in os.listdir('Input'):
if filename.endswith('.csv'):
try:
slabname,datei,instring = filename.split('_')
except:
continue
if slabname == polyname and instring == 'input.csv':
try:
monthi, yeari = datei.split('-')
except:
continue
yeari = int(yeari)
monthi = int(monthi)
if yeari >= yearmax:
yearmax = yeari
inFile = 'Input/%s'%filename
if monthi > monthmax:
monthmax = monthi
inFile = 'Input/%s'%filename
print (' using input file: %s'%inFile)
if slab == 'mue' or slab == 'phi' or slab == 'cot' or slab == 'sul' or slab == 'ryu':
if args.undergrid is None:
if slab == 'mue':
print ('This slab is truncated by the Caribbean (car) slab, argument -u cardepgrid is required')
print ('Exiting .... ')
exit()
if slab == 'cot':
print ('This slab is truncated by the Halmahera (hal) slab, argument -u haldepgrid is required')
print ('Exiting .... ')
exit()
if slab == 'sul':
print ('This slab is truncated by the Halmahera (hal) slab, argument -u haldepgrid is required')
print ('Exiting .... ')
exit()
if slab == 'phi':
print ('This slab is truncated by the Halmahera (hal) slab, argument -u haldepgrid is required')
print ('Exiting .... ')
exit()
if slab == 'ryu':
print ('This slab is truncated by the Kurils-Japan (kur) slab, argument -u kurdepgrid is required')
print ('Exiting .... ')
exit()
else:
undergrid = args.undergrid
''' ------ (4) Define output file names ------ '''
date = datetime.today().strftime('%m.%d.%y')
now = datetime.now()
time = '%s.%s' % (now.hour, now.minute)
folder = '%s_slab2_%s' % (slab, date)
os.system('mkdir Output/%s'%folder)
outFile = 'Output/%s/%s_slab2_res_%s.csv' % (folder, slab, date)
dataFile = 'Output/%s/%s_slab2_dat_%s.csv' % (folder, slab, date)
nodeFile = 'Output/%s/%s_slab2_nod_%s.csv' % (folder, slab, date)
fillFile = 'Output/%s/%s_slab2_fil_%s.csv' % (folder, slab, date)
rempFile = 'Output/%s/%s_slab2_rem_%s.csv' % (folder, slab, date)
clipFile = 'Output/%s/%s_slab2_clp_%s.csv' % (folder, slab, date)
these_params = 'Output/%s/%s_slab2_par_%s.csv' % (folder, slab, date)
datainfo = 'Output/%s/%s_slab2_din_%s.csv' % (folder, slab, date)
nodeinfo = 'Output/%s/%s_slab2_nin_%s.csv' % (folder, slab, date)
suppFile = 'Output/%s/%s_slab2_sup_%s.csv' % (folder, slab, date)
nodexFile = 'Output/%s/%s_slab2_nox_%s.csv' % (folder, slab, date)
nodeuFile = 'Output/%s/%s_slab2_nou_%s.csv' % (folder, slab, date)
depTextFile = 'Output/%s/%s_slab2_dep_%s.txt' % (folder, slab, date)
depGridFile = 'Output/%s/%s_slab2_dep_%s.grd' % (folder, slab, date)
strTextFile = 'Output/%s/%s_slab2_str_%s.txt' % (folder, slab, date)
strGridFile = 'Output/%s/%s_slab2_str_%s.grd' % (folder, slab, date)
dipTextFile = 'Output/%s/%s_slab2_dip_%s.txt' % (folder, slab, date)
dipGridFile = 'Output/%s/%s_slab2_dip_%s.grd' % (folder, slab, date)
uncTextFile = 'Output/%s/%s_slab2_unc_%s.txt' % (folder, slab, date)
uncGridFile = 'Output/%s/%s_slab2_unc_%s.grd' % (folder, slab, date)
thickTextFile = 'Output/%s/%s_slab2_thk_%s.txt' % (folder, slab, date)
thickGridFile = 'Output/%s/%s_slab2_thk_%s.grd' % (folder, slab, date)
savedir = 'Output/%s'%folder
''' ------ (3) Define optional boxes for PDF/print testing ------'''
if args.test is not None:
testlonmin = args.test[0]
testlonmax = args.test[1]
testlatmin = args.test[2]
testlatmax = args.test[3]
if testlonmin < 0:
testlonmin += 360
if testlonmax < 0:
testlonmax += 360
testarea = [testlonmin, testlonmax, testlatmin, testlatmax]
printtest = True
os.system('mkdir Output/PDF%s' % (slab))
os.system('mkdir Output/multitest_%s' % (slab))
f = open(datainfo, 'w+')
f.write('dataID, nodeID, used_or_where_filtered')
f.write('\n')
f.close()
f = open(nodeinfo, 'w+')
f.write('nodeID, len(df), status, details')
f.write('\n')
f.close()
else:
# an area not in range of any slab polygon
testarea = [220, 230, 15, 20]
printtest = False
''' --- (5) Gathering optional arguments, setting defaults ---'''
if use_box == 'yes':
check = 1
slab = s2f.rectangleIntersectsPolygon(lonmin, lonmax, latmin,
latmax, polygonFile)
if isinstance(slab, str):
slab = slab
else:
try:
slab = slab[0]
except:
print('System exit because box does not intersect slab polygon')
raise SystemExit()
elif use_box == 'no':
check = 0
lon1, lon2, lat1, lat2 = s2f.determine_polygon_extrema(slab,
polygonFile)
lonmin = float(lon1)
lonmax = float(lon2)
latmin = float(lat1)
latmax = float(lat2)
else:
print('use_box in slab2input.par must be "yes" or "no"')
raise SystemExit()
''' ------ (6) Define search ellipsoid parameters ------'''
alen = radius1
blen = radius2
ec = math.sqrt(1-((math.pow(blen, 2))/(math.pow(alen, 2))))
mdist = alen * ec
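# ec is the eccentricity of the search ellipse with semi-axes alen and blen;
# mdist = alen * ec is its center-to-focus (focal) distance.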
''' ------ (7) Define Average active source profiles ------'''
# Different because alu is variable E/W
if slab == 'alu':
AA_data = pd.read_csv('library/avprofiles/alu_av5.csv')
global_average = False
elif slab == 'him':
AA_data = pd.read_csv('library/avprofiles/him_av.csv')
global_average = False
elif slab == 'kur' or slab == 'izu':
AA_source = 'library/avprofiles/%s_av.txt' % 'jap'
AA_data = pd.read_table(AA_source, delim_whitespace=True,\
header=None, names=['dist', 'depth'])
AA_data = AA_data[AA_data.dist < 125]
global_average = False
# Use RF data like AA data to constrain flat slab in Mexico
elif slab == 'cam':
AA_source = 'library/avprofiles/%s_av.txt' % slab
AA_data = pd.read_table(AA_source, delim_whitespace=True,\
header=None, names=['dist', 'depth'])
RF_data = pd.read_csv('library/avprofiles/cam_RF_av.csv')
AA_data = pd.concat([AA_data,RF_data],sort=True)
global_average = False
else:
global_average = False
# See if there is an average active source profile for this slab
try:
AA_source = 'library/avprofiles/%s_av.txt' % slab
AA_data = pd.read_table(AA_source, delim_whitespace=True,\
header=None, names=['dist', 'depth'])
# If there is no profile for this slab, use the global profile
except:
AA_global = pd.read_csv('library/avprofiles/global_as_av2.csv')
AA_data = AA_global[['dist', 'depth']]
global_average = True
if slab == 'phi' or slab == 'mue':
AA_data = AA_data[AA_data.dist < 10]
if slab == 'cot':
AA_data = AA_data[AA_data.dist < 10]
if slab == 'ita' or slab == 'puy':
AA_data = AA_data[AA_data.dist < 1]
''' ------ (8) Define reference model (Slab1.0 and/or slab guides) ------'''
polyname = slab
if slab == 'kur' or slab == 'izu':
polyname = 'jap'
# Search for slab guides in library/slabguides
slabguide = None
slabguide2 = None
for SGfile in os.listdir('library/slabguides'):
if SGfile[0:3] == polyname:
SGfile1 = SGfile
slabguide = gmt.GMTGrid.load('library/slabguides/%s'%SGfile1)
# Find secondary slab guide for regions where there are two
if polyname == 'sum' or polyname == 'man' or polyname == 'phi' or polyname =='sam' or polyname == 'sco' or polyname == 'mak' or polyname == 'jap':
for f in os.listdir('library/slabguides'):
if f[0:3] == polyname and f != SGfile:
print ('f',f)
SGfile2 = f
slabguide2 = gmt.GMTGrid.load('library/slabguides/%s'%SGfile2)
break
break
# Get Slab1.0 grid where applicable
try:
depgrid = s2f.get_grid(slab, 'depth')
except:
print (' Slab1.0 does not exist in this region, using slab guide')
depgrid = gmt.GMTGrid.load('library/slabguides/%s'%SGfile1)
slabguide = None
# Calculate strike and dip grids
strgrid, dipgrid = s2f.mkSDgrd(depgrid)
slab1data = s2f.mkSlabData(depgrid, strgrid, dipgrid, printtest)
slab1data.to_csv('gradtest.csv',header=True,index=False)
# Add slab guide data to Slab1.0 grids where necessary
if slabguide is not None:
print ('slab guide for this model:',slabguide)
guidestr, guidedip = s2f.mkSDgrd(slabguide)
guidedata = s2f.mkSlabData(slabguide, guidestr, guidedip, printtest)
if SGfile1 == 'phi_SG_north':
guidedata = guidedata[guidedata.lat>14]
elif slab == 'ryu':
guidedata = guidedata[guidedata.lon>137]
slab1data = slab1data[slab1data.lat<=137]
slab1data = pd.concat([slab1data, guidedata],sort=True)
slab1data = slab1data.reset_index(drop=True)
if slabguide2 is not None:
print ('secondary slab guide for this model:',slabguide2)
guidestr, guidedip = s2f.mkSDgrd(slabguide2)
guidedata = s2f.mkSlabData(slabguide2, guidestr, guidedip, printtest)
if SGfile2 == 'phi_SG_north':
guidedata = guidedata[guidedata.lat>14]
slab1data = pd.concat([slab1data, guidedata],sort=True)
#!/usr/bin/env python
# coding: utf-8
# In[35]:
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
get_ipython().run_line_magic('matplotlib', 'inline')
# In[36]:
heart = pd.read_csv('heart_modified.csv')
"""
The DataOracle class reads real historical trade data (not price or quote)
from a given date in history to be resimulated. It stores these trades
in a time-sorted array at maximum resolution. It can be called by
certain "background" agents to obtain noisy observations about the "real"
price of a stock at a current time. It is intended to provide some realistic
behavior and "price gravity" to the simulated market -- i.e. to make the
market behave something like historical reality in the absence of whatever
experiment we are running with more active agent types.
"""
import datetime as dt
import os
from bisect import bisect_left
from math import sqrt, exp
from typing import List
import numpy as np
import pandas as pd
from joblib import Memory
from backtesting.oracle.base import Oracle
from backtesting.typing import FileName
from backtesting.utils.util import log_print
mem = Memory(cachedir='./cache', verbose=0)
__all__ = (
"DataOracle",
"ExternalFileOracle",
"MeanRevertingOracle",
"SparseMeanRevertingOracle"
)
# @mem.cache
def read_trades(trade_file: FileName, symbols: List[str]) -> pd.DataFrame:
log_print("Data not cached. This will take a minute...")
df = pd.read_pickle(trade_file, compression='bz2')
from itertools import product
import numpy as np
import pandas as pd
def RC_dummies(df):
df["R"] = df["R"].astype(str)
df["C"] = df["C"].astype(str)
df = pd.get_dummies(df)
#!/usr/bin/env python3
from argparse import ArgumentParser
from collections import defaultdict
import os
import sys
import matplotlib
matplotlib.use('pdf')
matplotlib.rcParams['pdf.fonttype'] = 42
matplotlib.rcParams['ps.fonttype'] = 42
matplotlib.rcParams['font.size'] = 12
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import scipy
import seaborn as sns
import statsmodels.stats.contingency_tables
import tqdm
from janus.pipeline import pipeline_to_tree as pt
from janus.repair.local_rules import (
is_match_edit,
edit_to_str,
)
from janus import utils
def add_timestamp_percentile(df, percentiles, percentiles_labels):
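# Bin each (dataset, id) pair into a timestamp percentile bucket, computed per
# dataset with pd.qcut, and return one bucket label per row of df.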
unique_ts = df.groupby(["dataset", "id"]).head(1)
percentile_map = {}
for d in unique_ts["dataset"].unique():
unique_ts_d = unique_ts[unique_ts["dataset"] == d]
unique_ts_d = unique_ts_d.sort_values("timestamp", ascending=True)
percents = pd.qcut(
unique_ts_d["timestamp"], percentiles, labels=percentiles_labels)
for i, p in zip(unique_ts_d["id"], percents):
percentile_map[(d, i)] = p
return [percentile_map[(d, i)] for d, i in zip(df["dataset"], df["id"])]
def prepare_df(df, compute_dist=False):
df_orig = df[df["type"] == "orig"]
df_orig = df_orig[~df_orig["failed"]]
# make sure we only consider dataset/id where we have the orig
# for all strategies
unique_strategies = df["strategy"].unique()
n_strategies = len(unique_strategies)
strategy_cts = df_orig.groupby(
["dataset", "id"])["strategy"].agg(lambda x: len(set(x)))
strategy_cts = strategy_cts.to_frame(name="strategy_cts").reset_index()
df_orig = pd.merge(df_orig, strategy_cts, how="left", on=["dataset", "id"])
df_orig = df_orig[df_orig["strategy_cts"] == n_strategies]
df_repaired = df[df["type"] == "repair"]
df_repaired = df_repaired[~df_repaired["failed"]]
df_repaired = df_repaired.sort_values("mean_test_score", ascending=False)
# there should only be one score per dataset/id/strategy
assert df_repaired.groupby(["dataset", "strategy", "id"]).size().max() == 1
df_orig = df_orig[[
"dataset",
"strategy",
"id",
"test_scores",
"mean_test_score",
"graph",
"timestamp",
"repair_time",
]]
df_repaired = df_repaired[[
"dataset",
"strategy",
"id",
"mean_test_score",
"graph",
"test_scores",
"repair_time",
]]
df_combined = pd.merge(
df_orig,
df_repaired,
how="left",
on=["dataset", "strategy", "id"],
suffixes=("_orig", "_repaired"),
)
if compute_dist:
dist = [
None if pd.isnull(repaired) else pt.tree_edit_distance(
pt.to_pipeline(orig), pt.to_pipeline(repaired))
for orig, repaired in tqdm.tqdm(
list(
zip(df_combined["graph_orig"],
df_combined["graph_repaired"])))
]
else:
dist = np.nan
df_combined["distance"] = dist
# assign "row" to timestamp-based quartile
df_combined["ts_quartile"] = add_timestamp_percentile(
df_combined,
[0.0, 0.25, 0.5, 0.75, 1.0],
["0-0.25", "0.25-0.5", "0.5-0.75", "0.75-1.0"],
)
df_combined["ts_decile"] = add_timestamp_percentile(
df_combined, np.arange(0, 1.1, 0.1),
(lambda x: ["{:.1f}-{:.1f}".format(i, j) for i, j in zip(x, x[1:])])(
np.arange(0, 1.1, 0.1)))
df_combined["score_diff"] = df_combined[
"mean_test_score_repaired"] - df_combined["mean_test_score_orig"]
df_combined["had_effect"] = df_combined["score_diff"].abs() >= 0.01
df_combined["improved"] = (df_combined["score_diff"] >
0) & df_combined["had_effect"]
df_combined["improved_int"] = df_combined["improved"].astype(int)
df_combined["hurt"] = (df_combined["score_diff"] <
0) & df_combined["had_effect"]
df_combined["has_repair"] = ~pd.isnull(
df_combined["mean_test_score_repaired"])
df_combined["dummy"] = 1
return df_combined
def stat_by_strategy(df, col, groupcols=None, op="mean"):
if groupcols is None:
groupcols = ["dataset", "strategy"]
assert "strategy" in groupcols
stat_df = df.groupby(groupcols)[col].agg(op)
stat_df = stat_df.to_frame(name=col)
stat_df = stat_df.reset_index()
index_cols = list(groupcols)
index_cols.remove("strategy")
pv_stat_df = pd.pivot_table(
data=stat_df, index=index_cols, columns="strategy", values=col)
pv_stat_df = pv_stat_df.reset_index()
pv_stat_df.columns.name = None
return pv_stat_df
def summarize_df(df):
df = df.copy()
# now append version with "overall" (i.e. agg) ts_quartile
df_overall = df.copy()
df_overall["ts_quartile"] = "overall"
df = pd.concat((df, df_overall), axis=0).reset_index(drop=True)
groupcols = ["dataset", "ts_quartile", "strategy"]
print("Stat: Number of pipelines in experiments")
print(stat_by_strategy(df, "dummy", groupcols=groupcols, op="sum"), "\n")
print("Stat: Fraction w/ available 'repair'")
print(stat_by_strategy(df, "has_repair", groupcols=groupcols, op="mean"),
"\n")
print("Stat: Fraction improved")
print(stat_by_strategy(df, "improved", groupcols=groupcols, op="mean"),
"\n")
print("Stat: Total number improved")
print(stat_by_strategy(
df[df["ts_quartile"] == "overall"],
"improved_int",
groupcols=["dataset", "strategy"],
op="sum"), "\n")
print("Stat: Mean score diff")
print(stat_by_strategy(df, "score_diff", groupcols=groupcols, op="mean"),
"\n")
print("Stat: Mean score diff (if improvement)")
print(stat_by_strategy(
df[df["score_diff"] > 0], "score_diff", groupcols=groupcols,
op="mean"), "\n")
df = df.sort_values("score_diff", ascending=False)
best_df = df.groupby(["dataset", "ts_quartile", "id"]).head(1)
print(
"Stat: Number of dataset/pipeline where a strategy gets largest score improvement"
)
print(stat_by_strategy(best_df, "dummy", groupcols=groupcols, op="sum"))
print("Stat: Mean distance (if improvement)")
print(stat_by_strategy(
df[df["score_diff"] > 0], "distance", groupcols=groupcols, op="mean"),
"\n")
print("Stat: Mean repair time")
print(stat_by_strategy(
df,
"repair_time_repaired",
groupcols=["dataset", "strategy"],
op="mean"))
def get_palette(df):
strats = sorted(df["strategy"].unique())
colors = sns.color_palette("colorblind", len(strats))
return strats, {s: c for s, c in zip(strats, colors)}
def get_bootstrap(func, vals, num_iters, low, hi, random_state=None):
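# Basic (reverse-percentile) bootstrap CI: resample vals, apply func to each
# resample, and shift the observed estimate by the high/low percentiles of the
# bootstrap differences.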
num_obs = len(vals)
rng = np.random.RandomState(random_state)
boot_samples = rng.choice(vals, size=(num_iters, num_obs))
boot_ests = np.apply_along_axis(func, 1, boot_samples)
obs_val = func(vals)
boot_diffs = boot_ests - obs_val
low_diff = np.percentile(boot_diffs, low)
hi_diff = np.percentile(boot_diffs, hi)
# note the order of the differences
result = (obs_val - hi_diff, obs_val, obs_val - low_diff)
assert result[0] < result[1] < result[2]
return result
def get_rng(strategy):
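# derive a deterministic per-strategy random seed from the strategy name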
return sum(ord(c) for c in strategy)
def table_fraction_outcome(df, column, get_bold, random_state=None):
mean_with_ci = lambda d, rs: get_bootstrap(
np.mean, d, 1000, 5.0, 95.0, random_state=rs
)
df_res = df.groupby(["dataset", "strategy"]).apply(
lambda d: mean_with_ci(d[column].values, get_rng(d["strategy"].iloc[0]))
)
df_res = df_res.to_frame(name="change_with_ci").reset_index()
format_text = "{:.2f} ({:.2f}-{:.2f})"
df_res["val_text"] = df_res["change_with_ci"].map(
lambda t: format_text.format(t[1], t[0], t[2]))
df_res["mean_val"] = df_res["change_with_ci"].map(lambda t: t[1])
# bold the highest value per dataset
df_res["rank"] = df_res.groupby(["dataset"])["mean_val"].rank(
"dense", ascending=False)
df_res["with_bold"] = df_res.groupby(["dataset"])["mean_val"].apply(get_bold)
df_res["val_text"] = [
"\\textbf{{{}}}".format(txt) if bold else txt
for bold, txt in zip(df_res["with_bold"], df_res["val_text"])
]
df_res = df_res[["dataset", "strategy", "val_text"]]
df_pv = pd.pivot(
df_res, index="dataset", columns="strategy", values="val_text")
df_pv = df_pv.reset_index()
# escape ourselves
df_pv["dataset"] = df_pv["dataset"].map(lambda x: x.replace("_", "\\_"))
return df_pv
def table_fraction_repaired(df, random_state=None):
return table_fraction_outcome(
df, "improved", lambda x: x == max(x), random_state=random_state)
def table_fraction_hurt(df, random_state=None):
return table_fraction_outcome(
df, "hurt", lambda x: x == min(x), random_state=random_state)
def plot_fraction_repaired(df):
fig, ax = plt.subplots(1)
hue_order, palette = get_palette(df)
sns.barplot(
data=df,
x="improved",
y="dataset",
hue="strategy",
estimator=np.mean,
# linestyles=["None"] * len(df["strategy"].unique()),
dodge=True,
ci=95,
ax=ax,
orient="h",
palette=palette,
hue_order=hue_order,
)
ax.set_xlabel("Fraction of Pipelines Improved")
ax.set_ylabel("Dataset")
plt.legend(
loc="center right", bbox_to_anchor=(0.0, 1.05, 1., .102), ncol=2)
plt.tight_layout()
return ax
def plot_fraction_repaired_rank(df):
# rank systems
# by fraction repaired
# and then count the ranks
fig, ax = plt.subplots(1)
df_frac = df.groupby(["dataset", "strategy"])[["improved"]].mean()
df_frac = df_frac.reset_index()
df_frac["rank"] = df_frac.groupby(["dataset"])["improved"].rank(
"dense", ascending=False)
hue_order, palette = get_palette(df_frac)
sns.countplot(
data=df_frac,
x="rank",
hue="strategy",
dodge=True,
ax=ax,
palette=palette,
hue_order=hue_order,
)
ax.set_xlabel("Rank")
ax.set_ylabel("Datasets")
plt.legend(
loc="center right", bbox_to_anchor=(0.0, 1.05, 1., .102), ncol=2)
plt.tight_layout()
return ax
def plot_fraction_candidate(df):
fig, ax = plt.subplots(1)
df = df.copy()
sns.barplot(
data=df,
x="has_repair",
y="dataset",
hue="strategy",
estimator=np.mean,
# linestyles=["None"] * len(df["strategy"].unique()),
dodge=True,
ci=95,
ax=ax,
orient="h",
)
ax.set_xlabel("Fraction of Pipelines with Repair Candidate")
ax.set_ylabel("Dataset")
plt.legend(
loc="center right", bbox_to_anchor=(0.0, 1.05, 1., .102), ncol=2)
plt.tight_layout()
return ax
def fraction_repaired_over_time(df):
fig, ax = plt.subplots(1)
sns.pointplot(
data=df,
x="ts_decile",
y="improved",
hue="strategy",
estimator=np.mean,
dodge=True,
ci=95,
ax=ax,
)
plt.xticks(rotation=90)
ax.set_xlabel("Timestamp Decile")
ax.set_ylabel("Fraction of Pipelines Improved")
plt.legend(loc="best", title=None, ncol=2)
plt.tight_layout()
return ax
def plot_score_improvement(df):
all_strategies = df["strategy"].unique()
n_all_strategies = len(all_strategies)
df = df[df["improved"]]
check = df.groupby(["dataset",
"id"])["strategy"].agg(lambda x: len(set(x)))
check = check.to_frame(name="num_strategies").reset_index()
check["drop"] = check["num_strategies"] < n_all_strategies
df = pd.merge(df, check, how="left", on=["dataset", "id"])
df["drop"] = df["drop"].fillna(True)
df = df[~df["drop"]]
fig, ax = plt.subplots(1)
# sns.barplot(
# data=df,
# x="dataset",
# y="score_diff",
# hue="strategy",
# ci=95,
# ax=ax,
# )
sns.pointplot(
data=df,
x="dataset",
y="score_diff",
hue="strategy",
linestyles=["None"] * len(df["strategy"].unique()),
estimator=np.mean,
dodge=10,
ci=95,
ax=ax,
)
ax.set_ylabel("Dataset")
ax.set_xlabel("Score Improvement\n(both improved)")
plt.legend(loc="center right", bbox_to_anchor=(0., 1.02, 1., .102), ncol=2)
plt.tight_layout()
return ax
def plot_score_improvement_st_distance(df):
all_strategies = df["strategy"].unique()
n_all_strategies = len(all_strategies)
# only improvements
df = df[df["improved"]]
# now only consider cases where the random-mutation
# had an edit distance <= the corresponding edit distance
# for the janus repair
df_janus = df[df["strategy"] == "Janus"]
df_janus = df_janus[["dataset", "id", "distance"]]
df_janus = df_janus.rename(columns={"distance": "distance_compare"})
assert df_janus.shape[0] > 0
df = pd.merge(df, df_janus, how="left", on=["dataset", "id"])
df = df[df["distance"] <= df["distance_compare"]]
# both janus and random-mutation have improvement and
# sat the edit distance constraint mentioned above
check = df.groupby(["dataset",
"id"])["strategy"].agg(lambda x: len(set(x)))
check = check.to_frame(name="num_strategies").reset_index()
check["drop"] = check["num_strategies"] < n_all_strategies
df = pd.merge(df, check, how="left", on=["dataset", "id"])
df["drop"] = df["drop"].fillna(True)
df = df[~df["drop"]]
fig, ax = plt.subplots(1)
sns.barplot(
data=df,
x="dataset",
y="score_diff",
hue="strategy",
ci=95,
ax=ax,
)
plt.xticks(rotation=90)
ax.set_xlabel("Dataset")
ax.set_ylabel(
"Score Improvement\n(both improved and\nsatisfy distance threshold)")
plt.legend(loc="best", title=None, ncol=2)
plt.tight_layout()
return ax
def plot_fraction_repaired_st_distance(df):
# now only consider cases where the random-mutation
# had an edit distance <= the corresponding edit distance
# for the janus repair
df_janus = df[df["strategy"] == "Janus"]
df_janus = df_janus[["dataset", "id", "distance"]]
df_janus = df_janus.rename(columns={"distance": "distance_compare"})
assert df_janus.shape[0] > 0
df = pd.merge(df, df_janus, how="left", on=["dataset", "id"])
df = df[df["distance"] <= df["distance_compare"]]
fig, ax = plt.subplots(1)
sns.pointplot(
data=df,
x="dataset",
y="improved",
hue="strategy",
estimator=np.mean,
linestyles=["None"] * len(df["strategy"].unique()),
dodge=True,
ci=95,
ax=ax,
)
plt.xticks(rotation=90)
ax.set_xlabel("Dataset")
ax.set_ylabel("Fraction of Pipelines Improved\n(subject to distance)")
plt.legend(loc="best", title=None, ncol=2)
plt.tight_layout()
return ax
def plot_cdf_score_diff(df, filter_fun=None):
df = df.copy()
# don't count pipelines that were originally broken
df = df[~pd.isnull(df["mean_test_score_orig"])]
# we compute this only for cases where the tools
# produce a repair candidate (i.e. not nan)
df = df[~pd.isnull(df["mean_test_score_repaired"])]
# df["mean_test_score_repaired"] = df["mean_test_score_repaired"].fillna(0.0)
df["score_diff"] = df["mean_test_score_repaired"] - df[
"mean_test_score_orig"]
assert not pd.isnull(df["score_diff"]).any()
if filter_fun is not None:
df = filter_fun(df)
hue_order, palette = get_palette(df)
fig, ax = plt.subplots(1)
sns.ecdfplot(
data=df,
x="score_diff",
hue="strategy",
ax=ax,
palette=palette,
hue_order=hue_order,
)
ax.set_xlabel("Pipeline score change")
ax.set_ylabel("Empirical CDF")
# add avg values to the legend
df_mean = df.groupby("strategy")["score_diff"].mean()
df_mean = df_mean.to_frame(name="mean").reset_index()
mean_map = {s: m for s, m in zip(df_mean["strategy"], df_mean["mean"])}
# build the legend manually; the legend generated by ecdfplot breaks when we
# try to set the number of columns
handles = [
matplotlib.patches.Patch(
color=palette[h],
label="{} (avg={}{:.3f})".format(h, "+" if mean_map[h] > 0 else "",
mean_map[h]),
) for h in hue_order
]
plt.legend(loc="best", ncol=1, handles=handles)
# plt.legend(
# loc="center right", bbox_to_anchor=(0.0, 1.05, 1., .102), ncol=2)
plt.tight_layout()
return ax
def plot_dist_score_diff(df):
df = df.copy()
# don't count pipelines that were originally broken
df = df[~pd.isnull(df["mean_test_score_orig"])]
# but for cases where the repaired is broken
# we say that it gets score 0.0
df["mean_test_score_repaired"] = df["mean_test_score_repaired"].fillna(0.0)
df["score_diff"] = df["mean_test_score_repaired"] - df[
"mean_test_score_orig"]
assert not pd.isnull(df["score_diff"]).any()