prompt (string, lengths 19–1.03M) | completion (string, lengths 4–2.12k) | api (string, lengths 8–90) |
---|---|---|
import pciSeq
from pciSeq import utils
import pandas as pd
def preprocess_spots(spots_file, conversion_factor = 0.1625):
spots = | pd.read_csv(spots_file) | pandas.read_csv |
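# The row above is truncated at the read_csv call. A minimal sketch of what such a
# preprocessing step often does with a conversion_factor, assuming the spots table has
# pixel-coordinate columns named 'x' and 'y' (both the column names and the rescaling
# step are assumptions, not taken from the original source):
import pandas as pd

def scale_spot_coordinates(spots: pd.DataFrame, conversion_factor: float = 0.1625) -> pd.DataFrame:
    out = spots.copy()
    out[["x", "y"]] = out[["x", "y"]] * conversion_factor  # pixels -> physical units
    return out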
file = '/home/khan_mo/kratos/thesis/important Git Lib/ast/src/predict/predictCSV/eventfiles/overlap_133-0020_201031_231556_indoors_ts30.txt'
import pandas as pd
import matplotlib.pyplot as plt
from datetime import datetime
from datetime import timedelta
import matplotlib.dates as dt
import numpy as np
def hbarplot():
times = [timedelta(0, 737),
timedelta(0, 110),
timedelta(0, 356),
timedelta(0, 171),
timedelta(0, 306)]
start_date = datetime(1900, 1, 1, 0, 0, 0)
times_datetime = [start_date + times[i] for i in range(len(times))]
# pandas requires numerical data on dependent axis
times_num = dt.date2num(times_datetime)
# to make times_num proportionally correct
for i in range(len(times_num)):
times_num[i] -= dt.date2num(start_date)
pass
df = pd.DataFrame([times_num], index=['Classes'])
fig, ax1 = plt.subplots(1, 1, sharex=True, sharey=True)
df.plot(kind='barh', ax=ax1, stacked=True)
plt.show()
def someplot():
ts = pd.Series(np.random.randn(20), index=pd.date_range("1/1/2000", periods=20))
ts = ts.cumsum()
fig, ax1 = plt.subplots(1, 1, sharex=True, sharey=True)
ts.plot(kind='barh', ax=ax1, stacked=True)
plt.show()
def freq():
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
import numpy as np
dates = | pd.date_range(start='2020-02-14 20:30', end='2020-02-24', freq='10min') | pandas.date_range |
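# The loop in hbarplot above subtracts date2num(start_date) so that each stacked segment's
# length equals its duration in days (matplotlib date numbers count days). A quick check of
# that arithmetic, using the first timedelta from the list:
from datetime import datetime, timedelta
import matplotlib.dates as mdates

_start = datetime(1900, 1, 1)
_length_days = mdates.date2num(_start + timedelta(seconds=737)) - mdates.date2num(_start)
assert abs(_length_days - 737 / 86400) < 1e-9  # 737 seconds expressed in days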
import sys
import pandas as pd
from sqlalchemy import create_engine
def load_data(messages_filepath, categories_filepath):
"""
loads:
The specified message and category data
Args:
messages_filepath (string): The file path of the messages csv
categories_filepath (string): The file path of the categories csv
Returns:
df (pandas dataframe): Merged messages and categories df, merged on ID
"""
#import data from csv
messages = pd.read_csv(messages_filepath)
global categories
categories = pd.read_csv(categories_filepath)
#merge Data on ID
#this could also work in a pinch
#df = messages.merge(categories, on='id', how='left')
df = | pd.merge(messages, categories, on='id') | pandas.merge |
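# A small, self-contained illustration of the merge performed by load_data above: pd.merge
# defaults to an inner join on the shared 'id' column, so unmatched ids are dropped. The toy
# frames below stand in for the real message/category CSVs.
import pandas as pd

_messages = pd.DataFrame({"id": [1, 2, 3], "message": ["a", "b", "c"]})
_categories = pd.DataFrame({"id": [1, 2], "categories": ["related-1", "related-0"]})
print(pd.merge(_messages, _categories, on="id"))  # only ids 1 and 2 survive the inner join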
# -*- coding: utf-8 -*-
import pandas as pd
import numpy as np
import json, sys
import matplotlib.pyplot as plt
# declare functions for + - * / and use them from another, more general function
def suma(numeros):
suma = sum(numeros)
return suma ## Nothing is returned
numeros = [1,2]
print("programa terminado")
def resta(numeros):
resta = resta(numeros) # you named the variable the same as the function, and you are calling the function in an infinite loop (think about it)
return resta
numeros = [4 ,3]
print("programa terminado")
def multiplicación (numeros):
multiplicación = multiply(numeros)
return multiplicación
numeros =[2, 3]
print ("programa terminado")
def división (numeros):
división = divide(numeros)
return división
numeros = [7,2]
print ("programa terminado")
# print several mathematical operations to the console
x = 2
y = 5
z1 = x + y
z2 = (x*y)/2
print(z1)
print(z2)
# If you use the built-in operations, what do you need the ones you declared for?
# show comparisons between the following types (int, float, string)
# Where are they shown in the console?
x = str('421')
print (x+x)
str('432')
y = 4/float('3')
print(y)
b = 4//3
print(b)
x = 5
print(type(x))
#PANDAS:
#1# load an Excel file with numbers
df1 = pd.read_excel('tabla.xls', index_col = 0)
#1.1# show the sum of the first column
df1 = pd.DataFrame(df1)
df2 = df1.sum()
print (df2)
# I'm almost there: this gives me the sum of exactly the other two columns...
# but I can't find the solution; with (axis=0) I get the same result, and with
# axis=1 it sums columns two and three, and any other option gives an error. Why?
#2# load a text file with pandas
df2 = pd.read_csv('texto_tarea.txt')
#3# add a column to either of the previous files
df1 = pd.read_excel('tabla.xls', index_col = 0)
columna_add = | pd.Series([0,1,2,3], name ="columna_add") | pandas.Series |
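# One minimal way to finish exercise #3 above: attach the new Series as a column. This is a
# sketch on a toy frame (the real df1 comes from 'tabla.xls'); pd.concat aligns on the index,
# so the Series and the frame must share the same index labels.
import pandas as pd

_df_demo = pd.DataFrame({"a": [10, 20, 30, 40]})
_columna_add = pd.Series([0, 1, 2, 3], name="columna_add")
print(pd.concat([_df_demo, _columna_add], axis=1))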
# -*- coding: utf-8 -*-
"""
Created on Wed Jan 13 21:49:47 2021
@author: ALidtke
"""
import numpy as np
import pandas
import re
import copy
def _getZoneData(line):
""" Extract TecPlot formatted zone data from a line, return as two lists of
keys and matching values.
Attributes
----------
* ``line`` (`string`): string representation of a line in a TecPlot file
Returns
----------
* `list` with all the names of zone properties (sizes, SOLUTIONTIME, etc.)
* `list` with corresponding values
"""
keys = re.findall(r"[,\s]+[a-zA-Z]+=", line)
vals = copy.deepcopy(line)
for k in keys:
vals = vals.replace(k, "SEPARATOR")
vals = [v.strip("\"").strip("\r") for v in vals.split("SEPARATOR") if v]
keys = [k.replace(",","").replace("=","").strip() for k in keys]
return keys, vals
def readTecPlot(filename, getZoneProperties=False):
""" Read a text-formatted TecPlot data file.
By default return each zone as a pandas DataFrame and ignore zone properties.
Each zone is stored as an object inside a dict, but if only a single zone is
present in the file, only a DataFrame instance is returned.
Attributes
----------
* ``filename`` (`string`): path to the file to read and parse.
* ``getZoneProperties`` (`bool`, optional): whether or not to return all
the zone properties or just values themselves.
Returns
----------
* `dict` of :class:`pandas.core.frame.DataFrame` containing data of each zone
or a single :class:`pandas.core.frame.DataFrame` if no zones are defined
in the file.
* `dict` of `dicts` with properties of each zone. Keys match those of data
object if the file contains multiple zones. Otherwise, the type is just
a `dict`.
"""
# read the file and split into lines, excluding any headers, comments and empty lines
with open(filename,"r") as instream:
s = instream.read().split("\n")
# this will hold the data for each zone
data = {}
newZone = "defaultZone" # used when there are no zones defined in the file
zoneProperties = {} # keywords and values specified in the definition of each zone
# go over each line
for l in s:
if ((len(l.strip()) == 0) or (l[0] == '#')):
continue
# if found a variables definition
if ("variables" in l) or ("VARIABLES" in l):
# safeguard against other information being in the same line as variables
if "variables" in l:
l = l.split("variables")[1]
else:
l = l.split("VARIABLES")[1]
variables = [v for v in l.split('"') if (v and (v.strip() != ',') and
('=' not in v) and ('variables' not in v) and ('VARIABLES' not in v) and (v != "\r") and (v.strip()))]
# start a new zone
elif ("zone" in l) or ("ZONE" in l):
# find any and all zone properties and its name
l = l.replace("ZONE","").replace("zone","")
# get zone properties
keys, vals = _getZoneData(l)
# distinguish between zone title and other optional properties
for i in range(len(keys)):
if keys[i] in ["T", "t"]:
newZone = vals[i]
vals.pop(i)
keys.pop(i)
break
# Safeguard against empty zone names. Use index in the file instead.
if newZone == "":
newZone = len(data)
# create a new list for incoming zone values and store optional properties
# avoid overwriting zones with the same name
if newZone in data:
initialZoneName = newZone
iRepeat = 0
while newZone in data:
newZone = "{}{}".format(initialZoneName, iRepeat)
iRepeat += 1
data[newZone] = []
zoneProperties[newZone] = dict(zip(keys, vals))
# sometimes files have titles, ignore
elif ("title" in l) or ("TITLE" in l):
pass
# this line must contain data, add it to the most recent zone
# if there are no zones defined, create a new one with a default name
elif l and len(l.strip()) > 0:
try:
data[newZone].append([float(v) for v in l.split()])
# if no zones defined in the file
except KeyError:
data[newZone] = []
data[newZone].append([float(v) for v in l.split()])
# if there is some spurious text, e.g. if someone used fixed line width,
# try to extract extra zone data, pass otherwise
except ValueError:
keys, vals = _getZoneData(l)
for i in range(len(keys)):
zoneProperties[newZone][keys[i]] = vals[i]
# convert to numpy arrays
for z in data:
data[z] = np.array(data[z])
# concatenate variables and data into pandas data frames.
for z in data:
if len(data[z]) > 0:
data[z] = | pandas.DataFrame(data[z], columns=variables) | pandas.DataFrame |
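# A quick check of the _getZoneData helper above on a made-up zone header line (the
# readTecPlot body is cut off at this dataset row boundary, so only the helper is exercised):
_keys, _vals = _getZoneData(' T="inlet", SOLUTIONTIME=0.5')
print(dict(zip(_keys, _vals)))  # {'T': 'inlet', 'SOLUTIONTIME': '0.5'}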
import sys
import os
import codecs
import glob
import configparser
import pandas as pd
from datetime import datetime
from docopt import docopt
from jinja2 import Environment, FileSystemLoader
from lib.Util.util import *
# Type of printing.
OK = 'ok' # [*]
NOTE = 'note' # [+]
FAIL = 'fail' # [-]
WARNING = 'warn' # [!]
NONE = 'none' # No label.
# Create report.
class CreateReport:
def __init__(self):
self.util = Utilty()
# Read config file.
full_path = os.path.dirname(os.path.abspath(__file__))
config = configparser.ConfigParser()
try:
config.read(os.path.join(full_path, 'config.ini'))
except Exception as err:
self.util.print_exception(err, 'File exists error')
sys.exit(1)
self.report_date_format = config['Report']['date_format']
self.report_test_path = os.path.join(
full_path, config['Report']['report_test'])
self.report_test_file = os.path.join(
self.report_test_path, config['Report']['report_test_file'])
self.template_test = config['Report']['template_test']
self.report_train_path = os.path.join(
self.report_test_path, config['Report']['report_train'])
self.report_train_file = os.path.join(
self.report_train_path, config['Report']['report_train_file'])
self.template_train = config['Report']['template_train']
self.header_train = str(config['Report']['header_train']).split('@')
self.header_test = str(config['Report']['header_test']).split('@')
def create_report(self, mode='train', start_date=None):
# Check mode.
if mode not in ['train', 'test']:
self.util.print_message(FAIL, 'Invalid mode: {}'.format(mode))
exit(1)
# Gather reporting items.
if mode == 'train':
self.util.print_message(NOTE, 'Creating training report.')
csv_file_list = glob.glob(os.path.join(
self.report_train_path, '*.csv'))
# Create DataFrame.
content_list = []
for file in csv_file_list:
df = | pd.read_csv(file, names=self.header_train, sep=',') | pandas.read_csv |
"""
Add Docstrings to Workflows
---------------------------
Documenting code enhances its readability. Flyte supports docstrings to document your code.
Docstrings are stored in FlyteAdmin and shown on the UI in the launch form.
This example demonstrates the various ways in which you can document your Flyte workflow.
The example workflow accepts a DataFrame and data class. We send a record that needs to be appended to the DataFrame through a data class.
"""
# %%
# Let's first import the libraries.
from dataclasses import dataclass
import pandas as pd
from dataclasses_json import dataclass_json
from flytekit import task, workflow
# %%
# We define a dataclass.
@dataclass_json
@dataclass
class PandasData(object):
id: int = 3
name: str = "Bonnie"
# %%
# Next, we define a task to append data to the DataFrame.
@task
def add_data(df: pd.DataFrame, data: PandasData) -> pd.DataFrame:
df = df.append({"id": data.id, "name": data.name}, ignore_index=True)
return df
# %%
# Sphinx-style Docstring
# ======================
#
# An example to demonstrate sphinx-style docstring.
#
# The first block of the docstring is a one-liner about the workflow.
# The second block of the docstring consists of a detailed description.
# The third block of the docstring describes the parameters and return type.
@workflow
def sphinx_docstring(df: pd.DataFrame, data: PandasData = PandasData()) -> pd.DataFrame:
"""
Showcase sphinx-style docstring.
This workflow accepts a DataFrame and data class.
It calls a task that appends the user-sent record to the DataFrame.
:param df: Pandas DataFrame
:param data: A data class pertaining to the new record to be stored in the DataFrame
:return: Pandas DataFrame
"""
return add_data(df=df, data=data)
# %%
# NumPy-style Docstring
# ======================
#
# An example to demonstrate numpy-style docstring.
#
# The first block of the docstring is a one-liner about the workflow.
# The second block of the docstring consists of a detailed description.
# The third block of the docstring describes all the parameters along with their data types.
# The fourth block of the docstring describes the return type along with its data type.
@workflow
def numpy_docstring(df: pd.DataFrame, data: PandasData = PandasData()) -> pd.DataFrame:
"""
Showcase numpy-style docstring.
This workflow accepts a DataFrame and data class.
It calls a task that appends the user-sent record to the DataFrame.
Parameters
----------
df: pd.DataFrame
Pandas DataFrame
data: Dataclass
A data class pertaining to the new record to be stored in the DataFrame
Returns
-------
out : pd.DataFrame
Pandas DataFrame
"""
return add_data(df=df, data=data)
# %%
# Google-style Docstring
# ======================
#
# An example to demonstrate google-style docstring.
#
# The first block of the docstring is a one-liner about the workflow.
# The second block of the docstring consists of a detailed description.
# The third block of the docstring describes the parameters and return type along with their data types.
@workflow
def google_docstring(df: pd.DataFrame, data: PandasData = PandasData()) -> pd.DataFrame:
"""
Showcase google-style docstring.
This workflow accepts a DataFrame and data class.
It calls a task that appends the user-sent record to the DataFrame.
Args:
df(pd.DataFrame): Pandas DataFrame
data(Dataclass): A data class pertaining to the new record to be stored in the DataFrame
Returns:
pd.DataFrame: Pandas DataFrame
"""
return add_data(df=df, data=data)
# %%
# Lastly, we can run the workflow locally.
if __name__ == "__main__":
print(f"Running {__file__} main...")
print(
f"Running sphinx_docstring(), modified DataFrame is {sphinx_docstring(df=pd.DataFrame(data={'id': [1, 2], 'name': ['John', 'Meghan']}),data=PandasData(id=3, name='Bonnie'))}"
)
print(
f"Running numpy_docstring(), modified DataFrame is {numpy_docstring(df=pd.DataFrame(data={'id': [1, 2], 'name': ['John', 'Meghan']}),data=PandasData(id=3, name='Bonnie'))}"
)
print(
f"Running google_docstring(), modified DataFrame is {google_docstring(df= | pd.DataFrame(data={'id': [1, 2], 'name': ['John', 'Meghan']}) | pandas.DataFrame |
##########################################################################################################
## hqchartpy2 integration with Tushare third-party data
##
##
##
##########################################################################################################
from hqchartpy2_fast import FastHQChart,IHQData,PERIOD_ID
import json
import time
import numpy as np
import pandas as pd
import tushare as ts
import datetime
from hqchartpy2_tushare_config import TushareConfig
from hqchartpy2_pandas import HQChartPy2Helper
##########################################################################################
## TushareHQChartData uses the Tushare data API for everything
##
##
##########################################################################################
class TushareHQChartData(IHQData) :
def __init__(self, token, startDate, endDate):
ts.set_token(token)
self.TusharePro = ts.pro_api()
self.StartDate=startDate
self.EndDate=endDate
def GetKLineData(self, symbol, period, right, jobID) :
if (period==128787) : # intraday trend chart
return self.GetMinuteData(symbol,period, right,jobID)
return self.GetKLineAPIData(symbol, period, right, self.StartDate, self.EndDate)
def GetMinuteData(self, symbol, period, right, jobID):
with open('test_data/minuteday.json',encoding='utf-8') as json_file:
data = json.load(json_file)
cacheData={}
cacheData['name']=symbol # stock name
cacheData['period']=period # period
cacheData['right']=right # no price adjustment (unadjusted)
stock=data["stock"][0]
df = | pd.DataFrame(stock["minute"]) | pandas.DataFrame |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Code to make cast-like extractions from a LiveOcean run, at times
and places that match an observational dataset,
specified by -ds [ecology, woac, ...] and a year.
"""
import os; import sys
sys.path.append(os.path.abspath('../../LiveOcean/alpha'))
import Lfun
import pandas as pd
import cast_fun as cfun
from importlib import reload
reload(cfun)
testing = False
# get command line arguments
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('-g', '--gridname', nargs='?', type=str, default='cas6')
parser.add_argument('-t', '--tag', nargs='?', type=str, default='v3')
parser.add_argument('-x', '--ex_name', nargs='?', type=str, default='lo8b')
parser.add_argument('-y', '--year', nargs='?', type=int, default=2017)
parser.add_argument('-ds', '--data_source', nargs='?', type=str, default='ecology')
args = parser.parse_args()
gridname = args.gridname
tag = args.tag
ex_name = args.ex_name
Ldir = Lfun.Lstart(gridname, tag)
Ldir['gtagex'] = Ldir['gtag'] + '_' + ex_name
year = args.year
data_source = args.data_source
if data_source == 'ecology_canada':
# +++ load ecology CTD cast data +++
dir0 = Ldir['parent'] + 'ptools_data/ecology/'
# load processed station info and data
sta_df = pd.read_pickle(dir0 + 'sta_df.p')
# add Canadian data
dir1 = Ldir['parent'] + 'ptools_data/canada/'
# load processed station info and data
sta_df_ca = pd.read_pickle(dir1 + 'sta_df.p')
# combine
sta_df = | pd.concat((sta_df, sta_df_ca), sort=False) | pandas.concat |
"""Stashes that operate on a dataframe, which are useful to common machine
learning tasks.
"""
__author__ = '<NAME>'
from typing import Iterable, Dict, Set, Tuple
from dataclasses import dataclass, field
import logging
import sys
from io import TextIOBase
from abc import abstractmethod, ABCMeta
from collections import OrderedDict
from pathlib import Path
import pandas as pd
from sklearn.model_selection import train_test_split
from zensols.util import APIError
from zensols.config import Writable
from zensols.persist import (
Deallocatable,
PersistedWork,
persisted,
ReadOnlyStash,
PrimeableStash,
)
from zensols.install import Installer, Resource
from zensols.dataset import SplitKeyContainer
logger = logging.getLogger(__name__)
class DataframeError(APIError):
"""Thrown for dataframe stash issues."""
@dataclass
class DataframeStash(ReadOnlyStash, Deallocatable, Writable,
PrimeableStash, metaclass=ABCMeta):
"""A factory stash that uses a Pandas data frame from which to load. It uses
the data frame index as the keys and :class:`pandas.Series` as values. The
dataframe is usually constructed by reading a file (i.e.CSV) and doing some
transformation before using it in an implementation of this stash.
The dataframe created by :meth:`_get_dataframe` must have a string or
integer index since keys for all stashes are of type :class:`str`. The
index will be mapped to a string if it is an int automatically.
"""
dataframe_path: Path = field()
"""The path to store the pickeled version of the generated dataframe
created with :meth:`_get_dataframe`.
"""
def __post_init__(self):
super().__post_init__()
Deallocatable.__init__(self)
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'split stash post init: {self.dataframe_path}')
self._dataframe = PersistedWork(self.dataframe_path, self, mkdir=True)
def deallocate(self):
super().deallocate()
self._dataframe.deallocate()
@abstractmethod
def _get_dataframe(self) -> pd.DataFrame:
"""Get or create the dataframe
"""
pass
def _prepare_dataframe(self, df: pd.DataFrame) -> pd.DataFrame:
dt = df.index.dtype
if dt != object:
if dt != int:
s = f'Data frame index must be a string or int, but got: {dt}'
raise DataframeError(s)
else:
df.index = df.index.map(str)
return df
@property
@persisted('_dataframe')
def dataframe(self):
df = self._get_dataframe()
df = self._prepare_dataframe(df)
return df
def prime(self):
super().prime()
self.dataframe
def clear(self):
if logger.isEnabledFor(logging.DEBUG):
logger.debug('clearing dataframe stash')
self._dataframe.clear()
def load(self, name: str) -> pd.Series:
return self.dataframe.loc[name]
def exists(self, name: str) -> bool:
return name in self.dataframe.index
def keys(self) -> Iterable[str]:
return map(str, self.dataframe.index)
def write(self, depth: int = 0, writer: TextIOBase = sys.stdout):
df = self.dataframe
self._write_line(f'rows: {df.shape[0]}', depth, writer)
self._write_line(f'cols: {", ".join(df.columns)}', depth, writer)
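# An illustrative minimal subclass (not part of the zensols library) showing the one piece a
# concrete DataframeStash has to supply, _get_dataframe; the CSV path is a hypothetical
# placeholder, and the string/int index requirement is handled by _prepare_dataframe.
@dataclass
class _ExampleCsvDataframeStash(DataframeStash):
    csv_path: Path = field(default=Path('example.csv'))
    """Hypothetical path of a CSV file backing this stash."""

    def _get_dataframe(self) -> pd.DataFrame:
        # the parent maps an integer index to strings, so plain read_csv output is fine
        return pd.read_csv(self.csv_path)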
@dataclass
class SplitKeyDataframeStash(DataframeStash, SplitKeyContainer):
"""A stash and split key container that reads from a dataframe.
"""
key_path: Path = field()
"""The path where the key splits (as a ``dict``) is pickled."""
split_col: str = field()
"""The column name in the dataframe used to indicate the split
(i.e. ``train`` vs ``test``).
"""
def __post_init__(self):
super().__post_init__()
self._keys_by_split = PersistedWork(self.key_path, self, mkdir=True)
def deallocate(self):
super().deallocate()
self._keys_by_split.deallocate()
def _create_keys_for_split(self, split_name: str, df: pd.DataFrame) -> \
Iterable[str]:
"""Generate an iterable of string keys. It is expected this method to be
potentially very expensive, so the results are cached to disk. This
implementation returns the dataframe index.
:param split_name: the name of the split (i.e. ``train`` vs ``test``)
:param df: the data frame for the grouping of keys from CSV of data
"""
return df.index
def _get_counts_by_key(self) -> Dict[str, int]:
sc = self.split_col
return dict(self.dataframe.groupby([sc])[sc].count().items())
@persisted('_split_names')
def _get_split_names(self) -> Set[str]:
return set(self.dataframe[self.split_col].unique())
@persisted('_keys_by_split')
def _get_keys_by_split(self) -> Dict[str, Tuple[str]]:
keys_by_split = OrderedDict()
split_col = self.split_col
for split, df in self.dataframe.groupby([split_col]):
logger.info(f'parsing keys for {split}')
keys = self._create_keys_for_split(split, df)
keys_by_split[split] = tuple(keys)
return keys_by_split
def clear(self):
super().clear()
self.clear_keys()
def clear_keys(self):
"""Clear only the cache of keys generated from the group by.
"""
self._keys_by_split.clear()
def write(self, depth: int = 0, writer: TextIOBase = sys.stdout):
total = self.dataframe.shape[0]
self._write_line('data frame splits:', depth, writer)
for split, cnt in self.counts_by_key.items():
self._write_line(f'{split}: {cnt} ({cnt/total*100:.1f}%)',
depth, writer)
self._write_line(f'total: {total}', depth, writer)
@dataclass
class AutoSplitDataframeStash(SplitKeyDataframeStash):
"""Automatically a dataframe in to train, test and validation datasets by
adding a :obj:`split_col` with the split name.
"""
distribution: Dict[str, float] = field()
"""The distribution as a percent across all key splits. The distribution
values must add to 1. The keys must have ``train``, ``test`` and
``validate``.
"""
def __post_init__(self):
super().__post_init__()
sm = float(sum(self.distribution.values()))
err_low, err_high, errm = (1. - sm), (1. + sm), 1e-1
if err_low > errm:
raise APIError('distribution must add to 1: ' +
f'{self.distribution} (err={err_low} > errm)')
if err_high < errm:
raise APIError('distribution must add to 1: ' +
f'{self.distribution} (err={err_high} < errm)')
def _prepare_dataframe(self, df: pd.DataFrame) -> pd.DataFrame:
n_train = self.distribution['train']
n_test = self.distribution['test']
n_val = self.distribution['validate']
n_test_val = n_test + n_val
n_test = n_test / n_test_val
train, test_val = train_test_split(df, test_size=1-n_train)
test, val = train_test_split(test_val, test_size=n_test)
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'split dataframe: train: {train.size}, ' +
f'test: {test.size}, validation: {val.size}')
# pandas complains about modifying a slice
train = train.copy()
test = test.copy()
val = val.copy()
train[self.split_col] = 'train'
test[self.split_col] = 'test'
val[self.split_col] = 'validation'
df = | pd.concat([train, test, val], ignore_index=False) | pandas.concat |
'''
This script prepares the session-level transcript for au actions
'''
import os
import pandas as pd
pd.__version__
import Python.Data_Preprocessing.config.config as cfg
from tqdm import tqdm
def z_score_per_dev_fold(df, col_name):
# for loop
df_result = pd.DataFrame(columns=["Video_ID", col_name +'_z'])
df_train = df
train_mil = cfg.parameters_cfg['a_'+col_name+'_mu']
train_sd = cfg.parameters_cfg['a_'+col_name+'_sd']
df_series = (df[col_name] - train_mil) / (train_sd)
df_return = pd.DataFrame()
df_return['Video_ID'] = df_train.video_id.apply(lambda x: x.split('_')[0])
df_return[col_name + '_z'] = df_series
df_result = df_result.append(df_return)
return df_result
def get_z_bucket(z_series):
'''
takes in the z_series and outputs the bucket very low;low;high;very high
'''
df = | pd.DataFrame(z_series) | pandas.DataFrame |
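# The body of get_z_bucket is cut off at this dataset row boundary. One plausible way to do
# that kind of bucketing with pd.cut -- the cut points at -1, 0 and +1 standard deviations
# are an assumption for illustration, not values from the original project:
import numpy as np
import pandas as pd

def bucket_z_scores(z_series: pd.Series) -> pd.Series:
    bins = [-np.inf, -1.0, 0.0, 1.0, np.inf]
    labels = ["very low", "low", "high", "very high"]
    return pd.cut(z_series, bins=bins, labels=labels)

print(bucket_z_scores(pd.Series([-2.3, -0.4, 0.2, 1.7])).tolist())
# ['very low', 'low', 'high', 'very high']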
import numpy as np
import pandas as pd
from cached_property import cached_property
from scipy.optimize import minimize
from eventsearch.signals import Smoother
from eventsearch.events import Event
from eventsearch.events import EventDataFrame as OrigEventDataFrame
from eventsearch.event_utils import analyse_capacitor_behavior
class SpontaneousActivityEvent(Event):
# todo: detecting and analyzing multi peak events
def __init__(self, *args, **kwargs):
"""
Spontaneous activity event class. This is an extension of the eventsearch. Event class to handle biological
events.
Parameters
----------
data: SingleSignal
signal data
t_start: float
start time
t_end: float
end time
t_reference: float
reference time
"""
super(SpontaneousActivityEvent, self).__init__(*args, **kwargs)
self.register_cached_property('peak_time')
self.register_cached_property('peak_value')
self.register_cached_property('pre_peak_time')
self.register_cached_property('pre_peak_value')
self.register_cached_property('integral')
self.register_cached_property('mean_start_slope')
self._event_data += ['mean_start_slope', 'peak_value', 'peak_time', 'pre_peak_time', 'pre_peak_value']
@cached_property
def peak_time(self):
"""
Returns
-------
peak time: float
"""
peak_id = np.argmin(self.y_local)
return self.t_local[peak_id]
@cached_property
def peak_value(self):
"""
Returns
-------
peak amplitude: float
"""
return np.min(self.y_local)
@cached_property
def pre_peak_time(self):
"""
Returns
-------
time of the local maximum before the peak: float
"""
mask = self.t_local <= self.peak_time
peak_id = np.argmax(self.y_local[mask])
return self.t_local[peak_id]
@cached_property
def pre_peak_value(self):
"""
Returns
-------
value of the local maximum before the peak: float
"""
mask = self.t_local <= self.peak_time
return np.max(self.y_local[mask])
def _capacitor_hypothesis(self, t_local, ymax, tau):
"""
hypothesis for capacitor behavior fitting
Parameters
----------
t_local: ndarray
local evaluation time points
ymax: float
settling value
tau: float
time constant
Returns
-------
hypothesis values: ndarray
"""
return ymax - (ymax - self.simplified_peak_end_value) * np.exp(-(t_local - self.simplified_peak_end_time) / tau)
def approximate_capacitor_behavior(self, cutoff: float = 0.3, iterations: int = 5,
smoother: Smoother = Smoother(31), **kwargs):
"""
Simple capacitor behavior analysis based on filtering and linear fitting in the phase domain. Only usable when
the data is clean. Noisy or uncertain behavior have to be fitted with "refine_capacitor_behavior".
Parameters
----------
cutoff: float, optional
Cutoff value for value filtering. Default is 0.3.
iterations: int, optional
Number of iterations. Default is 5.
smoother: Smoother, optional
Smoother for smoothing the data. Default is Smoother(window_len=31, window='hann').
"""
def loss(par):
return np.mean((self._capacitor_hypothesis(t, *par) - y) ** 2)
self.del_cache()
ymax, tau, alpha = analyse_capacitor_behavior(self, cutoff=cutoff, iterations=iterations, **kwargs)
t = self.t_local[self.t_local >= self.peak_time]
if len(t) > 3:
if len(t) <= smoother.window_len:
smoother.window_len = len(t) - 1
y = smoother.smooth(self.y_local[self.t_local >= self.peak_time])
self.simple_cap_ymax = ymax
self.simple_cap_tau = tau
self.simple_cap_alpha = alpha
self.simple_cap_loss = loss((ymax, tau))
else:
self.simple_cap_ymax = np.NaN
self.simple_cap_tau = np.NaN
self.simple_cap_alpha = np.NaN
self.simple_cap_loss = np.NaN
def capacitor_time_series(self, t_local, type='simple'):
"""
Cut out capacitor behavior of the event.
Parameters
----------
t_local: ndarray
local evaluation time points
type: {'simple', 'fitted'}, optional
Choose method. Default 'simple'.
Returns
-------
data values: ndarray
"""
ymax = self[type + '_cap_ymax']
tau = self[type + '_cap_tau']
if ymax is np.NaN or tau is np.NaN:
return np.array(len(t_local) * [np.NaN, ])
else:
return self._capacitor_hypothesis(t_local, ymax, tau)
def fit_capacitor_behavior(self, smoother: Smoother = Smoother(11, signal_smoothing=False), **kwargs):
"""
Fit capacitor behavior hypothesis by minimizing L2 distance.
Parameters
----------
smoother: Smoother, optional
Smoother for smoothing the data. Default is no smoothing.
"""
def loss(par):
return np.nanmean((self._capacitor_hypothesis(t, *par) - y) ** 2)
self.del_cache()
# ymax = self.simple_cap_ymax if self.simple_cap_ymax is not np.NaN else np.max(self.y)
ymax = 10
# tau = self.simple_cap_tau if self.simple_cap_tau is not np.NaN else 0
tau = 1e5
t = self.t_local[self.t_local >= self.simplified_peak_end_time]
y = smoother.smooth(self.y_local[self.t_local >= self.simplified_peak_end_time])
res = minimize(loss, x0=[ymax, tau], method='Nelder-Mead', options={'maxiter': 10000})
if res.status != 0:
tau = -1e-3
ymin = self.peak_value - 10
tmp_res = minimize(loss, x0=[ymin, tau], method='Nelder-Mead', options={'maxiter': 10000})
if tmp_res.fun < res.fun:
res = tmp_res
self.fitted_cap_ymax = res.x[0]
self.fitted_cap_tau = res.x[1]
self.fitted_cap_loss = res.fun
self.fitted_cap_status = res.status
@cached_property
def mean_start_slope(self):
"""
Returns
-------
mean slope of event rising by linearizing: float
"""
return (self.peak_value - self.start_value) / (self.peak_time - self.start_time)
class EventDataFrame(OrigEventDataFrame):
def __init__(self, *args, **kwargs):
"""
Extend EventDataFrame from eventsearch with the probability to add complete SNADatasets as data.
"""
super(EventDataFrame, self).__init__(*args, **kwargs)
def set_dataset(self, dataset):
"""
Add complete SNADataset as data.
Parameters
----------
dataset: SNADataset
"""
self.data = | pd.DataFrame() | pandas.DataFrame |
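# A quick numeric illustration of the capacitor hypothesis used above,
# y(t) = ymax - (ymax - y_end) * exp(-(t - t_end) / tau): the curve starts at y_end at t_end
# and settles exponentially toward ymax with time constant tau. The numbers are arbitrary.
import numpy as np

_ymax, _y_end, _t_end, _tau = 0.0, -5.0, 0.0, 2.0
_t = np.array([0.0, 2.0, 10.0])
print(_ymax - (_ymax - _y_end) * np.exp(-(_t - _t_end) / _tau))
# [-5.0, ~-1.84, ~-0.03]: at t_end it equals y_end, five time constants later it is near ymax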
# -*- coding:utf-8 -*-
"""
龙虎榜类
Created on 2019/01/11
@author: TabQ
@group : gugu
@contact: <EMAIL>
"""
import time
import re
import pandas as pd
from pandas.compat import StringIO
import lxml.html
from lxml import etree
from gugu.base import Base, cf
from gugu.utility import Utility
class BillBoard(Base):
def topList(self, date=None, retry=3, pause=0.001):
"""
获取每日龙虎榜列表
Parameters
--------
date:string
明细数据日期 format:YYYY-MM-DD 如果为空,返回最近一个交易日的数据
retry : int, 默认 3
如遇网络等问题重复执行的次数
pause : int, 默认 0
重复请求数据过程中暂停的秒数,防止请求间隔时间太短出现的问题
Return
------
DataFrame or List: [{'code':, 'name':, ...}, ...]
code:代码
name :名称
pchange:涨跌幅
amount:龙虎榜成交额(万)
buy:买入额(万)
bratio:占总成交比例
sell:卖出额(万)
sratio :占总成交比例
reason:上榜原因
unscramble: 解读
date :日期
"""
self._data = pd.DataFrame()
if date is None:
if Utility.getHour() < 18:
date = Utility.lastTradeDate()
else:
date = Utility.getToday()
else:
if not Utility.isTradeDay(date):
return None
for _ in range(retry):
time.sleep(pause)
try:
# http://data.eastmoney.com/DataCenter_V3/stock2016/TradeDetail/pagesize=200,page=1,sortRule=-1,sortType=,startDate=2019-01-10,endDate=2019-01-10,gpfw=0,js=vardata_tab_1.html
request = self._session.get( cf.LHB_URL % (date, date), timeout=10 )
request.encoding = 'gbk'
text = request.text.split('_1=')[1]
dataDict = Utility.str2Dict(text)
self._data = pd.DataFrame(dataDict['data'], columns=cf.LHB_TMP_COLS)
self._data.columns = cf.LHB_COLS
self._data['buy'] = self._data['buy'].astype(float)
self._data['sell'] = self._data['sell'].astype(float)
self._data['amount'] = self._data['amount'].astype(float)
self._data['Turnover'] = self._data['Turnover'].astype(float)
self._data['bratio'] = self._data['buy'] / self._data['Turnover']
self._data['sratio'] = self._data['sell'] / self._data['Turnover']
self._data['bratio'] = self._data['bratio'].map(cf.FORMAT)
self._data['sratio'] = self._data['sratio'].map(cf.FORMAT)
self._data['date'] = date
for col in ['amount', 'buy', 'sell']:
self._data[col] = self._data[col].astype(float)
self._data[col] = self._data[col] / 10000
self._data[col] = self._data[col].map(cf.FORMAT)
self._data = self._data.drop('Turnover', axis=1)
except:
pass
else:
return self._result()
raise IOError(cf.NETWORK_URL_ERROR_MSG)
def countTops(self, days=5, retry=3, pause=0.001):
"""
获取个股上榜统计数据
Parameters
--------
days:int
天数,统计n天以来上榜次数,默认为5天,其余是10、30、60
retry : int, 默认 3
如遇网络等问题重复执行的次数
pause : int, 默认 0
重复请求数据过程中暂停的秒数,防止请求间隔时间太短出现的问题
Return
------
DataFrame or List: [{'code':, 'name':, ...}, ...]
code:代码
name:名称
count:上榜次数
bamount:累积购买额(万)
samount:累积卖出额(万)
net:净额(万)
bcount:买入席位数
scount:卖出席位数
"""
self._data = pd.DataFrame()
if Utility.checkLhbInput(days) is True:
self._writeHead()
# http://vip.stock.finance.sina.com.cn/q/go.php/vLHBData/kind/ggtj/index.phtml?last=5&p=1
self._data = self.__parsePage(kind=cf.LHB_KINDS[0], last=days, column=cf.LHB_GGTJ_COLS, dataArr=pd.DataFrame(), pageNo=1, retry=retry, pause=pause)
self._data['code'] = self._data['code'].map(lambda x: str(x).zfill(6))
if self._data is not None:
self._data = self._data.drop_duplicates('code')
return self._result()
def brokerTops(self, days=5, retry=3, pause=0.001):
"""
获取营业部上榜统计数据
Parameters
--------
days:int
天数,统计n天以来上榜次数,默认为5天,其余是10、30、60
retry : int, 默认 3
如遇网络等问题重复执行的次数
pause : int, 默认 0
重复请求数据过程中暂停的秒数,防止请求间隔时间太短出现的问题
Return
---------
DataFrame or List: [{'broker':, 'count':, ...}, ...]
broker:营业部名称
count:上榜次数
bamount:累积购买额(万)
bcount:买入席位数
samount:累积卖出额(万)
scount:卖出席位数
top3:买入前三股票
"""
self._data = pd.DataFrame()
if Utility.checkLhbInput(days) is True:
self._writeHead()
# http://vip.stock.finance.sina.com.cn/q/go.php/vLHBData/kind/yytj/index.phtml?last=5&p=1
self._data = self.__parsePage(kind=cf.LHB_KINDS[1], last=days, column=cf.LHB_YYTJ_COLS, dataArr=pd.DataFrame(), pageNo=1, retry=retry, pause=pause)
return self._result()
def instTops(self, days=5, retry=3, pause=0.001):
"""
获取机构席位追踪统计数据
Parameters
--------
days:int
天数,统计n天以来上榜次数,默认为5天,其余是10、30、60
retry : int, 默认 3
如遇网络等问题重复执行的次数
pause : int, 默认 0
重复请求数据过程中暂停的秒数,防止请求间隔时间太短出现的问题
Return
--------
DataFrame or List: [{'code':, 'name':, ...}, ...]
code:代码
name:名称
bamount:累积买入额(万)
bcount:买入次数
samount:累积卖出额(万)
scount:卖出次数
net:净额(万)
"""
self._data = | pd.DataFrame() | pandas.DataFrame |
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from math import floor
import time
from scipy.stats import circmean
from scipy.stats import circstd
from collections import deque
import os
import subprocess
import glob
from datetime import datetime
df_norm = | pd.read_csv('KneeFlexExt.csv') | pandas.read_csv |
import numpy as np
import pandas as pd
import pickle
import torch
from torch import nn
from torch.autograd import Variable
import torch.nn.functional as F
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_squared_error
from sklearn.preprocessing import MinMaxScaler
from foolbox import PyTorchModel
from foolbox.attacks import L2AdditiveGaussianNoiseAttack
torch.manual_seed(0)
np.random.seed(0)
class Model(nn.Module):
def __init__(self, input_dim):
super(Model, self).__init__()
self.layer1 = nn.Linear(input_dim, 50)
self.layer2 = nn.Linear(50, 20)
self.layer3 = nn.Linear(20, 1)
def forward(self, x):
x = F.relu(self.layer1(x))
x = F.relu(self.layer2(x))
x = self.layer3(x)
return x
dataframe = pd.read_csv('../Data/auto_filtered.csv', header = 0)
dataset = dataframe.values.astype(float)
Y = dataframe.loc[:,'mpg']
X = dataset[:,:-1]
Y_binned, bins = pd.cut(Y, bins = 10, labels = False, retbins = True)
scaler = MinMaxScaler()
scaler.fit(X)
X = scaler.transform(X)
features_train, features_test, labels_train, labels_test = train_test_split(X, Y, random_state=42, shuffle=True)
x_train, y_train = Variable(torch.from_numpy(features_train)).float(), Variable(torch.from_numpy(labels_train.values)).float()
x_test, y_test = Variable(torch.from_numpy(features_test)).float(), Variable(torch.from_numpy(labels_test.values)).float()
x_final, y_final = Variable(torch.from_numpy(X)).float(), Variable(torch.from_numpy(Y.values)).float()
model = Model(features_train.shape[1])
optimizer = torch.optim.Adam(model.parameters(), lr = 0.01)
loss_fn = nn.MSELoss()
epochs = 100
def print_(loss):
print ("The loss calculated: ", loss)
print("Training")
for epoch in range(1, epochs+1):
print ("Epoch #",epoch)
y_pred = model.forward(x_train)
loss = loss_fn(y_pred.squeeze(), y_train)
print_(loss.item())
optimizer.zero_grad()
loss.backward()
optimizer.step()
print("Training error:", mean_squared_error(model(x_train).detach().numpy(),y_train))
y_pred = model(x_test).detach().numpy()
print("Test error:", mean_squared_error(y_pred, y_test))
fmodel = PyTorchModel(model.eval(), bounds=(-1, 2))
attack = L2AdditiveGaussianNoiseAttack()
epsilons = np.linspace(0.0, 6.0, num=6, endpoint=False)
advs, _, success = attack(fmodel, x_final, y_final, epsilons=epsilons)
perturbed = np.zeros((len(X),7))
added = np.zeros(len(X))
print("Finding adversarial inputs")
for eps, adv in zip(epsilons, advs):
if 0 not in added:
break
pred = model(adv)
pred = pred.detach().numpy().ravel()
pred_binned = np.digitize(pred, bins)
dif = [True if i[0]==i[1] else False for i in zip(pred_binned, Y_binned)] #True if class is correct
for i in range(len(dif)):
if dif[i]==False and added[i]==0:
perturbed[i] = adv[i]
added[i] = 1
print(eps, mean_squared_error(Y, pred))
perturbed_output = scaler.inverse_transform(perturbed)
perturbed_output = [[max(0,round(x)) if i!=4 else max(0,round(x,1)) for i,x in enumerate(nested)] for nested in perturbed_output]
Y_output = Y.values
Y_output = np.reshape(Y_output, (len(Y_output), 1))
pred_final = model(Variable(torch.from_numpy(perturbed)).float())
pred_final = pred_final.detach().numpy().ravel()
pred_final = np.reshape(pred_final, (len(pred), 1))
pred_final_binned = np.digitize(pred_final, bins)
output = np.hstack((perturbed_output,Y_output,pred_final))
| pd.DataFrame(output) | pandas.DataFrame |
# from intermediate_atoms import add_intermediate_atom_stats
# from distance_features import add_distance_features
# from molecule_features import add_molecule_features
# from neighbor_features_atom_index import add_neighbors_features
# from bond_features import add_bond_features
# from edge_features import add_edge_features
# from nbr_based_atom_types import get_atom_type, add_atom_type_both_indices
# from cycle_features import add_cycle_features
# from openbabel_data import add_obabel_based_features
# from atom_potentials import add_atom_potential_features
import pandas as pd
def get_X(X_df, structures_df, atom_encoder, edge_df, ia_df, neighbors_df, cycles_df, obabel_atom_df):
# Get atom type of each atom in molecule based on how many neighbors are C,H,O,N,F
atom_type_df = get_atom_type(edge_df, structures_df)
X_df = add_molecule_features(X_df, structures_df)
# coulomb,yukawa potential
X_df = add_atom_potential_features(X_df, structures_df, edge_df)
X_df = add_obabel_based_features(X_df, obabel_atom_df)
# length of cycle and information about whether atom or its neighbor was in cycle.
add_cycle_features(cycles_df, ia_df, X_df)
# It is necessary to first call molecule feature as distance features use some of the columns created in
# molecule features.
X_df = add_distance_features(X_df, structures_df)
# add_conical_segmented_feature(X_df,structures_df,edge_df)
# ascribe atom_type for both neighbors.
X_df = add_atom_type_both_indices(X_df, atom_type_df)
X_df = add_edge_features(edge_df, X_df, structures_df, ia_df, neighbors_df, obabel_atom_df)
# it must be called after distance features
X_df = add_bond_features(X_df)
# it must be called after distance features.
X_df = add_neighbors_features(X_df, structures_df, atom_encoder)
# X_df = add_intermediate_atom_stats(X_df, structures_df)
bond_encoding(X_df)
return X_df
def data_augmentation_on_train(train_X_df, train_Y_df):
"""
Since the target is agnostic to the order of the atoms (H-O and O-H have the same constant), augment by swapping the two atom columns.
"""
X_aug = train_X_df[train_X_df['enc_atom_0'] != train_X_df['enc_atom_1']].copy()
Y_aug = train_Y_df.loc[X_aug.index].copy()
for col in ['atom_index', 'x', 'y', 'z', 'enc_atom']:
zero_col = col + '_0'
one_col = col + '_1'
X_aug[zero_col] = train_X_df[one_col]
X_aug[one_col] = train_X_df[zero_col]
X_aug.index = X_aug.index + max(train_X_df.index) + 1
Y_aug.index = Y_aug.index + max(train_Y_df.index) + 1
X_final = pd.concat([train_X_df, X_aug])
Y_final = | pd.concat([train_Y_df, Y_aug]) | pandas.concat |
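# A toy demonstration of the swap performed by data_augmentation_on_train above: for pairs
# whose two atoms differ, every *_0 column is exchanged with its *_1 counterpart and the
# flipped rows are appended, since the coupling constant does not depend on atom order.
import pandas as pd

_toy = pd.DataFrame({
    "atom_index_0": [0], "atom_index_1": [1],
    "enc_atom_0": [1], "enc_atom_1": [2],  # made-up atom encodings
    "x_0": [0.0], "x_1": [0.95], "y_0": [0.0], "y_1": [0.0], "z_0": [0.0], "z_1": [0.0],
})
_flipped = _toy.copy()
for _col in ["atom_index", "x", "y", "z", "enc_atom"]:
    _flipped[_col + "_0"], _flipped[_col + "_1"] = _toy[_col + "_1"], _toy[_col + "_0"]
print(pd.concat([_toy, _flipped], ignore_index=True)[["enc_atom_0", "enc_atom_1"]])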
import pandas as pd
import numpy as np
import altair as alt
import matplotlib.pyplot as plt
def get_first_row(s):
return s.iloc[0]
#Reads the first row of data and determines if the data is categorical, quantitative or nominal
def auto_get_data_type(df):
type_dict = dict()
columns = list(df.columns)
for column in columns:
value = get_first_row(df[column])
if isinstance(value, str):
if value.isnumeric():
type_dict[column] = 'Q'
else:
type_dict[column] = 'C'
else:
type_dict[column] = 'Q'
return type_dict
#Manually enter if data is categorical, quantitative, nominal or id
def manual_entry_data_type(df):
type_dict = dict()
for column in list(df.columns):
type_dict[column] = input('Enter the variable type for {} (Quantitative/Categorical/Index/Time) Q/C/I/T:'.format(column))
return type_dict
def get_df_column_list(df):
return list(df.columns)
def manual_data_type_entry(df):
value = input('First time data entry (F), Correction (C), Skip this (S):')
if value == 'F':
type_dict = manual_entry_data_type(df)
elif value == 'C':
correction = 'y'
while correction == 'y':
variable = input('Enter variable name:')
value = input('Enter variable type:')
type_dict[variable] = value
correction = input('Update more variables(y/n):')
elif value == 'S':
print('Cool! here is dict:',type_dict)
return type_dict
def get_column_names_for_variable_type(columns,type_dict,variable_type):
cat_columns = [key for key, value in type_dict.items() if value == variable_type]
return cat_columns
def get_data_for_variables(df,data_type_dict,variable_type):
#print('get_data_for_variables--------------->',df)
columns = get_df_column_list(df)
var_columns = get_column_names_for_variable_type(columns,data_type_dict,variable_type)
index_column = get_index_column(columns,data_type_dict)
data_dict = dict()
if variable_type == 'C':
for column in var_columns:
summary = df.groupby(column).agg({index_column: 'count'}).reset_index()
data_dict[column] = summary
return data_dict,var_columns
elif variable_type == 'Q':
for column in var_columns:
quantitative_data = clean_quantitative_data(df[column])
data_dict[column] = quantitative_data
return data_dict,var_columns
def get_index_column(columns,type_dict):
index_column = [key for key, value in type_dict.items() if value == 'I']
return index_column[0]
def get_time_column(columns,type_dict):
time_column = [key for key, value in type_dict.items() if value == 'T']
return time_column[0]
def create_sorted_bar_chart(df,x_name,y_name,color='orange'):
chart = alt.Chart(df).mark_bar(color=color).encode(
x = x_name,
y = alt.Y(y_name, sort='-x'))
return chart
def get_x_y_column_names(df):
columns = get_df_column_list(df)
x_name = columns[1]
y_name = columns[0]
return x_name,y_name
def show_sorted_bar_chart(df):
x_name,y_name = get_x_y_column_names(df)
chart = create_sorted_bar_chart(df,x_name,y_name,color='orange')
return chart
def clean_quantitative_data(s):
s = | pd.to_numeric(s, errors='coerce', downcast='float') | pandas.to_numeric |
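# A small usage sketch of auto_get_data_type above on a toy frame: a string column whose
# first value is non-numeric is tagged 'C', everything else 'Q' (index and time columns
# still have to be assigned through the manual-entry helper).
import pandas as pd

_toy_df = pd.DataFrame({"city": ["Oslo", "Lima"], "count": ["3", "7"], "score": [1.5, 2.5]})
print(auto_get_data_type(_toy_df))  # {'city': 'C', 'count': 'Q', 'score': 'Q'}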
import glob
import os
import argparse
import pandas as pd
def generate_csv(folder, labels):
folder_name = os.path.basename(folder)
# convert comma separated labels into a list
label2int = {}
if labels:
labels = labels.split(",")
for label in labels:
string_label, integer_label = label.split("=")
label2int[string_label] = integer_label
labels = list(label2int)
# generate CSV file
df = | pd.DataFrame(columns=["filepath", "label"]) | pandas.DataFrame |
print(__doc__)
# ref: http://www.agcross.com/blog/2015/02/05/random-forests-in-python-with-scikit-learn/
from sklearn.ensemble import RandomForestClassifier as RFC
from sklearn.datasets import load_iris
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
iris = load_iris()
df = | pd.DataFrame(iris.data, columns=iris.feature_names) | pandas.DataFrame |
from __future__ import annotations
import typing
from ctc import spec
import numpy as np
import pandas as pd
def create_timestamp_column(
df: spec.DataFrame,
timestamps: typing.Sequence[int],
) -> spec.Series:
"""create a timestamp column series that shares an index with dataframe"""
index = df.index
if isinstance(index, pd.MultiIndex):
# keep only first level of multiindex
n_levels = len(df.index.names)
df = typing.cast(spec.DataFrame, df.droplevel(list(range(1, n_levels))))
merged = pd.merge(
df,
pd.Series(timestamps, name='timestamp'),
left_index=True,
right_index=True,
how='left',
)
merged.index = index
return merged['timestamp']
def create_datetime_column(
df: spec.DataFrame,
timestamps: typing.Sequence[int] | None = None,
) -> spec.Series:
if 'timestamp' in df.columns:
timestamp_column = df['timestamp']
else:
if timestamps is None:
raise Exception('must specify timestamps')
timestamp_column = create_timestamp_column(df, timestamps)
result = pd.to_datetime(timestamp_column, unit='s')
if not isinstance(result, pd.Series):
raise Exception('bad inputs given')
return result
def create_date_column(
df: spec.DataFrame,
timestamps: typing.Sequence[int] | None = None,
) -> spec.Series:
if 'datetime' in df.columns:
datetime = df['datetime']
else:
if timestamps is None:
raise Exception('must specify timestamps')
datetime = create_timestamp_column(df, timestamps)
result = datetime.dt.date
if not isinstance(result, pd.Series):
raise Exception('bad inputs given')
return result
def create_week_column(
df: spec.DataFrame,
timestamps: typing.Sequence[int],
) -> spec.Series:
if 'datetime' in df.columns:
datetime = df['datetime']
else:
datetime = create_timestamp_column(df, timestamps)
year = datetime.dt.isocalendar().year.astype(str)
week = datetime.dt.isocalendar().week.astype(str)
result = year + '-' + week
if not isinstance(result, pd.Series):
raise Exception('bad inputs given')
return result
def add_missing_series_dates(
series: spec.Series, datetimes: spec.Series, fill_value: int = 0
) -> spec.Series:
series = series.copy()
all_days = np.arange(
datetimes.min().timestamp(),
datetimes.max().timestamp(),
86400,
)
for day in all_days:
date = | pd.to_datetime(day, unit='s') | pandas.to_datetime |
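# A minimal illustration of what create_datetime_column above produces: unix-second
# timestamps aligned to the frame's index and converted with pd.to_datetime(unit='s').
# The block numbers and timestamps below are made up.
import pandas as pd

_blocks = pd.DataFrame({"block_number": [1, 2]})
_stamps = [1609459200, 1609459212]
print(pd.to_datetime(pd.Series(_stamps, index=_blocks.index), unit="s"))
# 0   2021-01-01 00:00:00
# 1   2021-01-01 00:00:12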
# -*- coding: utf-8 -*-
from warnings import catch_warnings
import numpy as np
from datetime import datetime
from pandas.util import testing as tm
import pandas as pd
from pandas.core import config as cf
from pandas.compat import u
from pandas._libs.tslib import iNaT
from pandas import (NaT, Float64Index, Series,
DatetimeIndex, TimedeltaIndex, date_range)
from pandas.core.dtypes.dtypes import DatetimeTZDtype
from pandas.core.dtypes.missing import (
array_equivalent, isnull, notnull,
na_value_for_dtype)
def test_notnull():
assert notnull(1.)
assert not notnull(None)
assert not notnull(np.NaN)
with cf.option_context("mode.use_inf_as_null", False):
assert notnull(np.inf)
assert notnull(-np.inf)
arr = np.array([1.5, np.inf, 3.5, -np.inf])
result = notnull(arr)
assert result.all()
with cf.option_context("mode.use_inf_as_null", True):
assert not notnull(np.inf)
assert not notnull(-np.inf)
arr = np.array([1.5, np.inf, 3.5, -np.inf])
result = notnull(arr)
assert result.sum() == 2
with cf.option_context("mode.use_inf_as_null", False):
for s in [tm.makeFloatSeries(), tm.makeStringSeries(),
tm.makeObjectSeries(), tm.makeTimeSeries(),
tm.makePeriodSeries()]:
assert (isinstance(isnull(s), Series))
class TestIsNull(object):
def test_0d_array(self):
assert isnull(np.array(np.nan))
assert not isnull(np.array(0.0))
assert not isnull(np.array(0))
# test object dtype
assert isnull(np.array(np.nan, dtype=object))
assert not isnull(np.array(0.0, dtype=object))
assert not isnull(np.array(0, dtype=object))
def test_empty_object(self):
for shape in [(4, 0), (4,)]:
arr = np.empty(shape=shape, dtype=object)
result = isnull(arr)
expected = np.ones(shape=shape, dtype=bool)
tm.assert_numpy_array_equal(result, expected)
def test_isnull(self):
assert not isnull(1.)
assert isnull(None)
assert isnull(np.NaN)
assert float('nan')
assert not isnull(np.inf)
assert not isnull(-np.inf)
# series
for s in [tm.makeFloatSeries(), tm.makeStringSeries(),
tm.makeObjectSeries(), tm.makeTimeSeries(),
tm.makePeriodSeries()]:
assert isinstance(isnull(s), Series)
# frame
for df in [tm.makeTimeDataFrame(), tm.makePeriodFrame(),
tm.makeMixedDataFrame()]:
result = isnull(df)
expected = df.apply(isnull)
tm.assert_frame_equal(result, expected)
# panel
with catch_warnings(record=True):
for p in [tm.makePanel(), tm.makePeriodPanel(),
tm.add_nans(tm.makePanel())]:
result = isnull(p)
expected = p.apply(isnull)
tm.assert_panel_equal(result, expected)
# panel 4d
with catch_warnings(record=True):
for p in [tm.makePanel4D(), tm.add_nans_panel4d(tm.makePanel4D())]:
result = isnull(p)
expected = p.apply(isnull)
tm.assert_panel4d_equal(result, expected)
def test_isnull_lists(self):
result = isnull([[False]])
exp = np.array([[False]])
tm.assert_numpy_array_equal(result, exp)
result = isnull([[1], [2]])
exp = np.array([[False], [False]])
tm.assert_numpy_array_equal(result, exp)
# list of strings / unicode
result = isnull(['foo', 'bar'])
exp = np.array([False, False])
tm.assert_numpy_array_equal(result, exp)
result = isnull([u('foo'), u('bar')])
exp = np.array([False, False])
tm.assert_numpy_array_equal(result, exp)
def test_isnull_nat(self):
result = isnull([NaT])
exp = np.array([True])
tm.assert_numpy_array_equal(result, exp)
result = isnull(np.array([NaT], dtype=object))
exp = np.array([True])
tm.assert_numpy_array_equal(result, exp)
def test_isnull_numpy_nat(self):
arr = np.array([NaT, np.datetime64('NaT'), np.timedelta64('NaT'),
np.datetime64('NaT', 's')])
result = isnull(arr)
expected = np.array([True] * 4)
tm.assert_numpy_array_equal(result, expected)
def test_isnull_datetime(self):
assert not isnull(datetime.now())
assert notnull(datetime.now())
idx = date_range('1/1/1990', periods=20)
exp = np.ones(len(idx), dtype=bool)
tm.assert_numpy_array_equal(notnull(idx), exp)
idx = np.asarray(idx)
idx[0] = iNaT
idx = DatetimeIndex(idx)
mask = isnull(idx)
assert mask[0]
exp = np.array([True] + [False] * (len(idx) - 1), dtype=bool)
tm.assert_numpy_array_equal(mask, exp)
# GH 9129
pidx = idx.to_period(freq='M')
mask = isnull(pidx)
assert mask[0]
exp = np.array([True] + [False] * (len(idx) - 1), dtype=bool)
tm.assert_numpy_array_equal(mask, exp)
mask = isnull(pidx[1:])
exp = np.zeros(len(mask), dtype=bool)
tm.assert_numpy_array_equal(mask, exp)
def test_datetime_other_units(self):
idx = pd.DatetimeIndex(['2011-01-01', 'NaT', '2011-01-02'])
exp = np.array([False, True, False])
tm.assert_numpy_array_equal(isnull(idx), exp)
tm.assert_numpy_array_equal(notnull(idx), ~exp)
tm.assert_numpy_array_equal(isnull(idx.values), exp)
tm.assert_numpy_array_equal(notnull(idx.values), ~exp)
for dtype in ['datetime64[D]', 'datetime64[h]', 'datetime64[m]',
'datetime64[s]', 'datetime64[ms]', 'datetime64[us]',
'datetime64[ns]']:
values = idx.values.astype(dtype)
exp = np.array([False, True, False])
tm.assert_numpy_array_equal(isnull(values), exp)
tm.assert_numpy_array_equal(notnull(values), ~exp)
exp = pd.Series([False, True, False])
s = pd.Series(values)
tm.assert_series_equal(isnull(s), exp)
tm.assert_series_equal(notnull(s), ~exp)
s = pd.Series(values, dtype=object)
tm.assert_series_equal(isnull(s), exp)
tm.assert_series_equal(notnull(s), ~exp)
def test_timedelta_other_units(self):
idx = pd.TimedeltaIndex(['1 days', 'NaT', '2 days'])
exp = np.array([False, True, False])
tm.assert_numpy_array_equal(isnull(idx), exp)
tm.assert_numpy_array_equal(notnull(idx), ~exp)
tm.assert_numpy_array_equal(isnull(idx.values), exp)
tm.assert_numpy_array_equal(notnull(idx.values), ~exp)
for dtype in ['timedelta64[D]', 'timedelta64[h]', 'timedelta64[m]',
'timedelta64[s]', 'timedelta64[ms]', 'timedelta64[us]',
'timedelta64[ns]']:
values = idx.values.astype(dtype)
exp = np.array([False, True, False])
tm.assert_numpy_array_equal(isnull(values), exp)
tm.assert_numpy_array_equal(notnull(values), ~exp)
exp = pd.Series([False, True, False])
s = pd.Series(values)
tm.assert_series_equal(isnull(s), exp)
tm.assert_series_equal(notnull(s), ~exp)
s = pd.Series(values, dtype=object)
tm.assert_series_equal(isnull(s), exp)
tm.assert_series_equal(notnull(s), ~exp)
def test_period(self):
idx = pd.PeriodIndex(['2011-01', 'NaT', '2012-01'], freq='M')
exp = np.array([False, True, False])
tm.assert_numpy_array_equal(isnull(idx), exp)
tm.assert_numpy_array_equal(notnull(idx), ~exp)
exp = pd.Series([False, True, False])
s = pd.Series(idx)
tm.assert_series_equal(isnull(s), exp)
tm.assert_series_equal(notnull(s), ~exp)
s = pd.Series(idx, dtype=object)
tm.assert_series_equal(isnull(s), exp)
tm.assert_series_equal(notnull(s), ~exp)
def test_array_equivalent():
assert array_equivalent(np.array([np.nan, np.nan]),
np.array([np.nan, np.nan]))
assert array_equivalent(np.array([np.nan, 1, np.nan]),
np.array([np.nan, 1, np.nan]))
assert array_equivalent(np.array([np.nan, None], dtype='object'),
np.array([np.nan, None], dtype='object'))
assert array_equivalent(np.array([np.nan, 1 + 1j], dtype='complex'),
np.array([np.nan, 1 + 1j], dtype='complex'))
assert not array_equivalent(
np.array([np.nan, 1 + 1j], dtype='complex'), np.array(
[np.nan, 1 + 2j], dtype='complex'))
assert not array_equivalent(
np.array([np.nan, 1, np.nan]), np.array([np.nan, 2, np.nan]))
assert not array_equivalent(
np.array(['a', 'b', 'c', 'd']), np.array(['e', 'e']))
assert array_equivalent(Float64Index([0, np.nan]),
Float64Index([0, np.nan]))
assert not array_equivalent(
Float64Index([0, np.nan]), Float64Index([1, np.nan]))
assert array_equivalent(DatetimeIndex([0, np.nan]),
DatetimeIndex([0, np.nan]))
assert not array_equivalent(
DatetimeIndex([0, np.nan]), DatetimeIndex([1, np.nan]))
assert array_equivalent(TimedeltaIndex([0, np.nan]),
TimedeltaIndex([0, np.nan]))
assert not array_equivalent(
TimedeltaIndex([0, np.nan]), TimedeltaIndex([1, np.nan]))
assert array_equivalent(DatetimeIndex([0, np.nan], tz='US/Eastern'),
DatetimeIndex([0, np.nan], tz='US/Eastern'))
assert not array_equivalent(
DatetimeIndex([0, np.nan], tz='US/Eastern'), DatetimeIndex(
[1, np.nan], tz='US/Eastern'))
assert not array_equivalent(
DatetimeIndex([0, np.nan]), DatetimeIndex(
[0, np.nan], tz='US/Eastern'))
assert not array_equivalent(
DatetimeIndex([0, np.nan], tz='CET'), DatetimeIndex(
[0, np.nan], tz='US/Eastern'))
assert not array_equivalent(
DatetimeIndex([0, np.nan]), TimedeltaIndex([0, np.nan]))
def test_array_equivalent_compat():
# see gh-13388
m = np.array([(1, 2), (3, 4)], dtype=[('a', int), ('b', float)])
n = np.array([(1, 2), (3, 4)], dtype=[('a', int), ('b', float)])
assert (array_equivalent(m, n, strict_nan=True))
assert (array_equivalent(m, n, strict_nan=False))
m = np.array([(1, 2), (3, 4)], dtype=[('a', int), ('b', float)])
n = np.array([(1, 2), (4, 3)], dtype=[('a', int), ('b', float)])
assert (not array_equivalent(m, n, strict_nan=True))
assert (not array_equivalent(m, n, strict_nan=False))
m = np.array([(1, 2), (3, 4)], dtype=[('a', int), ('b', float)])
n = np.array([(1, 2), (3, 4)], dtype=[('b', int), ('a', float)])
assert (not array_equivalent(m, n, strict_nan=True))
assert (not | array_equivalent(m, n, strict_nan=False) | pandas.core.dtypes.missing.array_equivalent |
# Copyright 2017-2021 TensorHub, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import datetime
import functools
import importlib
import inspect
import logging
import os
import sys
import threading
import warnings
import six
with warnings.catch_warnings():
warnings.simplefilter("ignore", Warning)
warnings.filterwarnings("ignore", message="numpy.dtype size changed")
warnings.filterwarnings("ignore", message="numpy.ufunc size changed")
try:
import pandas as pd
except ImportError:
raise RuntimeError(
"guild.ipy requires pandas - install it first before using "
"this module (see https://pandas.pydata.org/pandas-docs/stable/"
"install.html for help)"
)
# ipy makes use of the full Guild API and so, like main_bootstrap,
# requires the external modules.
from guild import main_bootstrap
main_bootstrap.ensure_external_path()
from guild import batch_util
from guild import click_util
from guild import config
from guild import exit_code
from guild import index as indexlib
from guild import model_proxy
from guild import op_util
from guild import opref as opreflib
from guild import run as runlib
from guild import run_util
from guild import summary
from guild import util
from guild import var
from guild.commands import runs_impl
log = logging.getLogger("guild")
RUN_DETAIL = [
"id",
"operation",
"status",
"started",
"stopped",
"label",
"run_dir",
]
DEFAULT_MAX_TRIALS = 20
class RunException(Exception):
def __init__(self, run, from_exc):
super(RunException, self).__init__(run, from_exc)
self.run = run
self.from_exc = from_exc
class RunError(RunException):
pass
class RunTerminated(RunException):
pass
class OutputTee(object):
def __init__(self, fs, lock):
self._fs = fs
self._lock = lock
def write(self, s):
with self._lock:
for f in self._fs:
f.write(s)
def flush(self):
with self._lock:
for f in self._fs:
f.flush()
class RunOutput(object):
def __init__(self, run, summary=None):
self.run = run
self.summary = summary
self._f = None
self._f_lock = None
self._stdout = None
self._stderr = None
def __enter__(self):
self._f = open(self.run.guild_path("output"), "w")
self._f_lock = threading.Lock()
self._stdout = sys.stdout
sys.stdout = OutputTee(self._tee_fs(sys.stdout), self._f_lock)
self._stderr = sys.stderr
sys.stderr = OutputTee(self._tee_fs(sys.stderr), self._f_lock)
def _tee_fs(self, iof):
fs = [iof, self._f]
if self.summary:
fs.append(self.summary)
return fs
def __exit__(self, *exc):
with self._f_lock:
self._f.close()
if self.summary:
self.summary.close()
sys.stdout = self._stdout
sys.stderr = self._stderr
@functools.total_ordering
class RunIndex(object):
def __init__(self, run, fmt):
self.value = run
self.run = run # backward compatible alias
self.fmt = fmt
def __str__(self):
return self.value.short_id
def __eq__(self, x):
return self._x_id(x) == self.value.id
def __lt__(self, x):
return self.value.id < self._x_id(x)
@staticmethod
def _x_id(x):
if isinstance(x, six.string_types):
return x
elif isinstance(x, RunIndex):
return x.value.id
return None
class RunsSeries(pd.Series):
@property
def _constructor(self):
return RunsSeries
@property
def _constructor_expanddim(self):
return RunsDataFrame
def delete(self, **kw):
self.to_frame().delete(**kw)
def info(self, **kw):
_print_run_info(self[0], **kw)
def scalars(self):
return _runs_scalars([self[0].value])
def scalars_detail(self):
return _runs_scalars_detail([self[0].value])
def flags(self):
return _runs_flags([self[0].value])
def compare(self):
return _runs_compare([self[0]])
class RunsDataFrame(pd.DataFrame):
@property
def _constructor(self):
return RunsDataFrame
@property
def _constructor_sliced(self):
return RunsSeries
@property
def _constructor_expanddim(self):
return RunsDataFrame
def delete(self, permanent=False):
runs = self._runs()
var.delete_runs(runs, permanent)
return [run.id for run in runs]
def _runs(self):
return [row[1][0].value for row in self.iterrows()]
def _items(self):
return [row[1][0] for row in self.iterrows()]
# pylint: disable=arguments-differ
def info(self, *args, **kw):
self.loc[0].info(*args, **kw)
def scalars(self):
return _runs_scalars(self._runs())
def scalars_detail(self):
return _runs_scalars_detail(self._runs())
def flags(self):
return _runs_flags(self._runs())
def compare(self):
return _runs_compare(self._items())
class Batch(object):
def __init__(self, gen_trials, op, flag_vals, opts):
self.gen_trials = gen_trials
self.op = op
self.flag_vals = _coerce_range_functions(flag_vals)
self.opts = opts
def __call__(self):
runs = []
results = []
prev_results_cb = lambda: (runs, results)
for trial in self.gen_trials(self.flag_vals, prev_results_cb, **self.opts):
trial_flag_vals, trial_attrs = _split_gen_trial(trial)
print(
"Running %s (%s):"
% (self.op.__name__, op_util.flags_desc(trial_flag_vals))
)
run, result = _run(self.op, trial_flag_vals, self.opts, trial_attrs)
runs.append(run)
results.append(result)
return runs, results
def _split_gen_trial(trial):
if isinstance(trial, tuple):
assert len(trial) == 2, ("generated trial must be a two-tuple or a dict", trial)
return trial
else:
return trial, {}
def _coerce_range_functions(flag_vals):
return {name: _coerce_range_function(val) for name, val in flag_vals.items()}
def _coerce_range_function(val):
if isinstance(val, RangeFunction):
return str(val)
return val
class RangeFunction(object):
def __init__(self, name, *args):
self.name = name
self.args = args
def __str__(self):
args = ":".join([str(arg) for arg in self.args])
return "%s[%s]" % (self.name, args)
def batch_gen_trials(flag_vals, _prev_trials_cb, max_trials=None, **kw):
if kw:
log.warning("ignoring batch config: %s", kw)
max_trials = max_trials or DEFAULT_MAX_TRIALS
trials = 0
for trial_flag_vals in batch_util.expand_flags(flag_vals):
if trials >= max_trials:
return
trials += 1
yield trial_flag_vals
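# Illustrative sketch (flag names/values are assumptions, not from the source): with
# flag_vals = {"lr": [0.01, 0.1], "batch_size": [16, 32]}, batch_util.expand_flags yields
# the 4 flag combinations; passing max_trials=3 stops the generator after the first 3 trials.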
def optimizer_trial_generator(model_op):
main_mod = _optimizer_module(model_op.module_name)
try:
return main_mod.gen_trials
except AttributeError:
raise TypeError(
"%s optimizer module does not implement gen_trials" % main_mod.__name__
)
def _optimizer_module(module_name):
return importlib.import_module(module_name)
def uniform(low, high):
return RangeFunction("uniform", low, high)
def loguniform(low, high):
return RangeFunction("loguniform", low, high)
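# Illustrative sketch: str(uniform(0.001, 0.1)) evaluates to "uniform[0.001:0.1]" via
# RangeFunction.__str__, which is the string form substituted into batch flag values by
# _coerce_range_functions above.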
def run(op, *args, **kw):
if not callable(op):
raise ValueError("op must be callable")
opts = _pop_opts(kw)
flag_vals = _init_flag_vals(op, args, kw)
run = _init_runner(op, flag_vals, opts)
return run()
def _pop_opts(kw):
opts = {}
for name in list(kw):
if name[:1] == "_":
opts[name[1:]] = kw.pop(name)
return opts
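# Illustrative sketch ('train' is a hypothetical op, not defined here):
# run(train, lr=0.1, _label="baseline", _max_trials=5) passes lr=0.1 as a flag, while
# _pop_opts converts the leading-underscore kwargs into the run options
# {"label": "baseline", "max_trials": 5}.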
def _init_flag_vals(op, args, kw):
# pylint: disable=deprecated-method
op_f = _op_f(op)
op_flag_vals = inspect.getcallargs(op_f, *args, **kw)
_remove_bound_method_self(op_f, op_flag_vals)
return _coerce_slice_vals(op_flag_vals)
def _op_f(op):
assert callable(op), repr(op)
if inspect.isfunction(op) or inspect.ismethod(op):
return op
assert hasattr(op, "__call__")
return op.__call__
def _remove_bound_method_self(op, op_flag_vals):
im_self = util.find_apply(
[
lambda: getattr(op, "__self__", None),
lambda: getattr(op, "im_self", None),
]
)
if im_self:
for key, val in op_flag_vals.items():
if val is im_self:
del op_flag_vals[key]
break
else:
assert False, (op_flag_vals, im_self)
def _coerce_slice_vals(flag_vals):
return {name: _coerce_slice_val(val) for name, val in flag_vals.items()}
def _coerce_slice_val(val):
if isinstance(val, slice):
return uniform(val.start, val.stop)
return val
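# Illustrative sketch ('train' is a hypothetical op): a slice flag value such as
# lr=slice(1e-4, 1e-1) is coerced to uniform(1e-4, 1e-1), so run(train, lr=slice(1e-4, 1e-1))
# is picked up by the random-search batch runner selected below.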
def _init_runner(op, flag_vals, opts):
return util.find_apply(
[_optimize_runner, _batch_runner, _single_runner], op, flag_vals, opts
)
def _optimize_runner(op, flag_vals, opts):
optimizer = opts.get("optimizer")
if not optimizer:
return _maybe_random_runner(op, flag_vals, opts)
opts = _filter_kw(opts, ["optimizer"])
return Batch(_init_gen_trials(optimizer), op, flag_vals, opts)
def _filter_kw(opts, keys):
return {k: v for k, v in opts.items() if k not in keys}
def _maybe_random_runner(op, flag_vals, opts):
assert not opts.get("optimizer"), opts
for val in flag_vals.values():
if isinstance(val, RangeFunction):
return Batch(_init_gen_trials("random"), op, flag_vals, opts)
return None
def _init_gen_trials(optimizer):
try:
model_op, _name = model_proxy.resolve_plugin_model_op(optimizer)
except model_proxy.NotSupported:
raise TypeError("optimizer %r is not supported" % optimizer)
else:
return optimizer_trial_generator(model_op)
def _batch_runner(op, flag_vals, opts):
for val in flag_vals.values():
if isinstance(val, list):
return Batch(batch_gen_trials, op, flag_vals, opts)
return None
def _single_runner(op, flag_vals, opts):
return lambda: _run(op, flag_vals, opts)
def _run(op, flag_vals, opts, extra_attrs=None):
run = _init_run()
_init_run_attrs(run, op, flag_vals, opts, extra_attrs)
summary = _init_output_scalars(run, opts)
try:
with RunOutput(run, summary):
_write_proc_lock(run)
with util.Chdir(run.path):
result = op(**flag_vals)
except KeyboardInterrupt as e:
exit_status = exit_code.KEYBOARD_INTERRUPT
util.raise_from(RunTerminated(run, e), e)
except Exception as e:
exit_status = exit_code.DEFAULT_ERROR
util.raise_from(RunError(run, e), e)
else:
exit_status = 0
return run, result
finally:
_finalize_run(run, exit_status)
def _init_run():
run_id = runlib.mkid()
run_dir = os.path.join(var.runs_dir(), run_id)
run = runlib.Run(run_id, run_dir)
run.init_skel()
return run
def _init_run_attrs(run, op, flag_vals, opts, extra_attrs):
opref = opreflib.OpRef("func", "", "", "", _op_name(op, opts))
run.write_opref(opref)
run.write_attr("started", runlib.timestamp())
run.write_attr("flags", flag_vals)
run.write_attr("label", _run_label(flag_vals, opts))
if extra_attrs:
for name, val in extra_attrs.items():
run.write_attr(name, val)
def _op_name(op, opts):
return opts.get("op_name") or _default_op_name(op)
def _default_op_name(op):
if inspect.isfunction(op) or inspect.ismethod(op):
return op.__name__
return op.__class__.__name__
def _run_label(flag_vals, opts):
return op_util.run_label(_label_template(opts), flag_vals)
def _label_template(opts):
return util.find_apply([_explicit_label, _tagged_label], opts)
def _explicit_label(opts):
return opts.get("label")
def _tagged_label(opts):
try:
tag = opts["tag"]
except KeyError:
return None
else:
return "%s ${default_label}" % tag
def _init_output_scalars(run, opts):
config = opts.get("output_scalars", summary.DEFAULT_OUTPUT_SCALARS)
if not config:
return None
abs_guild_path = os.path.abspath(run.guild_path())
return summary.OutputScalars(config, abs_guild_path)
def _write_proc_lock(run):
op_util.write_proc_lock(os.getpid(), run)
def _finalize_run(run, exit_status):
run.write_attr("exit_status", exit_status)
run.write_attr("stopped", runlib.timestamp())
op_util.delete_proc_lock(run)
def runs(**kw):
runs = runs_impl.filtered_runs(_runs_cmd_args(**kw))
data, cols = _format_runs(runs)
return RunsDataFrame(data=data, columns=cols)
def _runs_cmd_args(
operations=None,
labels=None,
tags=None,
comments=None,
running=False,
completed=False,
error=False,
terminated=False,
pending=False,
staged=False,
unlabeled=None,
marked=False,
unmarked=False,
started=None,
digest=None,
deleted=None,
remote=None,
):
operations = operations or ()
labels = labels or ()
tags = tags or ()
comments = comments or ()
return click_util.Args(
filter_ops=operations,
filter_labels=labels,
filter_tags=tags,
filter_comments=comments,
status_running=running,
status_completed=completed,
status_error=error,
status_terminated=terminated,
status_pending=pending,
status_staged=staged,
filter_unlabeled=unlabeled,
filter_marked=marked,
filter_unmarked=unmarked,
filter_started=started,
filter_digest=digest,
deleted=deleted,
remote=remote,
)
def _format_runs(runs):
cols = (
"run",
"operation",
"started",
"status",
"label",
)
data = [_format_run(run, cols) for run in runs]
return data, cols
def _format_run(run, cols):
fmt = run_util.format_run(run)
return [_run_attr(run, name, fmt) for name in cols]
def _run_attr(run, name, fmt):
if name == "run":
return RunIndex(run, fmt)
elif name in ("operation",):
return fmt[name]
elif name in ("started", "stopped"):
return _datetime(run.get(name))
elif name in ("label",):
return run.get(name, "")
elif name == "time":
return _run_time(run)
else:
return getattr(run, name)
def _datetime(ts):
if ts is None:
return None
return datetime.datetime.fromtimestamp(int(ts / 1000000))
def _run_time(run):
formatted_time = util.format_duration(run.get("started"), run.get("stopped"))
return pd.to_timedelta(formatted_time)
def _print_run_info(item, output=False, scalars=False):
for name in RUN_DETAIL:
print("%s: %s" % (name, item.fmt.get(name, "")))
print("flags:", end="")
print(run_util.format_attr(item.value.get("flags", "")))
if scalars:
print("scalars:")
for s in indexlib.iter_run_scalars(item.value):
print(" %s: %f (step %i)" % (s["tag"], s["last_val"], s["last_step"]))
if output:
print("output:")
for line in run_util.iter_output(item.value):
print(" %s" % line, end="")
def _runs_scalars(runs):
data = []
cols = [
"run",
"prefix",
"tag",
"first_val",
"first_step",
"last_val",
"last_step",
"min_val",
"min_step",
"max_val",
"max_step",
"avg_val",
"count",
"total",
]
for run in runs:
for s in indexlib.iter_run_scalars(run):
data.append(s)
return pd.DataFrame(data, columns=cols)
import pandas as pd
import json
import sys
from CostEmissionCalculator import cost_emission_calculation
if __name__ == "__main__":
rootPath = json.loads(sys.argv[1])
generation_technology_sent = sys.argv[2]
fuel_type_sent = sys.argv[3]
# DEFINE PARAMETERS
## define user's input parameters (decide which surrogate model to use for calculation)
#generation_technology = 'ultrasubcritical'
### choose from ultrasubcritical, supercritical, subcritical, NGCC, IGCC, cogeneration
#fuel_type = 'coal'
### choose from coal, coal_biomass_cofiring, lignite, anthracite, bituminous, subbituminous, natural gas, oil
## define parameters from powerplant knowledge base (use SPARQL to retrieve information from powerplant .owl files)
# load the powerplant database
df = pd.read_csv(rootPath + 'data/input/powerplant_database.csv', header='infer', sep=',')
cols = list(df)
cols.remove('year')
# use SPARQL query to get these results
country, capacity, primary_fuel, generation_technology, age, output, fuel_used ='Australia', 114, 'bituminous', 'subcritical', 30, 5000,'coal'
plant_data = [country, capacity, primary_fuel, generation_technology, age, output, fuel_used]
plant_df = pd.DataFrame([plant_data], columns = cols)
# GET THE REFERENCE EMISSION DATABASE (CHOOSE THE RIGHT REFERENCE DATABASE BY GENERATION TECHNOLOGY AND FUEL)
generation_technology = generation_technology_sent
fuel_type = fuel_type_sent
## coal
if generation_technology == 'ultrasubcritical':
emission_df = pd.read_csv(rootPath + 'data/input/baseplant/base_ultrasubcritical_PC_coal.csv', header='infer', sep=',')
if generation_technology == 'supercritical':
if primary_fuel == 'anthracite':
emission_df = pd.read_csv(rootPath + 'data/input/baseplant/base_supercritical_PC_anthracite.csv', header='infer', sep=',')
elif primary_fuel == 'subbituminous':
emission_df = pd.read_csv(rootPath + 'data/input/baseplant/base_supercritical_PC_subbituminous.csv', header='infer', sep=',')
#########
# GLOBALS
#########
from itertools import islice
import pandas as pd
import dateutil.parser as dp
from scipy.stats import boxcox
from realtime_talib import Indicator
#from nltk import word_tokenize
#from nltk.corpus import stopwords
#from nltk.stem.porter import *
#from scipy.integrate import simps
#from sklearn.model_selection import train_test_split
#from sklearn.utils import resample
#from selenium import webdriver
RANDOM_STATE = 42
#######################
# GENERAL PREPROCESSORS
#######################
def calculate_indicators(ohlcv_df):
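"""Given a raw OHLCV dataframe, compute a set of technical indicators (ROCR, ATR, OBV,
TRIX, MOM, ADX, WILLR, RSI, MACD, EMA) with realtime_talib and append them as columns."""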
ohlcv_df = ohlcv_df.drop(["Volume (BTC)", "Weighted Price"], axis=1)
ohlcv_df.columns = ["Date", "Open", "High", "Low", "Close", "Volume"]
temp_ohlcv_df = ohlcv_df.copy()
# Converts ISO 8601 timestamps to UNIX
unix_times = [int(dp.parse(temp_ohlcv_df.iloc[index]["Date"]).strftime('%s')) for index in range(temp_ohlcv_df.shape[0])]
temp_ohlcv_df["Date"] = pd.Series(unix_times).values
# Converts column headers to lowercase and sorts rows in chronological order
temp_ohlcv_df.columns = ["date", "open", "high", "low", "close", "volume"]
temp_ohlcv_df = temp_ohlcv_df.iloc[::-1]
# Rate of Change Ratio
rocr3 = Indicator(temp_ohlcv_df, "ROCR", 3).getHistorical()[::-1]
rocr6 = Indicator(temp_ohlcv_df, "ROCR", 6).getHistorical()[::-1]
# Average True Range
atr = Indicator(temp_ohlcv_df, "ATR", 14).getHistorical()[::-1]
# On-Balance Volume
obv = Indicator(temp_ohlcv_df, "OBV").getHistorical()[::-1]
# Triple Exponential Moving Average
trix = Indicator(temp_ohlcv_df, "TRIX", 20).getHistorical()[::-1]
# Momentum
mom1 = Indicator(temp_ohlcv_df, "MOM", 1).getHistorical()[::-1]
mom3 = Indicator(temp_ohlcv_df, "MOM", 3).getHistorical()[::-1]
# Average Directional Index
adx14 = Indicator(temp_ohlcv_df, "ADX", 14).getHistorical()[::-1]
adx20 = Indicator(temp_ohlcv_df, "ADX", 20).getHistorical()[::-1]
# Williams %R
willr = Indicator(temp_ohlcv_df, "WILLR", 14).getHistorical()[::-1]
# Relative Strength Index
rsi6 = Indicator(temp_ohlcv_df, "RSI", 6).getHistorical()[::-1]
rsi12 = Indicator(temp_ohlcv_df, "RSI", 12).getHistorical()[::-1]
# Moving Average Convergence Divergence
macd, macd_signal, macd_hist = Indicator(
temp_ohlcv_df, "MACD", 12, 26, 9).getHistorical()
macd, macd_signal, macd_hist = macd[::-1], macd_signal[::-1], macd_hist[::-1]
# Exponential Moving Average
ema6 = Indicator(temp_ohlcv_df, "MA", 6, 1).getHistorical()[::-1]
ema12 = Indicator(temp_ohlcv_df, "MA", 12, 1).getHistorical()[::-1]
# Append indicators to the input datasets
min_length = min(len(mom1), len(mom3), len(adx14), len(adx20), len(willr),
len(rsi6), len(rsi12), len(macd), len(macd_signal), len(macd_hist),
len(ema6), len(ema12), len(rocr3), len(rocr6), len(atr), len(obv), len(trix))
ohlcv_df = ohlcv_df[:min_length].drop(["Open", "High", "Low"], axis=1)
ohlcv_df["MOM (1)"] = pd.Series(mom1[:min_length]).values
ohlcv_df["MOM (3)"] = pd.Series(mom3[:min_length]).values
ohlcv_df["ADX (14)"] = pd.Series(adx14[:min_length]).values
ohlcv_df["ADX (20)"] = pd.Series(adx20[:min_length]).values
ohlcv_df["WILLR"] = pd.Series(willr[:min_length]).values
ohlcv_df["RSI (6)"] = pd.Series(rsi6[:min_length]).values
ohlcv_df["RSI (12)"] = pd.Series(rsi12[:min_length]).values
ohlcv_df["MACD"] = pd.Series(macd[:min_length]).values
ohlcv_df["MACD (Signal)"] = pd.Series(macd_signal[:min_length]).values
ohlcv_df["MACD (Historical)"] = pd.Series(macd_hist[:min_length]).values
import xlrd
import pandas as pd
from sklearn.svm import SVC
from sklearn.model_selection import GridSearchCV, StratifiedKFold
from sklearn.metrics import confusion_matrix
from Utils import *
from Model import FeatureReduction
"""----------------------User Configuration----------------------"""
n_train = 10000 # time of stacked autoecoders trainging for meidan weight and bias
h_units = [128, 32, 4, 1] # number of units in each hidden layer
fpath = 'path/storing/your/features/files(.txt)'
lpath = 'path/storing/your/labels/files(.txt)'
gpath = 'path/storing/files/indicating/treatment/group(.txt)'
x = np.loadtxt(fpath, delimiter='\t')
y = np.squeeze(np.genfromtxt(lpath))
nested_skf = StratifiedKFold(n_splits=5, shuffle=True, random_state=101)
group = np.genfromtxt(gpath)
index, reverse_index = np.unique(group, return_inverse=True)
fold_contribution = np.zeros((x.shape[1], index.size))
eval_metrics = np.zeros((index.size, 3))
for n_fold in range(index.size):
"""Binary processing and pre-training SAE"""
x_test, y_test = x[reverse_index == n_fold], y[reverse_index == n_fold]
x_train, y_train = x[reverse_index != n_fold], y[reverse_index != n_fold]
x_train_bin, x_test_bin = binarization(x_train, x_test, y_train)
total_AE_weight, total_AE_bias = sae_pretraining(x_train_bin, h_units, n_train)
median_weight, median_bias = median_init(total_AE_weight), median_init(total_AE_bias)
"""Training Feed Forward Network"""
fr_nn = FeatureReduction(x_train_bin.shape[0], h_units, median_weight, median_bias)
optimizer = optim.Adam(fr_nn.parameters(), lr=0.01)
nn_weights = train_nn(fr_nn, optimizer, x_train_bin, y_train)
ldc_train, ldc_test = nn_ldc(fr_nn, x_train_bin), nn_ldc(fr_nn, x_test_bin)
"""SVM classifier"""
svm_init = SVC(kernel='linear')
grid = GridSearchCV(svm_init, {'C': np.logspace(-3, 4, 8)}, cv=nested_skf, scoring='balanced_accuracy', n_jobs=5)
grid.fit(ldc_train, y_train)
svm = SVC(C=grid.best_params_['C'], kernel='linear')
svm.fit(ldc_train, y_train)
y_pred = svm.predict(ldc_test)
tn, fp, fn, tp = confusion_matrix(y_test, y_pred).ravel()
fold_sensitivity = tp / (tp + fn)
fold_specificity = tn / (tn + fp)
fold_balanced_accuracy = (fold_sensitivity + fold_specificity) / 2
eval_metrics[n_fold, 0] = fold_sensitivity
eval_metrics[n_fold, 1] = fold_specificity
eval_metrics[n_fold, 2] = fold_balanced_accuracy
"""Print classification results"""
df = pd.DataFrame(eval_metrics)
import pymongo
from nspyre.instrument_manager import Instrument_Manager
from nspyre.utils import *
# from instrument_server import Remote_Device
import pandas as pd
import numpy as np
from lantz import Q_
from pint.util import infer_base_unit
import traceback
import inspect
from collections import OrderedDict
from tqdm.auto import tqdm
from nspyre.data_handling import save_data
import threading
class MissingDeviceError(Exception):
pass
class MissingSpyreletError(Exception):
pass
class StopRunning(Exception):
pass
class Spyrelet():
# A dict with the names and associated class of the devices required to run this spyrelet
REQUIRED_DEVICES = dict()
# A dict with the name and associated class of the sub-spyrelet required to run this spyrelet
REQUIRED_SPYRELETS = dict()
# A definition of the parameters that are used as arguments to the main/initialize/finalize functions.
# These are used both to generate a launcher GUI and to enforce units at call time.
PARAMS = dict()
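# Illustrative sketch of a PARAMS definition (the flag names are assumptions; only the
# 'units' key is consumed by enforce_args_units below, other keys are left to the launcher GUI):
# PARAMS = {'wait_time': {'units': 's'}, 'center_freq': {'units': 'GHz'}}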
# An extra dictionary, which can be defined by the user at initialization time. This can store anything the user wants
CONSTS = dict()
"""
A few notes about the spyrelet class:
- This is the class you need to subclass for making experiments.
- All devices used in the spyrelet must be listed in the REQUIRED_DEVICES dict
- All sub-spyrelets must also be listed in the REQUIRED_SPYRELETS dict
- Upon instantiation the class will check the __init__ arguments devices and spyrelets to make sure they satisfy these requirements
- For higher performance the data is stored internally as a list instead of a dataframe, since appending to a list is quicker.
"""
def __init__(self, unique_name, spyrelets={}, device_alias={}, mongodb_addr=None, manager=None, manager_timeout=30000, **consts):
self.name = unique_name
self.progress = tqdm
self.spyrelets = spyrelets
self.CONSTS = self.CONSTS.copy()
self.CONSTS.update(**consts)
self.last_kwargs = dict()
if manager is None:
manager = Instrument_Manager(timeout=manager_timeout)
self.mongodb_addr = mongodb_addr
self.validate()
reg_entry = {
'_id':unique_name,
'class':"{}.{}".format(self.__class__.__module__, self.__class__.__name__),
}
self.client = get_mongo_client(mongodb_addr)
self.col = self.client['Spyre_Live_Data'][unique_name]
self.client['Spyre_Live_Data']['Register'].update_one({'_id':unique_name},{'$set':reg_entry}, upsert=True)
self.clear_data()
# This is imported here otherwise the import may occur before the Remote_Device_Instance is dynamically generated...
# from instrument_server import Remote_Device
devices = manager.get_devices()
for dname, dclass in self.REQUIRED_DEVICES.items():
real_dname = device_alias[dname] if dname in device_alias else dname
if real_dname in devices:
# isRemoteDevice = issubclass(type(devices[dname]), Remote_Device)
#This is a convoluted way of checking subclass
isRemoteDevice = any(['spyre.instrument_server.Remote_Device' in str(c) for c in inspect.getmro(type(devices[real_dname]))])
if isRemoteDevice :
inst_dclass = get_class_from_str(devices[real_dname].info['class'])
dev = devices[real_dname]
else:
inst_dclass = type(devices[real_dname])
dev = devices[real_dname]
if issubclass(inst_dclass, dclass):
setattr(self, dname, dev)
else:
raise MissingDeviceError("Device requirements for this spyrelet ({}) were not met. Missing: {}".format(self.name, dname))
for sname, sclass in self.REQUIRED_SPYRELETS.items():
if sname in spyrelets and isinstance(spyrelets[sname], sclass):
setattr(self, sname, spyrelets[sname])
else:
raise MissingSpyreletError("Sub-Spyrelet requirements for this spyrelet ({}) were not met. Missing: {}".format(self.name, sname))
def set_defaults(self, **params_dict):
d = {'defaults.{}'.format(key):val for key,val in custom_encode(params_dict).items()}
if len(d):
return self.client['Spyre_Live_Data']['Register'].update_one({'_id':self.name},{'$set':d}, upsert=True)
def run(self, *args, **kwargs):
self.progress = kwargs.pop('progress') if ('progress' in kwargs) else tqdm
clear_data = kwargs.pop('clear_data') if ('clear_data' in kwargs) else True
if self.progress is None: self.progress = lambda *args, **kwargs: tqdm(*args, leave=False, **kwargs)
try:
args, kwargs = self.enforce_args_units(*args, **kwargs)
self._stop_flag = False
if clear_data:
self.clear_data()
self.initialize(*args, **kwargs)
self.main(*args, **kwargs)
except StopRunning:
print('stopping spyrelet')
pass
except:
traceback.print_exc()
finally:
self.finalize(*args, **kwargs)
def bg_run(self, *args, **kwargs):
t = threading.Thread(target=lambda: self.run(*args, **kwargs))
t.start()
return t
def enforce_args_units(self, *args, **kwargs):
args = list(args)
def _enforce_units(val, param):
if 'units' in param:
if type(val) is Q_:
return val.to(param['units'])
else:
return val*Q_(1, param['units'])
else:
return val
sig = inspect.signature(self.main)
params = list(sig.parameters.keys())
for name in list(self.PARAMS.keys()):
param_index = params.index(name)
if param_index >= len(args):
if name in kwargs:
kwargs[name] = _enforce_units(kwargs[name], self.PARAMS[name])
else:
kwargs[name] = _enforce_units(sig.parameters[name].default, self.PARAMS[name])
else:
args[param_index] = _enforce_units(args[param_index], self.PARAMS[name])
return args, kwargs
def main(self, *args, **kwargs):
"""This is the method that will contain the user main logic. Should be overwritten"""
raise NotImplementedError
def initialize(self, *args, **kwargs):
"""This is the method that will contain the user initialize logic. Should be overwritten"""
pass
def finalize(self, *args, **kwargs):
"""This is the method that will contain the user finalize logic. Should be overwritten
This will run even if the initialize or main errors out
"""
pass
def call(self, spyrelet, *args, **kwargs):
"""This is the proper way to call a sub spyrelet.
It will take care of keeping the data and calling the proper progress bar.
If use_defaults is True (defaults to True) every argument needs to be passed as a keyword argument (ie *args will be ignored)
"""
keep_data = kwargs.pop('keep_data') if 'keep_data' in kwargs else True
# use_defaults = kwargs.pop('use_defaults') if 'use_defaults' in kwargs else True
ignore_child_progress = kwargs.pop('ignore_child_progress') if 'ignore_child_progress' in kwargs else False
if ignore_child_progress:
progress = lambda x: x
else:
progress = self.progress
# if not use_defaults:
spyrelet.run(*args, progress=progress, **kwargs)
# else:
# launcher = Spyrelet_Launcher(spyrelet)
# launcher.run(progress=self.progress,**kwargs)
if keep_data:
if not spyrelet.name in self._child_data:
self._child_data[spyrelet.name] = list()
self._child_data[spyrelet.name].append(spyrelet.data)
def stop(self):
self._stop_flag = True
for sname in self.REQUIRED_SPYRELETS:
getattr(self, sname).stop()
def clear_data(self):
self.col.drop()
self._data = list()
self._child_data = dict()
CASTING_DICT = {np.int32:int, np.float64:float}
def acquire(self, row):
# Cleanup row
# Here we will keep numpy arrays as is for local copy, but change it to list for MongoDB
if not row is None:
restore_row = dict()
for k, val in row.items():
if type(val) == Q_:
base_unit = '*'.join('{} ** {}'.format(u, p) for u, p in infer_base_unit(val).items())
row[k] = row[k].to(Q_(base_unit)).m
if type(val) == np.ndarray:
restore_row[k] = row[k]
row[k] = row[k].tolist()
if type(val) in self.CASTING_DICT:
row[k] = self.CASTING_DICT[type(val)](row[k])
self.col.insert_one(row)
for k, val in restore_row.items():
row[k] = val
self._data.append(row)
if self._stop_flag:
raise StopRunning
"""
The cache allows for passing information from the spyrelet to remote monitoring processes.
This cache is meant to be temporary, so unlike data which simply accumulates, this can be overwritten asynchronously
"""
def reg_cache_clear(self):
self.client['Spyre_Live_Data']['Register'].update_one({'_id':self.name},{'$set':{'cache':{}}}, upsert=True)
def reg_cache_store(self, **kwargs):
d = {'cache.{}'.format(key):val for key,val in custom_encode(kwargs).items()}
if len(d):
return self.client['Spyre_Live_Data']['Register'].update_one({'_id':self.name},{'$set':d}, upsert=True)
def reg_cache_get(self):
ans = self.client['Spyre_Live_Data']['Register'].find_one({'_id':self.name},{'_id':False, 'cache':True})
return custom_decode(ans['cache']) if 'cache' in ans else {}
@property
def child_data(self):
return self._child_data
@property
def data(self):
return pd.DataFrame(self._data)
# Import Packages
import os
import configparser
import argparse
import numpy as np
import pandas as pd
import pandapower as pp
import pandapower.networks
from borg import *
def input_parse():
"""
Parse inputs to global vars
@return:
"""
# Global vars used in simulation
global path_to_data
global path_to_generator_limits
global path_to_cost_coef
global path_to_emit_coef
global path_to_bus_limits
global path_to_runtime
# Local vars
config_inputs = configparser.ConfigParser()
argparse_inputs = argparse.ArgumentParser()
# Command line arguments (these get priority)
argparse_inputs.add_argument(
'-c',
'--config_file',
type=str,
action='store',
help='Path to configuration file',
required=True
)
# Parse arguments
argparse_inputs = argparse_inputs.parse_args()
# Parse config file
config_inputs.read(argparse_inputs.config_file)
# Store inputs
path_to_data = config_inputs['MAIN IO']['data']
path_to_generator_limits = os.path.join(path_to_data, config_inputs['INPUT']['path_to_generator_limits'])
path_to_cost_coef = os.path.join(path_to_data, config_inputs['INPUT']['path_to_cost_coef'])
path_to_emit_coef = os.path.join(path_to_data, config_inputs['INPUT']['path_to_emit_coef'])
path_to_bus_limits = os.path.join(path_to_data, config_inputs['INPUT']['path_to_bus_limits'])
path_to_runtime = os.path.join(path_to_data, config_inputs['OUTPUT']['path_to_runtime'])
return 0
def solve_power_flow(*vars):
"""
Solve for the power at generator 1 such that the powerflow equations are satisfied
@param vars: tuple: Active power setpoints in MW for the non-slack generators (assigned to net.gen)
@return: DataFrames: Generator power outputs and bus voltages
"""
# Initialize Vars
gen_names = ['G_1', 'G_2', 'G_3', 'G_4', 'G_5', 'G_6']
# Import the Network
net = pandapower.networks.case_ieee30()
# Map Decisions
net.gen['p_mw'] = vars
# Solve the Powerflow Equations
pp.runpp(net, init="flat", numba=False, enforce_q_lims=False, calculate_voltage_angles=True)
# Extract Generators
gen_df = pd.DataFrame({'p_mw': net.res_gen['p_mw'].to_list() + net.res_ext_grid['p_mw'].to_list()}, index=gen_names)
bus_df = pd.DataFrame(net.res_bus['vm_pu'])
# Export
return gen_df, bus_df
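# Illustrative sketch (the MW setpoints below are arbitrary assumptions; one value is
# needed per row of net.gen in the IEEE 30-bus case):
# gen_df, bus_df = solve_power_flow(40.0, 30.0, 25.0, 20.0, 15.0)
# print(gen_df['p_mw'], bus_df['vm_pu'].min())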
def get_gen_constraint(df):
"""
Get generator power constraint value
@param df: DataFrame: Generator power outputs
@return: float: constraint value (anything other than 0.0 is an infeasible set of generator values)
"""
# Initialize Vars
cons = 0.0
# Import Generation Lim
lim = pd.read_csv(path_to_generator_limits, index_col=0)
# Exceeds Maximum Capacity
cons = cons + float(sum(x for x in df['p_mw']/100 - lim['max'] if x > 0)) # Unit Power
cons = cons + float(abs(sum(x for x in df['p_mw']/100 - lim['min'] if x < 0)))
# Export
return cons
def get_cost(df):
"""
Get fuel cost of current set of generators
@param df: DataFrame: Generator power outputs
@return: float: Fuel cost of current set of generators
"""
# Initialize Vars
obj = 0.0
# Import Cost Coefficients
cost_df = pd.read_csv(path_to_cost_coef, index_col=0)
# Compute Cost
term1 = cost_df['a']
term2 = cost_df['b'] * (df['p_mw']/100)
term3 = cost_df['c'] * (df['p_mw']/100)**2
obj = obj + np.sum(term1 + term2 + term3)
# Export
return obj
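# The fuel-cost model above is the usual quadratic form evaluated on per-unit power
# (P_i = p_mw / 100): cost_i = a_i + b_i * P_i + c_i * P_i**2, summed over all units.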
def get_emission(df):
"""
Get emission value of current set of generators
@param df: DataFrame: Generator power outputs
@return: float: Emission value of current set of generators
"""
# Initialize Vars
obj = 0.0
# Import Emissions Coefficients
emit_df = pd.read_csv(path_to_emit_coef, index_col=0)
# Copyright 2021 Rosalind Franklin Institute
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import os
import subprocess
import yaml
import pandas as pd
from icecream import ic
from . import metadata as mdMod
class ctffind():
"""
Class encapsulating a ctffind object
"""
def __init__(self,
project_name,
md_in,
params_in,
logger_in,
):
"""
Initialising a ctffind object
ARGS:
project_name (str) :: name of current project
md_in (Metadata) :: metadata containing information of tilt-series to be processed
params_in (Params) :: params object containing configurations for ctffind
logger_in (Logger) :: logger object for recording ctffind process
"""
self.proj_name = project_name
self.prmObj = params_in
self.params = self.prmObj.params
self.logObj = logger_in
self.log = []
self._process_list = self.params['System']['process_list']
self.meta = pd.DataFrame(md_in.metadata)
self.meta = self.meta[self.meta['ts'].isin(self._process_list)]
self._get_images()
self.no_processes = False
self._check_processed_images()
self._set_output_path()
# Check if output folder exists, create if not
if not os.path.isdir(self.params['System']['output_path']):
subprocess.run(['mkdir', self.params['System']['output_path']],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
encoding='ascii')
def _get_images(self):
"""
Method to extract images for ctffind
Note: one image per tilt-series, criterion: image closest to 0 tilt angle
"""
self.ctf_images = pd.DataFrame(columns=self.meta.columns)
for curr_ts in self._process_list:
temp = self.meta[self.meta['ts']==curr_ts]
# ts_image = temp.loc[temp['angles'].abs().idxmin(axis=0)]
self.ctf_images = self.ctf_images.append(temp,
ignore_index=True)
def _set_output_path(self):
"""
Subroutine to set input and output path for "ctffound" images
"""
# copy values from output column to file_paths (input) column
self.ctf_images['file_paths'] = self.ctf_images.apply(lambda df: df['output'], axis=1)
# update output column
self.ctf_images['output'] = self.ctf_images.apply(
lambda row: f"{self.params['System']['output_path']}"
f"{self.params['System']['output_prefix']}_{row['ts']:03}_{row['angles']}_ctffind.mrc", axis=1)
def _check_processed_images(self):
"""
Method to check images which have already been processed before
"""
# Create new empty internal output metadata if no record exists
if not os.path.isfile(self.proj_name + '_ctffind_mdout.yaml'):
self.meta_out = pd.DataFrame(columns=self.meta.columns)
def setup_fs(s3, key="", secret="", endpoint="", cert="", passwords={}):
"""Given a boolean specifying whether to use local disk or S3, setup filesystem
Syntax examples: AWS (http://s3.us-east-2.amazonaws.com), MinIO (http://192.168.0.1:9000)
The cert input is relevant if you're using MinIO with TLS enabled, for specifying the path to the certificate.
The block_size is set to accommodate files up to 55 MB in size. If your log files are larger, adjust this value accordingly
"""
if s3:
import s3fs
block_size = 55 * 1024 * 1024
if "amazonaws" in endpoint:
fs = s3fs.S3FileSystem(key=key, secret=secret, default_block_size=block_size)
elif cert != "":
fs = s3fs.S3FileSystem(
key=key,
secret=secret,
client_kwargs={"endpoint_url": endpoint, "verify": cert},
default_block_size=block_size,
)
else:
fs = s3fs.S3FileSystem(
key=key,
secret=secret,
client_kwargs={"endpoint_url": endpoint},
default_block_size=block_size,
)
else:
from pathlib import Path
import canedge_browser
base_path = Path(__file__).parent
fs = canedge_browser.LocalFileSystem(base_path=base_path, passwords=passwords)
return fs
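# Illustrative usage (endpoint and credentials are placeholders, not real values):
# fs = setup_fs(s3=False)  # local disk via canedge_browser
# fs = setup_fs(s3=True, key="<key>", secret="<secret>", endpoint="http://192.168.0.1:9000")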
# -----------------------------------------------
def load_dbc_files(dbc_paths):
"""Given a list of DBC file paths, create a list of conversion rule databases"""
import can_decoder
from pathlib import Path
db_list = []
for dbc in dbc_paths:
db = can_decoder.load_dbc(Path(__file__).parent / dbc)
db_list.append(db)
return db_list
# -----------------------------------------------
def list_log_files(fs, devices, start_times, verbose=True, passwords={}):
"""Given a list of device paths, list log files from specified filesystem.
Data is loaded based on the list of start datetimes
"""
import canedge_browser, mdf_iter
log_files = []
if len(start_times):
for idx, device in enumerate(devices):
start = start_times[idx]
log_files_device = canedge_browser.get_log_files(fs, [device], start_date=start, passwords=passwords)
log_files.extend(log_files_device)
if verbose:
print(f"Found {len(log_files)} log files\n")
return log_files
def restructure_data(df_phys, res, full_col_names=False, pgn_names=False):
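"""Pivot the decoded signal dataframe (long format with 'CAN ID', 'Signal', 'Physical Value')
into a wide dataframe resampled at `res`, one column per signal, optionally prefixed with the
CAN ID (hex) and/or the J1939 PGN."""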
import pandas as pd
from J1939_PGN import J1939_PGN
df_phys_join = pd.DataFrame({"TimeStamp": []})
if not df_phys.empty:
for message, df_phys_message in df_phys.groupby("CAN ID"):
for signal, data in df_phys_message.groupby("Signal"):
pgn = J1939_PGN(int(message)).pgn
if full_col_names == True and pgn_names == False:
col_name = str(hex(int(message))).upper()[2:] + "." + signal
elif full_col_names == True and pgn_names == True:
col_name = str(hex(int(message))).upper()[2:] + "." + str(pgn) + "." + signal
elif full_col_names == False and pgn_names == True:
col_name = str(pgn) + "." + signal
else:
col_name = signal
df_phys_join = pd.merge_ordered(
df_phys_join,
data["Physical Value"].rename(col_name).resample(res).pad().dropna(),
on="TimeStamp",
fill_method="none",
).set_index("TimeStamp")
return df_phys_join
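# Illustrative usage (the resampling period "1S" is an arbitrary assumption):
# df_join = restructure_data(df_phys, res="1S", full_col_names=False, pgn_names=True)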
def test_signal_threshold(df_phys, signal, threshold):
"""Illustrative example for how to extract a signal and evaluate statistical values
vs. defined thresholds. The function can be easily modified for your needs.
"""
df_signal = df_phys[df_phys["Signal"] == signal]["Physical Value"]
stats = df_signal.agg(["count", "min", "max", "mean", "std"])
delta = stats["max"] - stats["min"]
if delta > threshold:
print(f"{signal} exhibits a 'max - min' delta of {delta} exceeding threshold of {threshold}")
def add_custom_sig(df_phys, signal1, signal2, function, new_signal):
"""Helper function for calculating a new signal based on two signals and a function.
Returns a dataframe with the new signal name and physical values
"""
import pandas as pd
try:
s1 = df_phys[df_phys["Signal"] == signal1]["Physical Value"].rename(signal1)
s2 = df_phys[df_phys["Signal"] == signal2]["Physical Value"].rename(signal2)
df_new_sig = pd.merge_ordered(
s1,
s2,
on="TimeStamp",
fill_method="ffill",
).set_index("TimeStamp")
df_new_sig = df_new_sig.apply(lambda x: function(x[0], x[1]), axis=1).dropna().rename("Physical Value").to_frame()
df_new_sig["Signal"] = new_signal
df_phys = df_phys.append(df_new_sig)
except:
print(f"Warning: Custom signal {new_signal} not created\n")
return df_phys
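# Illustrative usage (the signal names are hypothetical and must exist in df_phys):
# ratio = lambda v_speed, e_speed: v_speed / e_speed
# df_phys = add_custom_sig(df_phys, "WheelBasedVehicleSpeed", "EngineSpeed", ratio, "SpeedRatio")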
# -----------------------------------------------
class ProcessData:
def __init__(self, fs, db_list, signals=[], days_offset=None, verbose=True):
from datetime import datetime, timedelta
self.db_list = db_list
self.signals = signals
self.fs = fs
self.days_offset = days_offset
self.verbose = verbose
if self.verbose == True and self.days_offset != None:
date_offset = (datetime.today() - timedelta(days=self.days_offset)).strftime("%Y-%m-%d")
print(
f"Warning: days_offset = {self.days_offset}, meaning data is offset to start at {date_offset}.\nThis is intended for sample data testing only. Set days_offset = None when processing your own data."
)
return
def extract_phys(self, df_raw):
"""Given df of raw data and list of decoding databases, create new def with
physical values (no duplicate signals and optionally filtered/rebaselined)
"""
import can_decoder
import pandas as pd
df_phys = pd.DataFrame()
# -*- coding: utf-8 -*-
"""
Created on Mon Jan 27 13:30:31 2020
@author: User
"""
import sys
import datetime as dt
from collections import Counter
import pprint
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.lines as mlines
from matplotlib import cm
from matplotlib import gridspec
from mpl_toolkits.mplot3d import Axes3D # noqa: F401 unused import
# import os
from platform import system
import glob
import cycler
import numpy as np
import pandas as pd
from pandas.api.types import CategoricalDtype
from bs4 import BeautifulSoup
import re
from scipy.stats import linregress
# from sklearn import linear_model
import scipy.signal
import itertools
from itertools import chain, repeat
import logging
import datetime as dt
from pathlib import Path
# import h5py
from multiprocessing import Pool, cpu_count
# import timeit
# import time
matplotlib.rcParams.update({"font.size": 16})
plt.rcParams["font.family"] = "sans-serif"
plt.rcParams["font.sans-serif"] = "Helvetica"
plt.rcParams["axes.edgecolor"] = "#333F4B"
plt.rcParams["xtick.color"] = "#333F4B"
plt.rcParams["ytick.color"] = "#333F4B"
try:
import statsmodels.formula.api as smf
import statsmodels.api as sm
import seaborn as sns
except Exception as e:
print("No modules: %s" % e)
from file_py_helper.find_folders import FindExpFolder
from file_py_helper.file_functions import FileOperations
from file_py_helper.PostChar import (
SampleSelection,
Characterization_TypeSetting,
SampleCodesChar,
)
if __name__ == "__main__":
print(f"Package: {__package__}, File: {__file__}")
from elchempy.main_run_PAR_DW import ECRunOVV
from elchempy.indexer.prepare_input import CleanUpCrew
from elchempy.experiments.EIS.models import Model_Collection
import post_helper
import merger
# import EC
# sys.path.append(list(FH_path.rglob('*.py')))
# import FH_path.joinpath('FindExpFolder.py')
# import FindExpFolder.py
# from FileHelper import FindExpFolder
# from FindExpFolder import *
# from .experiments import EIS
# from .runEC import run_PAR_DW
from elchempy.runEC.EC_logging_config import start_logging
# logger = start_logging(__name__)
else:
# print('\n\n***** run_PAR_DW *****')
print(f"File: {__file__}, Name:{__name__}, Package:{__package__}")
# FH_path = Path(__file__).parent.parent.parent
# sys.path.append(str(FH_path))
# import FileHelper
from elchempy.main_run_PAR_DW import ECRunOVV
from elchempy.indexer.prepare_input import CleanUpCrew
from elchempy.runEC.EC_logging_config import start_logging
from elchempy.PostEC import post_helper, merger
from elchempy.experiments.EIS.models import Model_Collection
# logger = start_logging(__name__)
_logger = logging.getLogger(__name__)
_logger.setLevel(20)
EvRHE = "E_AppV_RHE"
class PostEC:
AllColls = [
"Unnamed: 0",
"Segment #",
"Point #",
"E(V)",
"I(A)",
"Elapsed Time(s)",
"Current Range",
"Status",
"E Applied(V)",
"Frequency(Hz)",
"Z Real",
"Z Imag",
"ActionId",
"AC Amplitude",
"RHE_OCP",
"E_AppV_RHE",
"E_Applied_VRHE",
"j A/cm2",
"jmAcm-2",
"jcorr",
"Gas",
"EXP",
"Electrode",
"j_ring",
"RPM",
"Comment",
"Measured_OCP",
"pH",
"Electrolyte",
"ScanRate_calc",
"SampleID",
"File",
"BaseName",
"hash",
"Instrument",
"DATE",
"EvRHE_diff",
"DestFile",
"Sweep_Type",
"Type",
"Cycle",
"DAC_V",
"Scanrate",
"ORR_scan",
"Jcorr",
"J_N2_scan",
"J_O2_diff",
"J_O2_diff_diff",
"Analysis_date",
"J_2nd_diff",
"Jkin_max",
"Jkin_min",
"E_onset",
"Diff_lim",
"E_half",
"I(A)_ring",
"I(A)_disk",
"Frac_H2O2",
"J_ring",
"n_ORR",
]
DropColls = [
"Unnamed: 0",
"Segment #",
"Point #",
"E(V)",
"I(A)",
"Elapsed Time(s)",
"Current Range",
"Status",
"E Applied(V)",
"Frequency(Hz)",
"Z Real",
"Z Imag",
"ActionId",
"AC Amplitude",
"RHE_OCP",
"E_AppV_RHE",
"jmAcm-2",
"jcorr",
"Gas",
"EXP",
"Electrode",
"j_ring",
"RPM",
"Comment",
"Measured_OCP",
"pH",
"Electrolyte",
"ScanRate_calc",
"SampleID",
"File",
"BaseName",
"hash",
"Instrument",
"DATE",
"EvRHE_diff",
"DestFile",
"Sweep_Type",
"Type",
"Cycle",
"DAC_V",
"Scanrate",
"ORR_scan",
"Jcorr",
"J_N2_scan",
"J_O2_diff",
"J_O2_diff_diff",
"Analysis_date",
"J_2nd_diff",
"Jkin_max",
"Jkin_min",
"E_onset",
"Diff_lim",
"E_half",
"I(A)_ring",
"I(A)_disk",
"Frac_H2O2",
"J_ring",
"n_ORR",
]
KeepColls = [
"E_AppV_RHE",
"jmAcm-2",
"Jcorr",
"J_N2_scan",
"Jkin_max",
"Jkin_min",
"Frac_H2O2",
"J_ring",
"n_ORR",
]
# SampleCodes = FindExpFolder.LoadSampleCode()
# FindExpFolder('VERSASTAT').SampleCodeLst
# PostDestDir.mkdir(parents=True,exist_ok=True)
# ExpPARovv = EC_loadOVV()
# OnlyRecentMissingOVV = runEC.MainPrepareList()
# ExpPARovv = ExpPARovv.iloc[100:120]
OutParsID = pd.DataFrame()
# Go1, Go2, Go3 = True, False, False
# Go1, Go2, Go3 = False, True, False
Go1, Go2, Go3 = False, False, True
# KL_coeff = KL_coefficients()
EvRHE_List = [
0,
0.1,
0.2,
0.3,
0.4,
0.45,
0.5,
0.55,
0.6,
0.65,
0.7,
0.75,
0.8,
0.9,
1,
]
def __init__(self):
self.DestDir = FindExpFolder("VERSASTAT").PostDir
@staticmethod
def StartLogging(level_log="INFO"):
# level_log = kwargs['level']
log_fn = FindExpFolder("VERSASTAT").PostDir.joinpath("PostEC_logger.log")
logging.basicConfig(
filename=log_fn,
filemode="w",
level=level_log,
format="%(asctime)s %(levelname)s, %(lineno)d: %(message)s",
)
logging.warning("Started logging for PostEC script...")
def applyParallel(dfGrouped, func):
with Pool(cpu_count() - 1) as p:
ret_list = p.map(func, [group for name, group in dfGrouped])
return ret_list
def check_status(file, verbose=False):
"""Check status will return (status,extra) of filename"""
PAR_file_test = Path(str(file)).stem
match = [
re.search("(?<!VERS|Vers)(AST|postAST|pAST)", str(a))
for a in PAR_file_test.split("_")
]
if any(match):
status = "EoL"
extra = [
a
for a in PAR_file_test.split("_")
if [i for i in match if i][0][0] in a
]
if verbose:
print(file, status, *extra)
return status, extra[0]
# if any([re.search(r'', i) for i in str(Path(str(file)).stem.split('_'))]):
else:
return "BoL", 0
# status =
# extra = [0]
# return status,extra
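# Illustrative sketch: for a stem like 'N2_HER_1500rpm_JOS6_pAST-sHA_285_#3' the (p)AST
# token makes check_status return roughly ('EoL', 'pAST-sHA'), while a stem without such
# a token returns ('BoL', 0).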
def postEC_Status(files, verbose=False):
# files = ['N2_HER_1500rpm_JOS6_pAST-sHA_285_#3_Disc_Parstat']
if len(files) > 1:
status_lst, extra_lst = [], []
for file in files:
status, extra = PostEC.check_status(file)
status_lst.append(status)
extra_lst.append(extra)
return status_lst, extra_lst
else:
return PostEC.check_status(files)
def OLD_PostOrganizeFolders(TakeRecentList=True):
postOVV = []
PostDestDir = FindExpFolder("VERSASTAT").DestDir.joinpath("PostEC")
PAR_version = FileOperations.version
RunOVV_fn_opts = list(
FindExpFolder("VERSASTAT").DestDir.rglob(
"RunOVV_v{0}.xlsx".format(PAR_version)
)
)
RunOVV_fn = [i for i in RunOVV_fn_opts if not "_Conflict" in i.stem][0]
if RunOVV_fn.is_file() and TakeRecentList == True:
OvvFromFile = pd.read_excel(RunOVV_fn, index_col=[0])
status, extra = PostEC.postEC_Status(OvvFromFile.PAR_file.values)
OvvFromFile = OvvFromFile.assign(
**{
"Date_PAR_EXP": OvvFromFile.PAR_date - OvvFromFile.EXP_date,
"Status": status,
"Extra": extra,
}
)
OnlyRecentMissingOVV = OvvFromFile
# OvvFromFile['Date_PAR_EXP'] = OvvFromFile.PAR_date-OvvFromFile.EXP_date
# OvvFromFile['Status'] = OvvFromFile.PAR_file.values
print("EC OVV loaded from file:{0}".format(RunOVV_fn))
OnlyRecentMissingOVV = FileOperations.ChangeRoot_DF(
OnlyRecentMissingOVV, ["Dest_dir", "EXP_dir", "PAR_file"]
)
# CS_parts_PDD = FileOperations.find_CS_parts(PostDestDir)
# CS_parts_pOVV = FileOperations.find_CS_parts(OnlyRecentMissingOVV.Dest_dir.iloc[0])
# chLst =[]
# if CS_parts_PDD[0] != CS_parts_pOVV[0]:
# chLst = [CS_parts_PDD[0].joinpath(FileOperations.find_CS_parts(i)[1]) for i in OnlyRecentMissingOVV.Dest_dir.values]
# OnlyRecentMissingOVV['Dest_dir'] = chLst
# else:
# pass
postOVVlst, outLst = [], []
postOVVcols = [
"DestFilename",
"SampleID",
"Status",
"Status_extra",
"Electrolyte",
"Gas",
"RPM",
"Scanrate",
"EXP_date",
"Type_Exp",
"SourceFilename",
"Exp_dir",
]
# postOVVout = PostEC.FromListgrp(group)
# postOVVlst = PostEC.applyParallel(OnlyRecentMissingOVV.groupby('Dest_dir'),PostEC.FromListgrp)
# postOVVlst = [outLst.append(PostEC.FromListgrp(i)) for i in OnlyRecentMissingOVV.groupby('Dest_dir')]
# for i in OnlyRecentMissingOVV.groupby('Dest_dir'):
# PostEC.FromListgrp(i)
# try:
# postOVVout = pd.DataFrame(postOVVlst,columns=)
# except Exception as e:
# postOVVout = pd.DataFrame(postOVVlst)
# for n,gr in OnlyRecentMissingOVV.groupby(by=['Dest_dir']):
# PostEC.FromListgrp(n,gr.EXP_dir.unique()[0])
# pass
# postOVVlst = [outLst.append(PostEC.FromListgrp(n,gr.EXP_dir.unique()[0])) for n,gr in OnlyRecentMissingOVV.groupby(by=['Dest_dir'])]
postOVVout = pd.concat(
[pd.DataFrame(i, columns=postOVVcols) for i in outLst],
sort=False,
ignore_index=True,
)
postOVVout.to_excel(PostDestDir.joinpath("postEC_Organized.xlsx"))
return postOVVout
class EnterExitLog:
def __init__(self, funcName):
self.funcName = funcName
def __enter__(self):
_logger.info(f"Started: {self.funcName}")
self.init_time = dt.datetime.now()
return self
def __exit__(self, type, value, tb):
self.end_time = dt.datetime.now()
self.duration = self.end_time - self.init_time
_logger.info(f"Finished: {self.funcName} in {self.duration}")
def func_timer_decorator(func):
def func_wrapper(*args, **kwargs):
with EnterExitLog(func.__name__):
return func(*args, **kwargs)
return func_wrapper
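# Illustrative sketch ('load_all_pars' is a hypothetical name): decorating a function logs
# entry, exit and duration through EnterExitLog:
# @func_timer_decorator
# def load_all_pars():
#     ...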
def get_daily_pickle(exp_type=""):
today = dt.datetime.now().date()
_result = {"today": today}
if exp_type:
daily_pickle_path = FindExpFolder("VERSASTAT").PostDir.joinpath(
f"{today:%Y-%m-%d}_{exp_type}_{system()}.pkl.compress"
)
daily_pkl_options = list(
FindExpFolder("VERSASTAT").PostDir.rglob(
f"*_{exp_type}_{system()}.pkl.compress"
)
)
daily_pkl_options = sorted(daily_pkl_options, key=lambda x: x.stat().st_ctime)
_result.update(
{
"daily_path": daily_pickle_path,
"_exists": daily_pickle_path.exists(),
"daily_options": daily_pkl_options,
}
)
daily_pickle_path_RAW = FindExpFolder("VERSASTAT").PostDir.joinpath(
f"{today:%Y-%m-%d}_{exp_type}_{system()}_RAW.pkl.compress"
)
daily_pkl_options_RAW = list(
FindExpFolder("VERSASTAT").PostDir.rglob(
f"*_{exp_type}_{system()}_RAW.pkl.compress"
)
)
daily_pkl_options_RAW = sorted(
daily_pkl_options_RAW, key=lambda x: x.stat().st_ctime
)
_result.update(
{
"daily_path_RAW": daily_pickle_path_RAW,
"_raw_exists": daily_pickle_path_RAW.exists(),
"daily_options_RAW": daily_pkl_options_RAW,
}
)
if "EIS" in exp_type:
_result.update(
{
"daily_path_BRUTE": FindExpFolder("VERSASTAT").PostDir.joinpath(
f"{today:%Y-%m-%d}_{exp_type}_BRUTE_{system()}.pkl.compress"
),
"daily_path_RAW_WB": FindExpFolder("VERSASTAT").PostDir.joinpath(
f"{today:%Y-%m-%d}_{exp_type}_RAW_WB_{system()}.pkl.compress"
),
}
)
return _result
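# Illustrative sketch: _paths = get_daily_pickle(exp_type="EIS_pars") returns a dict with
# 'daily_path', '_exists', 'daily_options', the *_RAW variants and, for EIS experiment
# types, the additional BRUTE / RAW_WB paths.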
def _collect_test():
tt = CollectLoadPars(load_type="fast")
class CollectLoadPars:
def __init__(self, load_type="fast"):
self.load_type = load_type
self.load_pars()
self.collect_dict()
def load_pars(self):
_BaseLoad = BaseLoadPars()
_kws = {"EC_index": _BaseLoad.EC_index, "SampleCodes": _BaseLoad.SampleCodes}
if "fast" in self.load_type:
_kws.update(**{"reload": False, "reload_raw": False})
self.EIS_load = EIS_LoadPars(**_kws)
self.ORR_load = ORR_LoadPars(**_kws)
self.N2_load = N2_LoadPars(**_kws)
def collect_dict(self):
_load_attrs = [i for i in self.__dict__.keys() if i.endswith("_load")]
_collect = {}
for _load_pars in _load_attrs:
_pars_name = f'{_load_pars.split("_")[0]}_pars'
if hasattr(getattr(self, _load_pars), _pars_name):
_pars = getattr(getattr(self, _load_pars), _pars_name)
_collect.update({_pars_name: _pars})
self.pars_collection = _collect
class BaseLoadPars:
_required_funcs = [
"make_raw_pars_from_scratch",
"edit_raw_columns",
"search_pars_files",
"read_in_pars_files",
"extra_stuff_delegator",
]
def __init__(
self,
EC_index=pd.DataFrame(),
SampleCodes=pd.DataFrame(),
exp_type="",
reload=False,
reload_raw=False,
):
self.exp_type = exp_type
self._auto_set_exp_type()
self.EC_index = EC_index
self.SampleCodes = SampleCodes
self._check_class_req_functions()
self.check_EC_index()
self.set_OVV_exp_type()
self._reload = reload
self._reload_raw = reload_raw
self.get_daily_pickle()
if self.exp_type:
self.load_delegator()
def _auto_set_exp_type(self):
_cls_name = self.__class__.__name__
if "_" in _cls_name:
_cls_exp_type = _cls_name.split("_")[0]
_exp_type = f"{_cls_exp_type}_pars"
self.exp_type = _exp_type
def check_EC_index(self):
if self.EC_index.empty:
EC_index = ECRunOVV(load=1).EC_index
EC_index = FileOperations.ChangeRoot_DF(EC_index, [])
EC_index.PAR_file = EC_index.PAR_file.astype(str)
EC_index["Loading_cm2"] = EC_index["Loading_cm2"].round(3)
self.EC_index = EC_index
if self.SampleCodes.empty:
SampleCodes = FindExpFolder().LoadSampleCode()
self.SampleCodes = SampleCodes
# SampleCodesChar().load
def set_OVV_exp_type(self):
if not self.EC_index.empty and self.exp_type:
PAR_exp_uniq = self.EC_index.PAR_exp.unique()
PAR_match = [
parexp
for parexp in PAR_exp_uniq
if self.exp_type.split("_")[0] in parexp
]
self.exp_type_match = PAR_match
# if PAR_match:
EC_index_exp = self.EC_index.loc[self.EC_index.PAR_exp.isin(PAR_match)]
self.EC_index_exp = EC_index_exp
if EC_index_exp.empty:
_logger.error(f'set_OVV_exp_type "{self.__class__.__name__}" empty')
self.EC_index_exp_destdirs = EC_index_exp.Dest_dir.unique()
def get_daily_pickle(self):
exp_type = self.exp_type
today = dt.datetime.now().date()
_result = {"today": today}
if exp_type:
daily_pickle_path = FindExpFolder("VERSASTAT").PostDir.joinpath(
f"{today:%Y-%m-%d}_{exp_type}_{system()}.pkl.compress"
)
daily_pkl_options = list(
FindExpFolder("VERSASTAT").PostDir.rglob(
f"*_{exp_type}_{system()}.pkl.compress"
)
)
daily_pkl_options = sorted(
daily_pkl_options, key=lambda x: x.stat().st_ctime
)
_result.update(
{
"daily_path": daily_pickle_path,
"_exists": daily_pickle_path.exists(),
"daily_options": daily_pkl_options,
}
)
if not daily_pkl_options and not self._reload_raw:
self._reload_raw = True
daily_pickle_path_RAW = FindExpFolder("VERSASTAT").PostDir.joinpath(
f"{today:%Y-%m-%d}_{exp_type}_{system()}_RAW.pkl.compress"
)
_pickle_path_RAW_read_in = FindExpFolder("VERSASTAT").PostDir.joinpath(
f"{exp_type}_{system()}_RAW_read_in.pkl.compress"
)
daily_pkl_options_RAW = list(
FindExpFolder("VERSASTAT").PostDir.rglob(
f"*_{exp_type}_{system()}_RAW.pkl.compress"
)
)
daily_pkl_options_RAW = sorted(
daily_pkl_options_RAW, key=lambda x: x.stat().st_ctime
)
_result.update(
{
"daily_path_RAW": daily_pickle_path_RAW,
"_raw_exists": daily_pickle_path_RAW.exists(),
"daily_options_RAW": daily_pkl_options_RAW,
"pkl_path_RAW_read_in": _pickle_path_RAW_read_in,
}
)
if "EIS" in exp_type:
daily_pkl_options_RAW_WB = list(
FindExpFolder("VERSASTAT").PostDir.rglob(
f"*_{exp_type}_{system()}_RAW_WB.pkl.compress"
)
)
daily_pkl_options_RAW_WB = sorted(
daily_pkl_options_RAW_WB, key=lambda x: x.stat().st_ctime
)
_result.update(
{
"daily_path_BRUTE": FindExpFolder("VERSASTAT").PostDir.joinpath(
f"{today:%Y-%m-%d}_{exp_type}_{system()}_BRUTE.pkl.compress"
),
"daily_path_RAW_WB": FindExpFolder(
"VERSASTAT"
).PostDir.joinpath(
f"{today:%Y-%m-%d}_{exp_type}_{system()}_RAW_WB.pkl.compress"
),
"daily_options_RAW_WB": daily_pkl_options_RAW_WB,
}
)
self.daily_pickle_path = _result
def load_delegator(self):
setattr(self, self.exp_type, pd.DataFrame())
if self._reload:
if self._reload_raw:
self.make_raw_pars_from_scratch()
else:
self.read_in_daily_raw()
if hasattr(self, "edit_raw_columns"):
try:
self.edit_raw_columns()
except Exception as e:
_logger.warning(
f'edit_raw_columns in load_delegator "{self.__class__.__name__}" {self.exp_type} failed because {e}'
)
self.save_daily_pars()
else:
self.read_in_daily_pars()
try:
self.extra_stuff_delegator()
except Exception as e:
_logger.warning(
f'extra_stuff_delegator "{self.__class__.__name__}" {self.exp_type} failed because {e}'
)
def _check_class_req_functions(self):
for _f in self._required_funcs:
if not hasattr(self, _f) and "BaseLoadPars" not in self.__class__.__name__:
_logger.warning(
f'Class "{self.__class__.__name__}" is missing required func: "{_f}"'
)
def save_daily_pars(self):
pars = getattr(self, self.exp_type)
pars.to_pickle(self.daily_pickle_path["daily_path"])
_logger.info(
f'{self.exp_type} len({len(pars)}) OVV to daily pickle: {self.daily_pickle_path.get("daily_path")}'
)
def read_in_daily_pars(self):
if self.daily_pickle_path.get("daily_options"):
_pars_fp = self.daily_pickle_path.get("daily_options")[-1]
_logger.info(
f"start read_in_daily_pars {self.exp_type} pars OVV from daily {_pars_fp} "
)
_pars = pd.read_pickle(_pars_fp)
try:
_pars = FileOperations.ChangeRoot_DF(_pars, [], coltype="string")
setattr(self, self.exp_type, _pars)
_logger.info(f"Loaded {self.exp_type} pars OVV from daily {_pars_fp} ")
except Exception as e:
_pars = pd.DataFrame()
_logger.error(
f" ERROR in Loaded {self.exp_type} pars OVV from daily {_pars_fp} {e} "
)
else:
_pars = pd.DataFrame()
_pars_fp = "options empty list"
if _pars.empty:
_logger.error(
f" ERROR in Loaded {self.exp_type} pars OVV from daily {_pars_fp}: empty "
)
def reload_raw_df_delegator(self):
_raw_read_fp = self.daily_pickle_path.get("pkl_path_RAW_read_in")
if _raw_read_fp.exists() and not (self._reload or self._reload_raw):
_pars_RAW_read_in = pd.read_pickle(_raw_read_fp)
setattr(self, f"{self.exp_type}_RAW", _pars_RAW_read_in)
else:
self.generate_raw_df()
self.reload_raw_df()
_pars_RAW_read_in = getattr(self, f"{self.exp_type}_RAW")
_pars_RAW_read_in.to_pickle(_raw_read_fp)
def read_in_daily_raw(self):
_raw_fp = self.daily_pickle_path.get("daily_options_RAW")[-1]
_pars_RAW = pd.read_pickle(_raw_fp)
_pars_RAW.sort_values("source_delta_mtime", inplace=True)
if not "level_0" in _pars_RAW.columns:
_pars_RAW = _pars_RAW.reset_index()
setattr(self, f"{self.exp_type}_RAW", _pars_RAW)
_logger.info(f"Loaded raw df {self.exp_type} from daily {_raw_fp} ")
def save_daily_raw(self):
_pars_RAW = getattr(self, f"{self.exp_type}_RAW")
_pars_RAW.to_pickle(self.daily_pickle_path.get("daily_path_RAW"))
_logger.info(
f'{self.exp_type} OVV to daily pickle: {self.daily_pickle_path.get("daily_path_RAW")}'
)
def set_gen_raw_fls(self):
_par_files = [
list(self.search_pars_files(d)) for d in self.EC_index_exp_destdirs
]
self._par_files = _par_files
if not _par_files:
_logger.warning(f"{self.exp_type} set_gen_raw_fls: list empty ")
self._par_fls_gen = (a for i in self._par_files for a in i)
@func_timer_decorator
def generate_raw_df(self):
if not hasattr(self, "_par_fls_gen"):
self.set_gen_raw_fls()
_pars_lst = list(self.read_in_pars_files(self._par_fls_gen))
try:
_pars_RAW = pd.concat(_pars_lst, sort=False)
except Exception as e:
_pars_RAW = pd.DataFrame()
_logger.warning(f"{self.exp_type} generate_raw_df: {e}")
setattr(self, f"{self.exp_type}_RAW", _pars_RAW)
@staticmethod
def get_source_meta(filepath):
i = filepath
_source_mtime = dt.datetime.fromtimestamp(i.stat().st_mtime)
_delta_mtime = dt.datetime.now() - _source_mtime
_meta_res = {
"sourceFilename": i,
"source_mtime": _source_mtime,
"source_delta_mtime": _delta_mtime,
"sourcebasename": i.stem,
}
return _meta_res
def extra_stuff_delegator(self):
_extra_funcs = [i for i in self.__dict__.keys() if i.startswith("_extra")]
for _func in _extra_funcs:
try:
func = getattr(self, _func)
func()
# self._extra_plotting()
except Exception as e:
_logger.info(
f"{self.__class__.__name__} Extra stuff failed because {e}"
)
def _testing():
tt = EIS_LoadPars(reload=False, reload_raw=False)
tt._reload_raw
self = tt
self.load_delegator()
self.make_raw_pars_from_scratch()
class EIS_LoadPars(BaseLoadPars):
col_names = ["File_SpecFit", "File_SpecRaw", "PAR_file"]
def __init__(
self,
EC_index=pd.DataFrame(),
SampleCodes=pd.DataFrame(),
exp_type="EIS_pars",
BRUTE_out=False,
**kws,
):
self.BRUTE_out = BRUTE_out
super().__init__(
EC_index=EC_index, SampleCodes=SampleCodes, exp_type=exp_type, **kws
)
def read_in_pars_files(self, _genlist):
# _ps = Path(d).rglob(f'*_pars_v{FileOperations.version}.xlsx' )
while True:
try:
i = next(_genlist)
if i.name.endswith("xlsx"):
_pp = pd.read_excel(i, index_col=[0])
elif i.name.endswith("pkl"):
_pp = pd.read_pickle(i)
_pp = FileOperations.ChangeRoot_DF(_pp, [], coltype="string")
_meta = self.get_source_meta(i)
_pp = _pp.assign(**_meta)
yield _pp
except StopIteration:
return "all done"
print("gen empty")
def search_pars_files(self, _dest_dir):
return Path(_dest_dir.joinpath("EIS")).rglob(
f"*_pars_v{FileOperations.EIS_version}.xlsx"
)
@func_timer_decorator
def make_raw_pars_from_scratch(self):
_logger.info(
f'Reloading raw extra steps "{self.__class__.__name__}" {self.exp_type}'
)
self.reload_raw_df_delegator()
self._load_WB_delegator()
self._merge_WB_pars_raw()
self._raw_finish_edit_columns()
self.save_daily_raw()
def reload_raw_df_delegator(self):
_raw_read_fp = self.daily_pickle_path.get("pkl_path_RAW_read_in")
if _raw_read_fp.exists() and not (self._reload or self._reload_raw):
EIS_pars_RAW_read_in = pd.read_pickle(_raw_read_fp)
setattr(self, f"{self.exp_type}_RAW", EIS_pars_RAW_read_in)
else:
self.generate_raw_df()
self.reload_raw_df()
EIS_pars_RAW_read_in = getattr(self, f"{self.exp_type}_RAW")
EIS_pars_RAW_read_in.to_pickle(_raw_read_fp)
def reload_raw_df(self):
_pars_RAW = getattr(self, f"{self.exp_type}_RAW")
_pars_RAW.sort_values("source_delta_mtime", inplace=True)
_pars_RAW = _pars_RAW.reset_index()
setattr(self, f"{self.exp_type}_RAW", _pars_RAW)
self._raw_extra_steps()
_logger.info(f'Reloading "{self.__class__.__name__}" {self.exp_type}')
# self.EIS_pars_RAW = EIS_pars_RAW
def _raw_extra_steps(self):
_logger.info(
f'Reloading raw extra steps "{self.__class__.__name__}" {self.exp_type}'
)
EIS_pars_all = getattr(self, f"{self.exp_type}_RAW")
float_cols = set(
[
a
for i in EIS_pars_all.lmfit_var_names.unique()
if type(i) == str and not "(" in i
for a in i.split(", ")
]
)
float_cols.update(
set(
[a for i in float_cols for a in EIS_pars_all.columns if a.startswith(i)]
)
)
EIS_pars_all[list(float_cols)] = EIS_pars_all[list(float_cols)].fillna(0)
# EIS_pars_all[list(float_cols)] = EIS_pars_all[list(float_cols)].astype(float)
obj_flt_cols = [
i
for i in EIS_pars_all.columns
if str(EIS_pars_all[i].dtype) == "object" and i in float_cols
]
EIS_pars_all[obj_flt_cols] = EIS_pars_all[obj_flt_cols].replace("", 0)
EIS_pars_all[list(float_cols)] = EIS_pars_all[list(float_cols)].astype(float)
wrong_fls = [
EIS_pars_all.loc[EIS_pars_all[i].astype(str).str.contains("Parameter")]
for i in obj_flt_cols
]
if wrong_fls:
wrong_objflt_df = pd.concat(wrong_fls)
fix_dct = {
i: [
float(v.split("value=")[-1].split(",")[0])
for v in wrong_objflt_df[i].values
]
for i in obj_flt_cols
}
fixed_objflt_df = wrong_objflt_df.assign(**fix_dct)
EIS_pars_all = pd.concat(
[
EIS_pars_all.drop(index=wrong_objflt_df.index, axis=0),
fixed_objflt_df,
],
axis=0,
sort=True,
)
setattr(self, f"{self.exp_type}_RAW", EIS_pars_all)
def _load_WB_delegator(self):
daily_options_WB = self.daily_pickle_path.get("daily_options_RAW_WB")
if daily_options_WB:
_WB_RAW_daily_path = daily_options_WB[-1]
if _WB_RAW_daily_path.exists() and not (self._reload or self._reload_raw):
_EIS_WB_pars_all = pd.read_pickle(_WB_RAW_daily_path)
setattr(self, f"{self.exp_type}_WB", _EIS_WB_pars_all)
else:
self.reload_raw_WB_df()
else:
self.reload_raw_WB_df()
def reload_raw_WB_df(self):
_logger.info(f'Reloading "{self.__class__.__name__}" {self.exp_type} WB')
_EIS_WB_files = [
list(Path(d.joinpath("EIS/lin_Warburg")).rglob(f"lin_Warburg*.pkl"))
for d in self.EC_index_exp_destdirs
]
self._EIS_WB_files = _EIS_WB_files
self._EIS_WB_fls = (a for i in _EIS_WB_files for a in i)
_WB_lst = list(self.read_in_pars_files(self._EIS_WB_fls))
_EIS_WB_pars_all = pd.concat(_WB_lst, sort=False, ignore_index=True)
setattr(self, f"{self.exp_type}_WB", _EIS_WB_pars_all)
_EIS_WB_pars_all.to_pickle(self.daily_pickle_path.get("daily_path_RAW_WB"))
def _merge_WB_pars_raw(self):
_EIS_WB_pars_all = getattr(self, f"{self.exp_type}_WB")
EIS_pars_all = getattr(self, f"{self.exp_type}_RAW")
_diffcols = set(EIS_pars_all.columns).difference(_EIS_WB_pars_all.columns)
_mcols = [
i
for i in set(EIS_pars_all.columns).intersection(_EIS_WB_pars_all.columns)
if i
not in [
"sourceFilename",
"source_mtime",
"source_delta_mtime",
"sourcebasename",
]
]
_dtype_mismatch = [
(i, EIS_pars_all[i].dtype, _EIS_WB_pars_all[i].dtype)
for i in _mcols
if EIS_pars_all[i].dtype != _EIS_WB_pars_all[i].dtype
]
if _dtype_mismatch:
_excl = []
for i in _dtype_mismatch:
try:
_EIS_WB_pars_all[i[0]] = _EIS_WB_pars_all[i[0]].astype(i[1])
except Exception as e:
_excl.append(i[0])
print(i, "\n", e)
_mcols = [i for i in _mcols if i not in _excl]
# EIS_pars_all[i[0]] = EIS_pars_all[i[0]].astype(i[2])
_merge = pd.merge(
EIS_pars_all, _EIS_WB_pars_all, on=_mcols, how="left", suffixes=("", "_WB")
)
        # keep the merged frame (or fall back to the unmerged pars) and store it on the
        # instance, since the caller relies on this side effect rather than a return value
        if not _merge.empty:
            EIS_pars_all = _merge
        else:
            print("WB merge was empty")
        setattr(self, f"{self.exp_type}_RAW", EIS_pars_all)
def _raw_finish_edit_columns(self):
# EIS_pars_all = self._merge_WB_pars_raw(EIS_pars_all)
EIS_pars_all = getattr(self, f"{self.exp_type}_RAW")
EIS_pars_all = EIS_pars_all.assign(
**{
"EIS_fake": [
"fakeZmean" in Path(i).name
for i in EIS_pars_all.PAR_file.to_numpy()
]
}
)
_not_in_index = EIS_pars_all.loc[
(
~(EIS_pars_all.PAR_file.isin(self.EC_index.PAR_file.values))
& (~EIS_pars_all.EIS_fake == True)
)
]
CleanUpCrew(list_of_files=_not_in_index.sourceFilename.unique(), delete=True)
EIS_pars_all = EIS_pars_all.iloc[
~(EIS_pars_all.index.isin(_not_in_index.index))
]
EIS_pars_all = Load_from_Indexes.test_update_from_index(
EIS_pars_all, self.EC_index
)
setattr(self, f"{self.exp_type}_RAW", EIS_pars_all)
def edit_raw_columns(self):
EIS_pars_all = getattr(self, f"{self.exp_type}_RAW")
# EIS_pars_RAW = self._raw_extra_steps(EIS_pars_RAW)
E_dc_RHE_cols = [
(np.round(i, 3), np.round(i, 3) * 1e3) for i in EIS_pars_all[EvRHE].values
]
EIS_pars_all = EIS_pars_all.assign(
**{
"E_dc_RHE": [i[0] for i in E_dc_RHE_cols],
"E_dc_RHE_mV": [i[1] for i in E_dc_RHE_cols],
}
)
EIS_pars_recent = EIS_pars_all.loc[
(EIS_pars_all.source_mtime > pd.Timestamp(dt.date(2020, 11, 25)))
& (EIS_pars_all.PAR_file.str.contains("None") == False)
]
EIS_pars_undup = EIS_pars_recent.dropna(subset=self.col_names).drop_duplicates(
keep="first"
)
# === POST EDITING OF LOADED PARS ===
EIS_pars_undup = EIS_pars_undup.assign(
**{"Loading_cm2": EIS_pars_undup["Loading_cm2"].round(3)}
)
EIS_pars_undup = post_helper.make_uniform_EvRHE(EIS_pars_undup)
EIS_pars_undup = CollectPostOVV.MatchECconditions(EIS_pars_undup)
# EIS_pars_undup = Load_from_Indexes.add_missing_ECindex_cols(EC_index, EIS_pars_undup)
_oc_OVV = list(EIS_pars_undup.columns.intersection(self.EC_index_exp.columns))
if not set(self.EC_index_exp.groupby(_oc_OVV).groups.keys()).intersection(
EIS_pars_undup.groupby(_oc_OVV).groups.keys()
):
_drpcols = [
a
for a in EIS_pars_undup.columns
if (
                    a in [i for i in _oc_OVV if i != "PAR_file"]
or "_".join(a.split("_")[0:-1])
                    in [i for i in _oc_OVV if i != "PAR_file"]
)
]
# EIS_pars_undup.drop(columns =_drpcols)
EIS_pars_undup = Load_from_Indexes.add_missing_ECindex_cols(
self.EC_index, EIS_pars_undup.drop(columns=_drpcols)
)
# EIS_pars_undup = pd.merge(EIS_pars_undup,EIS_OVV,on=_oc_OVV, how='left')
_oc_SC = list(EIS_pars_undup.columns.intersection(self.SampleCodes.columns))
EIS_pars_undup = pd.merge(
EIS_pars_undup, self.SampleCodes, how="left", on=_oc_SC
)
EIS_pars_BRUTE = EIS_pars_undup.loc[
(EIS_pars_undup.BRUTE_FIT == 1) | (EIS_pars_undup.FINAL_FIT == 0)
]
if self.BRUTE_out:
            EIS_pars_BRUTE.to_pickle(self.daily_pickle_path["daily_path_BRUTE"])
EIS_pars = EIS_pars_undup.loc[(EIS_pars_undup.FINAL_FIT == 1)]
EIS_pars = EIS_extra_methods.add_best_model_per_spectrum(EIS_pars)
setattr(self, self.exp_type, EIS_pars)
# def extra_stuff_delegator(self):
# try:
# self._extra_best_models()
# self._extra_plotting()
# except Exception as e:
# _logger.info(f'{self.__class__.__name__} Extra stuff failed because {e}')
def _extra_best_models(self):
_err_type = "lmfit_MSE"
_filter = "(EIS_pars.lmfit_MSE < 65E4) & (EIS_pars.Rct < 2E3) & (EIS_pars.Rct > 2E-2) \
& (EIS_pars.Rs > 0.01) & (EIS_pars.Rs < 200) & (EIS_pars.Cdlp < 0.075)\
& (EIS_pars.lmfit_redchi < 1E3) & (EIS_pars.Aw < 10E3) & (EIS_pars.Aw > 10E-2)\
& (EIS_pars.Qad < 1) & (EIS_pars.tau < 1E3)"
_filter += '& (EIS_pars.SampleID.str.contains("JOS1|JOS2|JOS3|JOS4|JOS5"))'
_filter += "& (EIS_pars.EIS_fake == False)"
_grps = ["Model_EEC", "Gas", "lmfit_var_names"][0:2]
EIS_pars = self.EIS_pars
best_models = (
EIS_pars.loc[eval(_filter)]
.dropna(axis=0, subset=[_err_type])
.groupby(_grps)[_err_type]
.agg(["count", "mean", "std"])
.sort_values(["Gas", "mean"], ascending=True)
)
print(best_models)
keep_models = (
best_models.loc[(best_models["count"] > 5) & (best_models["std"] > 0)]
.index.get_level_values(0)
.unique()
)
EIS_pars = EIS_pars.loc[EIS_pars.Model_EEC.isin(keep_models)]
if hasattr(EIS_pars, "best_mod_name"):
# EIS_best_mods = EIS_pars.loc[EIS_pars.Model_EEC_name.isin([i for i in EIS_pars.best_mod_name.unique() if not pd.isna(i)])]
EIS_best_mods = EIS_pars.loc[
EIS_pars.index.isin(
[i for i in EIS_pars.best_mod_n.unique() if not pd.isna(i)]
)
]
self.EIS_pars_best_mods = EIS_best_mods
_agg = (
EIS_best_mods.dropna(subset=[_err_type])
.groupby(_grps + ["E_RHE"])[_err_type]
.agg(["count", "mean", "std"])
)
_agg_best = _agg.loc[_agg["count"] > 3].sort_values(
["Gas", "E_RHE", "mean"], ascending=True
)
def _extra_plotting(self):
if hasattr(self, "EIS_pars_best_mods"):
self.EIS_pars_best_mods.query("pH < 15").plot(
y="Qad",
x="E_RHE",
c="pH",
colormap="rainbow_r",
kind="scatter",
ylim=(0, 0.05),
)
self.EIS_pars_best_mods.query("pH < 15").plot(
y="Rs",
x="E_RHE",
c="pH",
colormap="rainbow_r",
kind="scatter",
ylim=(0, 80),
)
self.EIS_pars_best_mods.query("pH < 15").plot(
y="Rs",
x="R_ion",
c="E_RHE",
colormap="rainbow_r",
kind="scatter",
ylim=(0, 80),
xlim=(0.1, 2e3),
logx=True,
)
def _testing():
t2 = ORR_LoadPars(reload=True, reload_raw=True)
tf2 = ORR_LoadPars(reload=False, reload_raw=False)
t2._reload_raw
self = tf2
self.load_delegator()
self.make_raw_pars_from_scratch()
class ORR_LoadPars(BaseLoadPars):
read_types = ["ORR_pars", "KL_pars"]
def __init__(
self,
EC_index=pd.DataFrame(),
SampleCodes=pd.DataFrame(),
exp_type="ORR_pars",
BRUTE_out=False,
**kws,
):
self.BRUTE_out = BRUTE_out
super().__init__(
EC_index=EC_index, SampleCodes=SampleCodes, exp_type=exp_type, **kws
)
def read_in_pars_files(self, _genlist):
# _ps = Path(d).rglob(f'*_pars_v{FileOperations.version}.xlsx' )
while True:
try:
i = next(_genlist)
# _source_mtime = dt.datetime.fromtimestamp(i.stat().st_mtime)
# _delta_mtime = dt.datetime.now() - _source_mtime
_i_stem = i.stem
_pparts = i.parent.parts
if "KL" == _pparts[-1]:
if _i_stem.startswith("KL_"):
_type = "KL_data"
else:
_type = "KL_unknown"
elif "RingDisk" == _pparts[-1]:
_type = "ORR_ringdisk"
elif "TAFEL" == _pparts[-1]:
_type = "Tafel"
else:
if _i_stem.startswith("ORR_pars"):
_type = "ORR_pars"
elif _i_stem.startswith("KL_pars"):
_type = "KL_pars"
elif _i_stem.startswith("O2_ORR") and _i_stem.endswith(
f"_RRDE_v{FileOperations.version}"
):
_type = "ORR_RRDE"
else:
_type = "O2_ORR_unknown"
_meta = self.get_source_meta(i)
_meta.update({"source_type": _type})
if _type in self.read_types:
_pp = pd.read_excel(i, index_col=[0])
_pp = FileOperations.ChangeRoot_DF(_pp, [], coltype="string")
_pp = _pp.assign(**_meta)
else:
_pp = pd.DataFrame(_meta, index=[0])
# _meta.update({'DF' : _pp})
yield _pp
except StopIteration:
return "all done"
print("gen empty")
@func_timer_decorator
def make_raw_pars_from_scratch(self):
_logger.info(
f'Reloading raw extra steps "{self.__class__.__name__}" {self.exp_type}'
)
self.reload_raw_df_delegator()
if hasattr(self, "_raw_finish_edit_columns"):
self._raw_finish_edit_columns()
self.save_daily_raw()
def search_pars_files(self, dest_dir):
return Path(dest_dir.joinpath(f"ORR_v{FileOperations.version}")).rglob("*xlsx")
def reload_raw_df(self):
_pars_RAW = getattr(self, f"{self.exp_type}_RAW")
_pars_RAW.sort_values("source_delta_mtime", inplace=True)
_pars_RAW = _pars_RAW.reset_index()
setattr(self, f"{self.exp_type}_RAW", _pars_RAW)
# self._raw_extra_steps()
_logger.info(f'Reloading "{self.__class__.__name__}" {self.exp_type}')
# self.EIS_pars_RAW = EIS_pars_RAW
def edit_raw_columns(self):
### Fixing the pars after loading...
# TODO : taking out duplicates based on time_since_run....
ORR_pars_char = getattr(self, f"{self.exp_type}_RAW")
# Load_na = ORR_pars_char.loc[(ORR_pars_char.Loading_cm2.isna()) & (ORR_pars_char.PAR_file.isna() == False)]
# if not Load_na.empty:
# Load_na_missingvalues =[(n,*GetSampleID.ink_loading_from_filename(i.PAR_file)) for n,i in Load_na.iterrows()]
# Load_na_vals = pd.DataFrame(Load_na_missingvalues).rename(columns={1 : 'Loading_name',2 : 'Loading_cm2'}).set_index([0])
# ORR_pars_char.Loading_cm2.fillna(value=Load_na_vals.Loading_cm2,inplace=True)
# # ORR_char_merge_cols = [i for i in ORR_pars.columns if i in SampleCodes.columns]
ORR_pars_char = ORR_pars_char.drop(
columns=[i for i in ORR_pars_char.columns if "Unnamed" in i]
)
if not ORR_pars_char.loc[ORR_pars_char.Loading_cm2.isna()].empty:
_loading_cols = ["Loading_cm2", "Loading_name", "Loading_date"]
ORR_pars_char = ORR_pars_char.drop(columns=_loading_cols)
ORR_pars_char = pd.merge(
ORR_pars_char,
self.EC_index[["PAR_file"] + _loading_cols],
on="PAR_file",
how="left",
)
ORR_pars_char.Loading_cm2 = ORR_pars_char.Loading_cm2.fillna(
value=0.379
) # fillna for Loading_cm2
ORR_pars_char.Loading_cm2 = ORR_pars_char.Loading_cm2.round(3)
if ORR_pars_char.postAST.dropna().empty:
ORR_pars_char = ORR_pars_char.drop(columns="postAST")
# _int = list(set(ORR_pars_char.columns).intersection(set(EC_index.columns)))
ORR_pars_char = pd.merge(
ORR_pars_char,
self.EC_index[["PAR_file", "postAST"]],
on="PAR_file",
suffixes=("", ""),
)
ORR_pars_char = make_uniform_RPM_DAC(ORR_pars_char)
setattr(self, f"{self.exp_type}", ORR_pars_char)
# def extra_stuff_delegator(self):
# try:
# self._extra_plotting()
# except Exception as e:
# _logger.info(f'{self.__class__.__name__} Extra stuff failed because {e}')
def _extra_plotting(self):
ORR_pars_char = getattr(self, f"{self.exp_type}")
for swp, swgrp in ORR_pars_char.query("(pH < 14) & (RPM_DAC > 900)").groupby(
"Sweep_Type"
):
fig, (ax1, ax2) = plt.subplots(figsize=(10, 4), ncols=2)
# plt.figure()
swgrp.plot(
y="ORR_Jkin_min_750",
x="ORR_E_onset",
c="pH",
title=f"{swp}",
kind="scatter",
logy=True,
colormap="rainbow_r",
ylim=[0.1, 50],
xlim=(0.5, 1),
ax=ax1,
)
ax1.set_xlabel("E onset / mV_RHE")
swgrp.plot(
y="ORR_Frac_H2O2_600",
x="ORR_E_onset",
c="pH",
title=f"{swp}",
kind="scatter",
logy=True,
colormap="rainbow_r",
ylim=[0.1, 100],
xlim=(0.5, 1),
ax=ax2,
)
# ax2.set_xlabel('E onset / mV_RHE')
plt.suptitle("ORR with E_onset")
plt.show()
fig, (ax1, ax2) = plt.subplots(figsize=(10, 4), ncols=2)
swgrp.plot(
y="ORR_E_onset",
x="N2_BG_lin_slope",
c="pH",
title=f"{swp}",
kind="scatter",
logy=True,
logx=True,
colormap="rainbow_r",
xlim=[0.01, 4],
ylim=(0.5, 1),
ax=ax1,
)
swgrp.plot(
y="ORR_Jkin_min_750",
x="N2_BG_lin_slope",
c="pH",
title=f"{swp}",
kind="scatter",
logy=True,
logx=True,
colormap="rainbow_r",
xlim=[0.01, 4],
ylim=(0.001, 50),
ax=ax2,
)
# ax2.set_xlabel('E onset / mV_RHE')
plt.suptitle("ORR with N2_BG lin slope")
plt.show()
plt.close()
def _N2_testing():
n2 = N2_LoadPars(reload=True, reload_raw=True)
n2r = N2_LoadPars(reload=True, reload_raw=False)
class N2_LoadPars(BaseLoadPars):
def __init__(
self,
EC_index=pd.DataFrame(),
SampleCodes=pd.DataFrame(),
exp_type="",
BRUTE_out=False,
**kws,
):
self.BRUTE_out = BRUTE_out
super().__init__(
EC_index=EC_index, SampleCodes=SampleCodes, exp_type=exp_type, **kws
)
@func_timer_decorator
def make_raw_pars_from_scratch(self):
_logger.info(
f'Reloading raw extra steps "{self.__class__.__name__}" {self.exp_type}'
)
self.reload_raw_df_delegator()
if hasattr(self, "_raw_finish_edit_columns"):
self._raw_finish_edit_columns()
self.save_daily_raw()
def _old(self):
IndexOVV_N2_pars_fn = FindExpFolder("VERSASTAT").PostDir.joinpath(
"N2Cdl_pars_IndexOVV_v{0}.pkl.compress".format(FileOperations.version)
)
n2_daily = get_daily_pickle(exp_type="N2_all")
if n2_daily.get("_exists", False) and reload != True:
# Cdl_pars_char = pd.read_excel(IndexOVV_N2_pars_fn,index_col=[0])
Cdl_pars_char = pd.read_pickle(n2_daily.get("daily_path"))
Cdl_pars_char = FileOperations.ChangeRoot_DF(
Cdl_pars_char, [], coltype="string"
)
else:
# @@ Check POST_AST status from OVV and PRM
_logger.info(
f'START reloading N2_pars OVV from daily {n2_daily["today"]:%Y-%m-%d}'
)
# EC_index = ECRunOVV(load=1).index
# ['EXP_dir','Dest_dir','PAR_file','PAR_file_Ring', 'ORR_act_N2_bg','DestFile']
# EC_index = FileOperations.ChangeRoot_DF(OnlyRecentMissingOVV,[])
# OnlyRecentMissingOVV.PAR_file = OnlyRecentMissingOVV.PAR_file.astype(str)
# OnlyRecentMissingOVV['Loading_cm2'] = OnlyRecentMissingOVV['Loading_cm2'].round(3)
# SampleCodes = SampleCodesChar().load
# EC_index, SampleCodes = Load_from_Indexes.get_EC_index()
# def read_df(_par_fls, ):
# _ps = Path(d).rglob(f'*_pars_v{FileOperations.version}.xlsx' )
def search_pars_files(self, destdir):
return Path(destdir.joinpath(f"N2_scans_v{FileOperations.version}")).rglob(
"*.xlsx"
)
def read_in_pars_files(self, _genlist, read_types=["Cdl_data", "Cdl_pars"]):
while True:
try:
i = next(_genlist)
_i_stem = i.stem
_meta = self.get_source_meta(i)
if _i_stem.endswith("_BG"):
_N2_type = "BG"
else:
if _i_stem.startswith("CV_"):
_N2_type = "CV"
if _i_stem.endswith(f"_first_v{FileOperations.version}"):
_N2_type = "CV_first"
# if not 'Scan Rate' in _pp.columns:
# 'N2_CV_raw = N2_CV_raw.assign(**{'ScanRate' : [i.split(f'_v{FileOperations.version}')[0].split('_')[-1] for i in N2_CV_raw.basename.to_numpy()]})
elif _i_stem.startswith("Cdl_data_"):
_N2_type = "Cdl_data"
elif _i_stem.startswith("Cdl_pars"):
_N2_type = "Cdl_pars"
else:
_N2_type = "N2_unknown"
_meta.update({"N2_type": _N2_type})
if _N2_type in read_types:
_pp = pd.read_excel(i, index_col=[0])
_pp = FileOperations.ChangeRoot_DF(_pp, [], coltype="string")
_pp = _pp.assign(**_meta)
else:
_pp = pd.DataFrame(_meta, index=[0])
# _meta.update({'DF' : _pp})
yield _pp
except StopIteration:
return "all done"
print("gen empty")
def reload_raw_df(self):
_pars_RAW = getattr(self, f"{self.exp_type}_RAW")
if not _pars_RAW.empty:
_pars_RAW.sort_values("source_delta_mtime", inplace=True)
_pars_RAW = _pars_RAW.reset_index()
setattr(self, f"{self.exp_type}_RAW", _pars_RAW)
_logger.info(
            f'Reloading "{self.__class__.__name__}" {self.exp_type} len({len(_pars_RAW)})'
)
def _old_stuff():
if n2_daily.get("_raw_exists", False) and use_daily is True:
N2_pars_all = pd.read_pickle(n2_daily.get("daily_path_RAW"))
elif n2_daily.get("daily_options_RAW", False) and use_daily is True:
if n2_daily.get("daily_options_RAW")[-1]:
N2_pars_all = pd.read_pickle(n2_daily.get("daily_options_RAW")[-1])
else: # Construct new N2 pars ovv from reading in files
N2_OVV = EC_index.loc[EC_index.PAR_exp == "N2_act"]
_par_files = [
list(Path(d.joinpath("N2_scans_v30")).rglob("*.xlsx"))
for d in N2_OVV.Dest_dir.unique()
]
_par_fls = (a for i in _par_files for a in i) # if 'EIS' in a.name)
_par_reads = read_df(_par_fls, read_types=["Cdl_data", "Cdl_pars"])
N2_pars_all = pd.concat([i["DF"] for i in _par_reads], sort=False)
for n, gr in N2_pars_all.groupby("PAR_file"):
print(
n,
f'\nSamples: {", ".join([str(i) for i in gr.SampleID.unique()])}',
",".join(gr.N2_type.unique()),
)
N2_pars_all, _missing_index = Load_from_Indexes.check_missing_ECindex(
EC_index, N2_pars_all, clean_up=True
)
N2_pars_all.to_pickle(n2_daily["daily_path_RAW"])
def _extra_pivot_CV(self):
N2_type_grps = N2_pars_all.groupby("N2_type")
if "CV" in N2_type_grps.groups.keys():
# N2 CVs TODO add Scan Rate column
N2_CV_raw = N2_type_grps.get_group("CV").dropna(axis=1, how="all")
# N2_CV_raw.plot(x=EvRHE,y='jmAcm-2')
N2_CV_pivot_SR_lst = []
for PF, PFgr in N2_CV_raw.groupby("PAR_file"):
# PF ,PFgr
for swp, swgrp in PFgr.groupby("Sweep_Type"):
# swp, swgrp
# swgrp.plot(x=EvRHE,y='jmAcm-2')
# E_T_idx = pd.MultiIndex.from_tuples(zip(swgrp['Elapsed Time(s)'].to_numpy(),swgrp[EvRHE].to_numpy()),names=['Elapsed_Time_s',EvRHE])
# swgrp.index = E_T_idx
# {n : len(gr) for n,gr in swgrp.groupby('Segment #')}
pvt = swgrp.pivot(
index="Elapsed Time(s)",
columns="ScanRate_mVs",
values=[EvRHE, "jmAcm-2", "Segment #"],
)
# pvt = swgrp.pivot(index=EvRHE,columns='ScanRate_mVs',values='jmAcm-2')
pvt.columns = pd.MultiIndex.from_tuples(
[(f"{i[0]}_{int(i[1])}", i[1]) for i in pvt.columns]
)
# pvt.rename(columns=pd.MultiIndex.from_tuples([(f'{i[0]}_{int(i[1])}', i[1]) for i in pvt.columns],names=['data','ScanRate_mVs']),inplace=True)
indx = pd.MultiIndex.from_tuples(
zip(repeat(PF), repeat(swp), pvt.index),
names=["PAR_file", "Sweep_Type", EvRHE],
)
pvt.index = indx
N2_CV_pivot_SR_lst.append(pvt)
# for sr, srgrp in PFgr.groupby('ScanRate_mVs'):
# SR = int(sr)
N2_CV_pivot_SR = pd.concat(N2_CV_pivot_SR_lst, sort=False)
# N2Cdl_pars_index = N2_grps.groupby('N2_type').get_group('Cdl_pars')
# N2Cdl_pars_files = [Path(i) for i in N2Cdl_pars_index['SourceFilename'].unique() if re.search('(?i)(_pars|_v20)',Path(i).stem) and Path(i).exists()]
# cdl = pd.read_excel(N2Cdl_pars_files[0],index_col=[0])
# N2Cdl_pars.rename(columns={'Filename' : 'PAR_file'})
# EPtest = N2Cdl_pars_index.loc[no_match] # a slice for testing purpose
# pd.merge(N2Cdl_pars_raw,N2_CV_index[['PAR_file','DestFile']],on='PAR_file',how='left')
# N2Cdl_pars_raw = N2_type_grps.get_group('Cdl_pars').dropna(axis=1,how='all')
# N2Cdl_data_index = postOVVout.groupby('Type_output').get_group('N2_Cdl_data')
# N2_CV_index = postOVVout.groupby('Type_output').get_group('N2_CV')
# lst, no_match, non_exist = [],[],[]
# for n,r in N2Cdl_pars_raw.iterrows():
# Cdl_data_file = N2Cdl_data_index.loc[N2Cdl_data_index.PAR_file == r.PAR_file].DestFile.unique()
# CV_files = N2_CV_index.loc[N2_CV_index.PAR_file == r.PAR_file].DestFile.unique()
# lst.append([set(Cdl_data_file),set(CV_files)])
# if len(N2Cdl_pars_raw) == len(lst):
# N2Cdl_pars_raw = N2Cdl_pars_raw.assign(**{'Cdl_data_file' : [i[0] for i in lst], 'Cdl_CV_data_files' : [i[1] for i in lst]})
# Cdl_pars = pd.concat([i for i in lst],sort=False,ignore_index=True)
def edit_raw_columns(self):
N2Cdl_pars_raw = getattr(self, f"{self.exp_type}_RAW")
N2_type_grps = N2Cdl_pars_raw.groupby("N2_type")
N2Cdl_pars_raw = N2_type_grps.get_group("Cdl_pars").dropna(axis=1, how="all")
N2Cdl_pars_raw.drop_duplicates(
subset=N2Cdl_pars_raw.columns[0:19], keep="first", inplace=True
)
N2Cdl_pars_raw = FileOperations.ChangeRoot_DF(
N2Cdl_pars_raw, [], coltype="string"
)
Cdl_pars = post_helper.make_uniform_EvRHE(N2Cdl_pars_raw)
Cdl_pars.drop_duplicates(subset=Cdl_pars.columns[0:19], inplace=True)
# Cdl_pars_merge_cols = [i for i in Cdl_pars.columns if i in SampleCodes.columns and not 'Unnamed' in i]
# Cdl_pars_char = pd.merge(Cdl_pars,SampleCodes,on=Cdl_pars_merge_cols,how='left')
# Cdl_pars_char.drop_duplicates(subset=Cdl_pars_char.columns[0:19],inplace=True)
_int = list(set(Cdl_pars.columns).intersection(set(self.EC_index.columns)))
if Cdl_pars.postAST.dropna().empty and len(self.EC_index.columns) != len(_int):
Cdl_pars = Cdl_pars.drop(columns="postAST")
# _int = list(set(Cdl_pars_char.columns).intersection(set(EC_index.columns)))
Cdl_pars = pd.merge(
Cdl_pars,
self.EC_index[["PAR_file", "postAST"]],
on="PAR_file",
suffixes=("", ""),
)
Cdl_pars = Load_from_Indexes.add_missing_ECindex_cols(self.EC_index, Cdl_pars)
setattr(self, f"{self.exp_type}", Cdl_pars)
    def _extra_xls_out(self, xls_out=False):
        # `xls_out`, `Cdl_pars_char` and `IndexOVV_N2_pars_fn` were undefined leftovers from
        # the earlier function-based version of this code; bind them explicitly here
        if xls_out:
            Cdl_pars_char = getattr(self, f"{self.exp_type}")
            IndexOVV_N2_pars_fn = FindExpFolder("VERSASTAT").PostDir.joinpath(
                "N2Cdl_pars_IndexOVV_v{0}.pkl.compress".format(FileOperations.version)
            )
            new_N2_pars_char_target = FileOperations.CompareHashDFexport(
                Cdl_pars_char, IndexOVV_N2_pars_fn
            )
            _logger.info(
                "PostEC Cdl N2 CVs re-indexed and saved: {0}".format(
                    new_N2_pars_char_target
                )
            )
            Cdl_pars_char.to_pickle(IndexOVV_N2_pars_fn)
    def _extra_plotting(self, extra_plotting=False):
        # `Cdl_pars_char` and `extra_plotting` were undefined leftovers from the earlier
        # function-based version of this code; bind them explicitly here
        Cdl_pars_char = getattr(self, f"{self.exp_type}")
        try:
Cdl_pars_char.query('(Sweep_Type_N2 == "cathodic") & (pH < 7)').plot(
y="Cdl",
x="E_RHE",
kind="scatter",
ylim=(0, 0.08),
title="checking plot: Cdl in acid",
)
# Cdl_pars_char.query('(Sweep_Type_N2 == "cathodic") & (pH < 7)').groupby('BET_cat_agg').plot(y='Cdl',x='E_RHE',colormap='viridis',kind='scatter',ylim=(0,0.08),title='Cdl in acid')
if extra_plotting:
Cdl_pars_char.query('(Sweep_Type_N2 == "cathodic") & (pH > 7)').plot(
y="Cdl",
x="E_RHE",
c="BET_cat_agg",
colormap="viridis",
kind="scatter",
ylim=(0, 0.03),
title="Cdl in alkaline",
)
alkCdl = Cdl_pars_char.query('(Sweep_Type_N2 == "cathodic") & (pH > 7)')
acidCdl = Cdl_pars_char.query(
'(Sweep_Type_N2 == "cathodic") & (pH < 7)'
)
# fig = plt.figure()
# ax = fig.add_subplot(111, projection='3d')
# ax.plot_trisurf(alkCdl.E_RHE,alkCdl.Cdl,alkCdl.BET_cat_agg,cmap=cm.viridis)
Cdl_atE = Cdl_pars_char.loc[
(Cdl_pars_char.Sweep_Type_N2 == "cathodic")
& (np.isclose(Cdl_pars_char["E_RHE"], 0.5, atol=0.02))
]
fig, ax = plt.subplots()
for n, Ogr in Cdl_atE.query(
'(Sweep_Type_N2 == "cathodic") & (pH < 7)'
).groupby("postAST"):
c_set = "g" if n == "no" else "r"
Ogr.plot(
x="BET_cat_agg",
y="Cdl",
s=50,
c=c_set,
kind="scatter",
label=n,
title="N2 Cdl vs BET in acid",
ax=ax,
ylim=(0, 50e-3),
)
fig, ax = plt.subplots()
for n, Ogr in Cdl_atE.query(
'(Sweep_Type_N2 == "cathodic") & (pH > 7)'
).groupby("postAST"):
c_set = "g" if n == "no" else "r"
Ogr.plot(
x="BET_cat_agg",
y="Cdl",
s=50,
c=c_set,
kind="scatter",
label=n,
title="N2 Cdl vs BET in alk",
ax=ax,
ylim=(0, 50e-3),
)
except Exception as e:
_logger.warning(f"PostEC Cdl N2 CVs extra plotting fail:\n{e}")
class CollectPostOVV:
"""Loops over all index files and merges them with the RunOVV"""
    def __init__(self):
pass
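    # Typical use (a sketch; both loaders are staticmethods, so no instance is needed):
    #   post_samples = CollectPostOVV.LoadPostOVV(reload=False)
    #   index_merged = CollectPostOVV.LoadIndexes(reload=False)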
@staticmethod
def LoadPostOVV(reload=False):
PostDestDir = FindExpFolder("VERSASTAT").DestDir.joinpath("PostEC")
SampleCodes = FindExpFolder().LoadSampleCode()
# CS_parts_PDD = FileOperations.find_CS_parts(PostDestDir)
if reload == True:
postOVVout = CollectPostOVV.LoadIndexes(reload=True)
else:
try:
postOVVout = CollectPostOVV.LoadIndexes(reload=False)
except Exception as e:
logging.warning(
"CollectPostOVV no Indexes available: {0}. Using postEC_Organized".format(
e
)
)
postOVVout = pd.read_excel(
PostDestDir.joinpath("postEC_Organized.xlsx"), index_col=[0]
)
# pd.read_excel(PostDestDir.joinpath('SampleCodeLst.xlsx'))
# CS_parts_pOVV = FileOperations.find_CS_parts(postOVVout.Exp_dir.iloc[0])
# if CS_parts_PDD[0] != CS_parts_pOVV[0]:
# chLst = [CS_parts_PDD[0].joinpath(FileOperations.find_CS_parts(i)[1]) for i in postOVVout.SourceFilename.values]
# postOVVout['SourceFilename'] = chLst
# else:
# pass
postSample = pd.merge(postOVVout, SampleCodes, on="SampleID", how="left")
print("Types:", " , ".join([str(i) for i in postSample.Type_output.unique()]))
postSample.PAR_file = postSample.PAR_file.astype(str)
postSample = FileOperations.ChangeRoot_DF(
postSample,
[
"EXP_dir",
"Dest_dir",
"PAR_file",
"PAR_file_Ring",
"ORR_act_N2_bg",
"DestFile",
"SourceFilename",
],
)
return postSample
# def RunFolderCopy(serie):
# postOVVlst = [outLst.append(PostEC.FromListgrp(n,gr.EXP_dir.unique()[0])) for n,gr in serie.groupby(by=['Dest_dir'])]
# return postOVVlst
@staticmethod
def LoadIndexes(reload=False):
IndexOVV_fn = FindExpFolder("VERSASTAT").DestDir.joinpath(
"IndexOVV_v{0}.xlsx".format(FileOperations.version)
)
if IndexOVV_fn.exists() and not reload:
Index_merged = pd.read_excel(IndexOVV_fn, index_col=[0])
Index_merged = FileOperations.ChangeRoot_DF(
Index_merged,
[
"EXP_dir",
"Dest_dir",
"PAR_file",
"PAR_file_Ring",
"ORR_act_N2_bg",
"DestFile",
"SourceFilename",
],
)
_logger.info("PostEC loaded IndexOVV from recent: {0}".format(IndexOVV_fn))
else:
_logger.info(
"PostEC reloading IndexOVV from Index files and Exp dir files!!"
)
OnlyRecentMissingOVV = ECRunOVV(load=1).index
# ['EXP_dir','Dest_dir','PAR_file','PAR_file_Ring', 'ORR_act_N2_bg','DestFile']
OnlyRecentMissingOVV = FileOperations.ChangeRoot_DF(
OnlyRecentMissingOVV, []
)
OnlyRecentMissingOVV.PAR_file = OnlyRecentMissingOVV.PAR_file.astype(str)
# if index_source == 'ExpDirs':
idx_files = [
list(Path(i).rglob("**/*index*.xlsx"))
for i in OnlyRecentMissingOVV.Dest_dir.unique()
if list(Path(i).rglob("**/*index.xlsx"))
]
# for i in OnlyRecentMissingOVV.Dest_dir.unique():
# [idx_files.append([a for a in a if a]) for a in [(Path(i).rglob('index.xlsx')) for i in OnlyRecentMissingOVV.Dest_dir.unique()]]
# idx_dir = FindExpFolder('VERSASTAT').IndexDir
# idx_files = idx_dir.rglob('*.xlsx')
# subset=['PAR_file','DestFile','Type_output','Script_run_date']
idx_lst = set([a for i in idx_files for a in i])
idx_mtime = [
(i, (dt.datetime.now() - dt.datetime.fromtimestamp(i.stat().st_mtime)))
for i in idx_lst
]
# print(f'len {len(idx_lst)} and set {len(set(idx_lst))}')
alst = (
[]
) # Alternative = pd.concat([[pd.read_excel(c,index_col=[0]) for c in a ] for b in idx_files],sort=False,ignore_index=True)
for idxfp in idx_lst:
df = pd.read_excel(idxfp, index_col=[0])
df["IndexSource"] = idxfp
alst.append(df)
Index_from_expdirs_all = pd.concat(
[i for i in alst], sort=False, ignore_index=True
)
Index_from_expdirs_all.sort_values(
"Script_run_date", ascending=False, inplace=True
)
Index_from_expdirs = Index_from_expdirs_all.drop_duplicates(keep="first")
Index_from_expdirs = FileOperations.ChangeRoot_DF(Index_from_expdirs, [])
idx_exp_tDelta = [
(n, pd.to_datetime(dt.datetime.now()) - i["Script_run_date"])
for n, i in Index_from_expdirs.iterrows()
]
Index_from_expdirs = Index_from_expdirs.assign(
**{
"Source": "ExpDirs",
"Time_since_run": [pd.to_timedelta(i[1]) for i in idx_exp_tDelta],
}
)
# Index_from_expdirs['Time_since_run'] = [pd.to_timedelta(pd.to_datetime(datetime.now())-i) for i in Index_from_expdirs['Script_run_date'].values]
# limit = pd.to_timedelta('7h')
# ['Time_since_run'] = [pd.to_timedelta(pd.to_datetime(datetime.now())-i) for i in Index['Script_run_date'].values]
# Index = Index.loc[Index['Time_since_run'] < limit]
# Index = Index.iloc[dups].loc[Index['Time_since_run'] < limit]
# else:
# dups.append(gr.Time_since_run.idxmin())
# 1elif index_source == 'IndexDir':
IndexDir_idxfiles = list(
FindExpFolder("VERSASTAT").IndexDir.rglob("*.xlsx")
)
Index_from_idxdir_all = pd.concat(
[
pd.read_excel(i, index_col=[0]).assign(IndexSource=i)
for i in IndexDir_idxfiles
],
sort=False,
ignore_index=True,
)
Index_from_idxdir_all.sort_values(
"Script_run_date", ascending=False, inplace=True
)
Index_from_idxdir = Index_from_idxdir_all.drop_duplicates(keep="first")
Index_from_idxdir = FileOperations.ChangeRoot_DF(Index_from_idxdir, [])
Index_from_idxdir = Index_from_idxdir.assign(**{"Source": "IndexDir"})
Index_from_idxdir["Time_since_run"] = [
pd.to_timedelta(pd.to_datetime(dt.datetime.now()) - i)
for i in Index_from_idxdir["Script_run_date"].values
]
# dup_idxdir = Index_from_idxdir.loc[Index_from_idxdir.DestFile.duplicated() == True]
dups_date, singles, others, unused_dups = [], [], [], []
for n, gr in Index_from_idxdir.groupby(
["PAR_file", "DestFile", "Type_output"]
):
# Indexes.groupby(['PAR_file','DestFile','Type_output','ScanRate','Segment']):
if len(gr) > 1:
dgr = gr
# print(n,gr.Time_since_run.unique())
dups_date.append(gr.Time_since_run.idxmin())
unused_dups.append(
list(set(gr.index) - {gr.Time_since_run.idxmin()})
)
elif len(gr) == 1:
singles.append(gr.index[0])
else:
others.append(gr.index)
dup_fltr_idxdir = Index_from_idxdir.loc[singles + dups_date]
# Indexes = pd.merge(Index_from_expdirs,Index_from_idxdir, on=['PAR_file','DestFile','Type_output','ScanRate','Segment','Sweep_Type','Source'])
Indexes = pd.concat([Index_from_expdirs, dup_fltr_idxdir], sort=False)
# Indexes['Time_since_run'] = [pd.to_timedelta(pd.to_datetime(datetime.now())-i) for i in Indexes['Script_run_date'].values]
Indexes = Indexes.dropna(
subset=["PAR_file", "DestFile", "Type_output"]
).reset_index()
dups_date, singles, others = [], [], []
Idxgr = Indexes.groupby(["PAR_file", "DestFile", "Type_output"])
for n, gr in Idxgr:
# Indexes.groupby(['PAR_file','DestFile','Type_output','ScanRate','Segment']):
if len(gr) > 1:
dgr = gr
idxmin = gr.Time_since_run.idxmin()
# print(n,gr.Time_since_run.unique())
dups_date.append([idxmin, gr.loc[idxmin, "Source"]])
elif len(gr) == 1:
singles.append(gr.index[0])
else:
others.append(gr.index)
# for n2,gr2 in OnlyRecentMissingOVV.groupby('PAR_file'):
# if len(gr2) > 1:
# dgr2 = gr2
# Index = Index.iloc[dups].loc[Index['Time_since_run'] < limit]
Index = Indexes.loc[singles + [i[0] for i in dups_date]].dropna(
subset=["DestFile"]
)
# for a in Index.DestFile.values:
# try: Path(a).is_file()
# except: print(a)
# if not any([Path(i).exists() for i in Index.DestFile.values]):
# Index = FileOperations.ChangeRoot_DF(Index,['PAR_file','DestFile']) 'EXP_dir','Dest_dir','PAR_file','PAR_file_Ring','ORR_act_N2_bg','DestFile','SourceFilename'
Index = FileOperations.ChangeRoot_DF(Index, [])
Index = Index.assign(
**{
"Type_Exp": Index["Type_output"],
"SourceFilename": [Path(str(i)) for i in Index["DestFile"].values],
}
)
# Index['Type_Exp'] = Index['Type_output']
# Index['SourceFilename'] = [Path(str(i)) for i in Index['DestFile'].values]
Index.PAR_file = Index.PAR_file.astype(str)
Index_undup = Index.loc[
(
Index.duplicated(
subset=[
"PAR_file",
"DestFile",
"Type_output",
"Time_since_run",
"Source",
]
)
== False
)
]
idx_merge_cols = [
i
for i in Index_undup.columns
if i in OnlyRecentMissingOVV.columns and not "Segment" in i
]
Index_merged = pd.merge(
Index_undup, OnlyRecentMissingOVV, on="PAR_file", how="left"
)
Index_merged.PAR_file = [
Path(str(i)) for i in Index_merged["PAR_file"].values
]
new_IndexOVV_target = FileOperations.CompareHashDFexport(
Index_merged, IndexOVV_fn
)
try:
_logger.info(
"PostEC re-indexed and saved: {0}".format(new_IndexOVV_target)
)
except:
print("no log")
return Index_merged
@staticmethod
def MatchPostASTs(postOVVout):
# postOVVout.postAST.unique()
# [(n,len(gr)) for n,gr in postOVVout.groupby('postAST')]
faillst, fail_index_gr = [], []
matchAST_lst, non_uniq_lst = [], []
for nAST, ASTgr in postOVVout.query(
'(postAST != "no") & (postAST != "postORR")'
).groupby(["postAST", "PAR_date", "PAR_file"]):
nAST, ASTgr
# for nDT,grDT in ASTgr.groupby(')
if ASTgr.PAR_file.nunique() == 1 and ASTgr.Source.nunique() > 1:
ASTgr_grSource = ASTgr.groupby("Source")
ASTgr_info = [
(n, len(gr), gr.Time_since_run.mean()) for n, gr in ASTgr_grSource
]
if len(set([i[1] for i in ASTgr_info])) == 1:
take_source = ASTgr_info[np.argmin([i[2] for i in ASTgr_info])][0]
ASTgr = ASTgr_grSource.get_group(take_source)
fail_index_source_gr = ASTgr_grSource.get_group(
ASTgr_info[np.argmax([i[2] for i in ASTgr_info])][0]
)
fail_index_gr.append(fail_index_source_gr)
EC_exp_uniq = [
(i, ASTgr[i].unique(), ASTgr[i].nunique())
for i in [
c
for c in SampleSelection.EC_exp_cols
+ ["SampleID", "Type_exp", "PAR_file"]
if c in ASTgr.columns
]
]
EC_exp_non_uniq = [i for i in EC_exp_uniq if i[2] != 1]
if EC_exp_non_uniq:
print(
"Not unique PAR_date {0},multiple: {1}".format(
nAST[1], EC_exp_non_uniq
)
)
non_uniq_lst.append([nAST, EC_exp_non_uniq, EC_exp_uniq])
faillst.append(ASTgr)
EC_exp_query = " & ".join(
[
'({0} == "{1}")'.format(i[0], i[1][0])
for i in EC_exp_uniq[1:-1] + [("postAST", ["no"])]
if not "Loading" in i[0]
]
)
past = nAST[1] - pd.to_timedelta(1, unit="D")
past_slice = postOVVout.query("(PAR_date > @past) & (PAR_date < @nAST[1])")
past_query = past_slice.query(EC_exp_query)
if past_query.query(EC_exp_query).empty:
# expand search to all OVV for similar conditions
all_query = postOVVout.query(EC_exp_query)
if not all_query.empty:
preAST = tuple(all_query.PAR_file.unique())
else:
preAST = "no-preAST"
else:
                # find previous preAST measurements
preAST = tuple(past_query.PAR_file.unique())
matchAST_lst.append(list(nAST) + [preAST])
if fail_index_gr:
            fail_index_filter = pd.concat(fail_index_gr)
##################################################
# Helper functions for preprocessing.
##################################################
# Author: <NAME>
# Email: <EMAIL>
##################################################
import numpy as np
import pandas as pd
from scipy.io import loadmat
from io import BytesIO
from scipy.signal import butter, lfilter
def normalize(data, mean, std):
"""Normalizes all sensor channels
:param data: numpy integer matrix
Sensor data
:param mean: numpy integer array
Array containing mean values for each sensor channel
:param std: numpy integer array
Array containing the standard deviation of each sensor channel
:return:
Normalized sensor data
"""
return (data - mean) / std
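# The `downsample` helper below calls `butter_lowpass_filter`, which is not defined or
# imported in this module even though `butter` and `lfilter` are imported from
# scipy.signal. The function below is a minimal sketch of a standard Butterworth
# low-pass filter, added as an assumption about what the missing helper looks like.
def butter_lowpass_filter(data, cutoff, fs, order=5):
    """Low-pass filter `data` along axis 0 with a Butterworth filter."""
    nyq = 0.5 * fs  # Nyquist frequency
    normal_cutoff = cutoff / nyq
    # design the filter and apply it causally along the sample axis
    b, a = butter(order, normal_cutoff, btype='low', analog=False)
    return lfilter(b, a, data, axis=0)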
def downsample(data_x, data_y, dataset):
"""
Under construction.
"""
x_cols = data_x.columns
data_x = data_x.to_numpy()
y_name = data_y.name
data_y = data_y.to_numpy()
fs = dataset.sr
factor = dataset.down_sample
cutoff = fs / (factor * 2)
init_shapes = (data_x.shape, data_y.shape)
data_x = butter_lowpass_filter(data_x, cutoff, fs)
data_x = data_x[::factor]
data_y = data_y[::factor]
print(f'Downsampled data from {init_shapes[0]} samples @ {fs}Hz => {data_x.shape} samples @ {fs/factor:.2f}Hz')
print(f'Downsampled labels from {init_shapes[1]} labels @ {fs}Hz => {data_y.shape} samples @ {fs/factor:.2f}Hz')
return pd.DataFrame(data_x, columns=x_cols), pd.Series(data_y, name=y_name)
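# A minimal usage sketch of the helpers above (not called anywhere; illustration only).
# Assumption: `dataset` is any object exposing the sampling rate as `.sr` and the
# decimation factor as `.down_sample`, as expected by `downsample`.
def _example_preprocess(data_x, data_y, dataset):
    # per-channel statistics computed from the (training) sensor data itself
    mean = data_x.to_numpy().mean(axis=0)
    std = data_x.to_numpy().std(axis=0)
    data_x = pd.DataFrame(normalize(data_x.to_numpy(), mean, std), columns=data_x.columns)
    # low-pass filter and decimate both the sensor data and the labels
    data_x, data_y = downsample(data_x, data_y, dataset)
    return data_x, data_y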
def separate(indices, data_x, data_y, path, prefix):
    """Split the data into consecutive segments at the given indices and save each as an .npz archive."""
    # iterate over consecutive index pairs; range(len(indices)) would overrun on the last element
    for i in range(len(indices) - 1):
        start = indices[i]
        stop = indices[i + 1]
        print(f'Separating {prefix} data {start}:{stop} -> {prefix}_data_{i}.npz')
        # np.save_compressed does not exist; np.savez_compressed writes the .npz archive
        np.savez_compressed(f'{path}/{prefix}_data_{i}.npz', data=data_x[start:stop], target=data_y[start:stop])
def safe_load(archive, path, dataset):
if '.mat' in path:
data = loadmat(BytesIO(archive.read(path)))
data = data[dataset.variable_name[path]]
        data = pd.DataFrame(data)
        return data  # non-.mat inputs are not handled in this (apparently truncated) helper
from contextlib import nullcontext as does_not_raise
from functools import partial
import pandas as pd
from pandas.testing import assert_series_equal
from solarforecastarbiter import datamodel
from solarforecastarbiter.reference_forecasts import persistence
from solarforecastarbiter.conftest import default_observation
import pytest
def load_data_base(data, observation, data_start, data_end):
# slice doesn't care about closed or interval label
# so here we manually adjust start and end times
if 'instant' in observation.interval_label:
pass
elif observation.interval_label == 'ending':
data_start += pd.Timedelta('1s')
elif observation.interval_label == 'beginning':
data_end -= pd.Timedelta('1s')
return data[data_start:data_end]
@pytest.fixture
def powerplant_metadata():
"""1:1 AC:DC"""
modeling_params = datamodel.FixedTiltModelingParameters(
ac_capacity=200, dc_capacity=200, temperature_coefficient=-0.3,
dc_loss_factor=3, ac_loss_factor=0,
surface_tilt=30, surface_azimuth=180)
metadata = datamodel.SolarPowerPlant(
name='Albuquerque Baseline', latitude=35.05, longitude=-106.54,
elevation=1657.0, timezone='America/Denver',
modeling_parameters=modeling_params)
return metadata
@pytest.mark.parametrize('interval_label,closed,end', [
('beginning', 'left', '20190404 1400'),
('ending', 'right', '20190404 1400'),
('instant', None, '20190404 1359')
])
def test_persistence_scalar(site_metadata, interval_label, closed, end):
# interval beginning obs
observation = default_observation(
site_metadata, interval_length='5min', interval_label=interval_label)
tz = 'America/Phoenix'
data_index = pd.date_range(
start='20190404', end='20190406', freq='5min', tz=tz)
data = pd.Series(100., index=data_index)
data_start = pd.Timestamp('20190404 1200', tz=tz)
data_end = pd.Timestamp('20190404 1300', tz=tz)
forecast_start = pd.Timestamp('20190404 1300', tz=tz)
forecast_end = pd.Timestamp(end, tz=tz)
interval_length = pd.Timedelta('5min')
load_data = partial(load_data_base, data)
fx = persistence.persistence_scalar(
observation, data_start, data_end, forecast_start, forecast_end,
interval_length, interval_label, load_data=load_data)
expected_index = pd.date_range(
start='20190404 1300', end=end, freq='5min', tz=tz,
closed=closed)
expected = pd.Series(100., index=expected_index)
assert_series_equal(fx, expected)
@pytest.mark.parametrize('obs_interval_label', ('beginning', 'ending',
'instant'))
@pytest.mark.parametrize('interval_label,closed,end', [
('beginning', 'left', '20190406 0000'),
('ending', 'right', '20190406 0000'),
('instant', None, '20190405 2359')
])
def test_persistence_interval(site_metadata, obs_interval_label,
interval_label, closed, end):
# interval beginning obs
observation = default_observation(
site_metadata, interval_length='5min',
interval_label=obs_interval_label)
tz = 'America/Phoenix'
data_index = pd.date_range(
start='20190404', end='20190406', freq='5min', tz=tz)
# each element of data is equal to the hour value of its label
data = pd.Series(data_index.hour, index=data_index, dtype=float)
if obs_interval_label == 'ending':
# e.g. timestamp 12:00:00 should be equal to 11
data = data.shift(1).fillna(0)
data_start = pd.Timestamp('20190404 0000', tz=tz)
data_end = pd.Timestamp(end, tz=tz) - pd.Timedelta('1d')
forecast_start = pd.Timestamp('20190405 0000', tz=tz)
interval_length = pd.Timedelta('60min')
load_data = partial(load_data_base, data)
expected_index = pd.date_range(
start='20190405 0000', end=end, freq='60min', tz=tz, closed=closed)
expected_vals = list(range(0, 24))
expected = pd.Series(expected_vals, index=expected_index, dtype=float)
# handle permutations of parameters that should fail
if data_end.minute == 59 and obs_interval_label != 'instant':
expectation = pytest.raises(ValueError)
elif data_end.minute == 0 and obs_interval_label == 'instant':
expectation = pytest.raises(ValueError)
else:
expectation = does_not_raise()
with expectation:
fx = persistence.persistence_interval(
observation, data_start, data_end, forecast_start,
interval_length, interval_label, load_data)
assert_series_equal(fx, expected)
def test_persistence_interval_missing_data(site_metadata):
# interval beginning obs
observation = default_observation(
site_metadata, interval_length='5min',
interval_label='ending')
tz = 'America/Phoenix'
data_index = pd.date_range(
start='20190404T1200', end='20190406', freq='5min', tz=tz)
# each element of data is equal to the hour value of its label
end = '20190406 0000'
data = pd.Series(data_index.hour, index=data_index, dtype=float)
data = data.shift(1)
data_start = pd.Timestamp('20190404 0000', tz=tz)
data_end = pd.Timestamp(end, tz=tz) - pd.Timedelta('1d')
forecast_start = pd.Timestamp('20190405 0000', tz=tz)
interval_length = pd.Timedelta('60min')
load_data = partial(load_data_base, data)
expected_index = pd.date_range(
start='20190405 0000', end=end, freq='60min', tz=tz, closed='right')
expected_vals = [None] * 12 + list(range(12, 24))
expected = pd.Series(expected_vals, index=expected_index, dtype=float)
fx = persistence.persistence_interval(
observation, data_start, data_end, forecast_start,
interval_length, 'ending', load_data)
assert_series_equal(fx, expected)
@pytest.fixture
def uniform_data():
tz = 'America/Phoenix'
data_index = pd.date_range(
start='20190404', end='20190406', freq='5min', tz=tz)
data = pd.Series(100., index=data_index)
return data
@pytest.mark.parametrize(
'interval_label,expected_index,expected_ghi,expected_ac,obsscale', (
('beginning',
['20190404 1300', '20190404 1330'],
[96.41150694741889, 91.6991546408236],
[96.60171202566896, 92.074796727846],
1),
('ending',
['20190404 1330', '20190404 1400'],
[96.2818141290749, 91.5132934827808],
[96.47816752344607, 91.89460837042301],
1),
# test clipped at 2x clearsky
('beginning',
['20190404 1300', '20190404 1330'],
[1926.5828549018618, 1832.4163238767312],
[383.1524464326973, 365.19729186262526],
50)
)
)
def test_persistence_scalar_index(
powerplant_metadata, uniform_data, interval_label,
expected_index, expected_ghi, expected_ac, obsscale):
# ac_capacity is 200 from above
observation = default_observation(
powerplant_metadata, interval_length='5min',
interval_label='beginning')
observation_ac = default_observation(
powerplant_metadata, interval_length='5min',
interval_label='beginning', variable='ac_power')
data = uniform_data * obsscale
tz = data.index.tzinfo
data_start = pd.Timestamp('20190404 1200', tz=tz)
data_end = pd.Timestamp('20190404 1300', tz=tz)
forecast_start = pd.Timestamp('20190404 1300', tz=tz)
forecast_end = pd.Timestamp('20190404 1400', tz=tz)
interval_length = pd.Timedelta('30min')
load_data = partial(load_data_base, data)
fx = persistence.persistence_scalar_index(
observation, data_start, data_end, forecast_start, forecast_end,
interval_length, interval_label, load_data)
expected_index = pd.DatetimeIndex(
expected_index, tz=tz, freq=interval_length)
expected = pd.Series(expected_ghi, index=expected_index)
assert_series_equal(fx, expected, check_names=False)
fx = persistence.persistence_scalar_index(
observation_ac, data_start, data_end, forecast_start, forecast_end,
interval_length, interval_label, load_data)
expected = pd.Series(expected_ac, index=expected_index)
assert_series_equal(fx, expected, check_names=False)
def test_persistence_scalar_index_instant_obs_fx(
site_metadata, powerplant_metadata, uniform_data):
# instantaneous obs and fx
interval_length = pd.Timedelta('30min')
interval_label = 'instant'
observation = default_observation(
site_metadata, interval_length='5min', interval_label=interval_label)
observation_ac = default_observation(
powerplant_metadata, interval_length='5min',
interval_label=interval_label, variable='ac_power')
data = uniform_data
tz = data.index.tzinfo
load_data = partial(load_data_base, data)
data_start = pd.Timestamp('20190404 1200', tz=tz)
data_end = pd.Timestamp('20190404 1259', tz=tz)
forecast_start = pd.Timestamp('20190404 1300', tz=tz)
forecast_end = pd.Timestamp('20190404 1359', tz=tz)
fx = persistence.persistence_scalar_index(
observation, data_start, data_end, forecast_start, forecast_end,
interval_length, interval_label, load_data)
expected_index = pd.DatetimeIndex(
['20190404 1300', '20190404 1330'], tz=tz, freq=interval_length)
expected_values = [96.59022431746838, 91.99405501672328]
expected = pd.Series(expected_values, index=expected_index)
assert_series_equal(fx, expected, check_names=False)
fx = persistence.persistence_scalar_index(
observation_ac, data_start, data_end, forecast_start, forecast_end,
interval_length, interval_label, load_data)
expected_values = [96.77231379880752, 92.36198028963426]
expected = pd.Series(expected_values, index=expected_index)
assert_series_equal(fx, expected, check_names=False)
# instant obs and fx, but with offset added to starts instead of ends
data_start = pd.Timestamp('20190404 1201', tz=tz)
data_end = pd.Timestamp('20190404 1300', tz=tz)
forecast_start = pd.Timestamp('20190404 1301', tz=tz)
forecast_end = pd.Timestamp('20190404 1400', tz=tz)
fx = persistence.persistence_scalar_index(
observation, data_start, data_end, forecast_start, forecast_end,
interval_length, interval_label, load_data)
expected_index = pd.DatetimeIndex(
['20190404 1300', '20190404 1330'], tz=tz, freq=interval_length)
expected_values = [96.55340033645147, 91.89662922267517]
expected = pd.Series(expected_values, index=expected_index)
assert_series_equal(fx, expected, check_names=False)
def test_persistence_scalar_index_invalid_times_instant(site_metadata):
data = pd.Series(100., index=[0])
load_data = partial(load_data_base, data)
tz = 'America/Phoenix'
interval_label = 'instant'
observation = default_observation(
site_metadata, interval_length='5min', interval_label=interval_label)
# instant obs that cover the whole interval - not allowed!
data_start = pd.Timestamp('20190404 1200', tz=tz)
data_end = pd.Timestamp('20190404 1300', tz=tz)
forecast_start = pd.Timestamp('20190404 1300', tz=tz)
forecast_end = pd.Timestamp('20190404 1400', tz=tz)
interval_length = pd.Timedelta('30min')
with pytest.raises(ValueError):
persistence.persistence_scalar_index(
observation, data_start, data_end, forecast_start, forecast_end,
interval_length, interval_label, load_data)
@pytest.mark.parametrize('interval_label', ['beginning', 'ending'])
@pytest.mark.parametrize('data_start,data_end,forecast_start,forecast_end', (
('20190404 1201', '20190404 1300', '20190404 1300', '20190404 1400'),
('20190404 1200', '20190404 1259', '20190404 1300', '20190404 1400'),
('20190404 1200', '20190404 1300', '20190404 1301', '20190404 1400'),
('20190404 1200', '20190404 1300', '20190404 1300', '20190404 1359'),
))
def test_persistence_scalar_index_invalid_times_interval(
site_metadata, interval_label, data_start, data_end, forecast_start,
forecast_end):
data = pd.Series(100., index=[0])
load_data = partial(load_data_base, data)
tz = 'America/Phoenix'
interval_length = pd.Timedelta('30min')
# base times to mess with
data_start = pd.Timestamp(data_start, tz=tz)
data_end = pd.Timestamp(data_end, tz=tz)
forecast_start = pd.Timestamp(forecast_start, tz=tz)
forecast_end = pd.Timestamp(forecast_end, tz=tz)
# interval average obs with invalid starts/ends
observation = default_observation(
site_metadata, interval_length='5min', interval_label=interval_label)
errtext = "with interval_label beginning or ending"
with pytest.raises(ValueError) as excinfo:
persistence.persistence_scalar_index(
observation, data_start, data_end, forecast_start, forecast_end,
interval_length, interval_label, load_data)
assert errtext in str(excinfo.value)
def test_persistence_scalar_index_invalid_times_invalid_label(site_metadata):
data = pd.Series(100., index=[0])
load_data = partial(load_data_base, data)
tz = 'America/Phoenix'
interval_length = pd.Timedelta('30min')
interval_label = 'invalid'
observation = default_observation(
site_metadata, interval_length='5min')
object.__setattr__(observation, 'interval_label', interval_label)
data_start = pd.Timestamp('20190404 1200', tz=tz)
data_end = pd.Timestamp('20190404 1300', tz=tz)
forecast_start = pd.Timestamp('20190404 1300', tz=tz)
forecast_end = pd.Timestamp('20190404 1400', tz=tz)
with pytest.raises(ValueError) as excinfo:
persistence.persistence_scalar_index(
observation, data_start, data_end, forecast_start, forecast_end,
interval_length, interval_label, load_data)
assert "invalid interval_label" in str(excinfo.value)
def test_persistence_scalar_index_low_solar_elevation(
site_metadata, powerplant_metadata):
interval_label = 'beginning'
observation = default_observation(
site_metadata, interval_length='5min', interval_label=interval_label)
observation_ac = default_observation(
powerplant_metadata, interval_length='5min',
interval_label=interval_label, variable='ac_power')
# at ABQ Baseline, solar apparent zenith for these points is
# 2019-05-13 12:00:00+00:00 91.62
# 2019-05-13 12:05:00+00:00 90.09
# 2019-05-13 12:10:00+00:00 89.29
# 2019-05-13 12:15:00+00:00 88.45
# 2019-05-13 12:20:00+00:00 87.57
# 2019-05-13 12:25:00+00:00 86.66
tz = 'UTC'
data_start = pd.Timestamp('20190513 1200', tz=tz)
data_end = pd.Timestamp('20190513 1230', tz=tz)
index = pd.date_range(start=data_start, end=data_end,
freq='5min', closed='left')
# clear sky 5 min avg (from 1 min avg) GHI is
# [0., 0.10932908, 1.29732454, 4.67585122, 10.86548521, 19.83487399]
# create data series that could produce obs / clear of
# 0/0, 1/0.1, -1/1.3, 5/5, 10/10, 20/20
# average without limits is (10 - 1 + 1 + 1 + 1) / 5 = 2.4
# average with element limits of [0, 2] = (2 + 0 + 1 + 1 + 1) / 5 = 1
data = pd.Series([0, 1, -1, 5, 10, 20.], index=index)
    forecast_start = pd.Timestamp('20190513 1230', tz=tz)
# -*- coding: utf-8 -*-
"""
Created on Mon Jun 4 10:40:40 2018
@author: Jeon
"""
from sklearn.datasets import load_boston
boston = load_boston()
X = boston['data']
y = boston['target']
print(boston['DESCR'])
print(boston['feature_names'])
import pandas as pd
data = pd.DataFrame(X, columns=boston['feature_names'])
import datetime
import numpy as np
from numpy import nan
import pandas as pd
import pytz
import pytest
from pytz.exceptions import UnknownTimeZoneError
from pandas.util.testing import assert_series_equal, assert_frame_equal
from pvlib.location import Location
from test_solarposition import expected_solpos
from conftest import requires_scipy
aztz = pytz.timezone('US/Arizona')
def test_location_required():
Location(32.2, -111)
def test_location_all():
Location(32.2, -111, 'US/Arizona', 700, 'Tucson')
@pytest.mark.parametrize('tz', [
aztz, 'America/Phoenix', -7, -7.0,
])
def test_location_tz(tz):
Location(32.2, -111, tz)
def test_location_invalid_tz():
with pytest.raises(UnknownTimeZoneError):
Location(32.2, -111, 'invalid')
def test_location_invalid_tz_type():
with pytest.raises(TypeError):
Location(32.2, -111, [5])
def test_location_print_all():
tus = Location(32.2, -111, 'US/Arizona', 700, 'Tucson')
expected_str = '\n'.join([
'Location: ',
' name: Tucson',
' latitude: 32.2',
' longitude: -111',
' altitude: 700',
' tz: US/Arizona'
])
assert tus.__str__() == expected_str
def test_location_print_pytz():
tus = Location(32.2, -111, aztz, 700, 'Tucson')
expected_str = '\n'.join([
'Location: ',
' name: Tucson',
' latitude: 32.2',
' longitude: -111',
' altitude: 700',
' tz: US/Arizona'
])
assert tus.__str__() == expected_str
@requires_scipy
def test_get_clearsky():
tus = Location(32.2, -111, 'US/Arizona', 700, 'Tucson')
times = pd.DatetimeIndex(start='20160101T0600-0700',
end='20160101T1800-0700',
freq='3H')
clearsky = tus.get_clearsky(times)
expected = pd.DataFrame(data=np.array([
( 0.0, 0.0, 0.0),
(262.77734276159333, 791.1972825869296, 46.18714900637892),
(616.764693938387, 974.9610353623959, 65.44157429054201),
(419.6512657626518, 901.6234995035793, 54.26016437839348),
( 0.0, 0.0, 0.0)],
dtype=[('ghi', '<f8'), ('dni', '<f8'), ('dhi', '<f8')]), index=times)
assert_frame_equal(expected, clearsky, check_less_precise=2)
def test_get_clearsky_ineichen_supply_linke():
tus = Location(32.2, -111, 'US/Arizona', 700)
times = pd.date_range(start='2014-06-24', end='2014-06-25', freq='3h')
times_localized = times.tz_localize(tus.tz)
expected = pd.DataFrame(np.
array([[ 0. , 0. , 0. ],
[ 0. , 0. , 0. ],
[ 79.73090244, 316.16436502, 40.45759009],
[ 703.43653498, 876.41452667, 95.15798252],
[ 1042.37962396, 939.86391062, 118.44687715],
[ 851.32411813, 909.11186737, 105.36662462],
[ 257.18266827, 646.16644264, 62.02777094],
[ 0. , 0. , 0. ],
[ 0. , 0. , 0. ]]),
columns=['ghi', 'dni', 'dhi'],
index=times_localized)
out = tus.get_clearsky(times_localized, linke_turbidity=3)
assert_frame_equal(expected, out, check_less_precise=2)
def test_get_clearsky_haurwitz():
tus = Location(32.2, -111, 'US/Arizona', 700, 'Tucson')
times = pd.DatetimeIndex(start='20160101T0600-0700',
end='20160101T1800-0700',
freq='3H')
clearsky = tus.get_clearsky(times, model='haurwitz')
expected = pd.DataFrame(data=np.array(
[[ 0. ],
[ 242.30085588],
[ 559.38247117],
[ 384.6873791 ],
[ 0. ]]),
columns=['ghi'],
index=times)
assert_frame_equal(expected, clearsky)
def test_get_clearsky_simplified_solis():
tus = Location(32.2, -111, 'US/Arizona', 700, 'Tucson')
times = pd.DatetimeIndex(start='20160101T0600-0700',
end='20160101T1800-0700',
freq='3H')
clearsky = tus.get_clearsky(times, model='simplified_solis')
expected = pd.DataFrame(data=np.
array([[ 0. , 0. , 0. ],
[ 70.00146271, 638.01145669, 236.71136245],
[ 101.69729217, 852.51950946, 577.1117803 ],
[ 86.1679965 , 755.98048017, 385.59586091],
[ 0. , 0. , 0. ]]),
columns=['dhi', 'dni', 'ghi'],
index=times)
expected = expected[['ghi', 'dni', 'dhi']]
assert_frame_equal(expected, clearsky, check_less_precise=2)
def test_get_clearsky_simplified_solis_apparent_elevation():
tus = Location(32.2, -111, 'US/Arizona', 700, 'Tucson')
times = pd.DatetimeIndex(start='20160101T0600-0700',
end='20160101T1800-0700',
freq='3H')
solar_position = {'apparent_elevation': pd.Series(80, index=times),
'apparent_zenith': pd.Series(10, index=times)}
clearsky = tus.get_clearsky(times, model='simplified_solis',
solar_position=solar_position)
expected = pd.DataFrame(data=np.
array([[ 131.3124497 , 1001.14754036, 1108.14147919],
[ 131.3124497 , 1001.14754036, 1108.14147919],
[ 131.3124497 , 1001.14754036, 1108.14147919],
[ 131.3124497 , 1001.14754036, 1108.14147919],
[ 131.3124497 , 1001.14754036, 1108.14147919]]),
columns=['dhi', 'dni', 'ghi'],
index=times)
expected = expected[['ghi', 'dni', 'dhi']]
assert_frame_equal(expected, clearsky, check_less_precise=2)
def test_get_clearsky_simplified_solis_dni_extra():
tus = Location(32.2, -111, 'US/Arizona', 700, 'Tucson')
times = pd.DatetimeIndex(start='20160101T0600-0700',
end='20160101T1800-0700',
freq='3H')
clearsky = tus.get_clearsky(times, model='simplified_solis',
dni_extra=1370)
expected = pd.DataFrame(data=np.
array([[ 0. , 0. , 0. ],
[ 67.82281485, 618.15469596, 229.34422063],
[ 98.53217848, 825.98663808, 559.15039353],
[ 83.48619937, 732.45218243, 373.59500313],
[ 0. , 0. , 0. ]]),
columns=['dhi', 'dni', 'ghi'],
index=times)
expected = expected[['ghi', 'dni', 'dhi']]
assert_frame_equal(expected, clearsky)
def test_get_clearsky_simplified_solis_pressure():
tus = Location(32.2, -111, 'US/Arizona', 700, 'Tucson')
times = pd.DatetimeIndex(start='20160101T0600-0700',
end='20160101T1800-0700',
freq='3H')
clearsky = tus.get_clearsky(times, model='simplified_solis',
pressure=95000)
expected = pd.DataFrame(data=np.
array([[ 0. , 0. , 0. ],
[ 70.20556637, 635.53091983, 236.17716435],
[ 102.08954904, 850.49502085, 576.28465815],
[ 86.46561686, 753.70744638, 384.90537859],
[ 0. , 0. , 0. ]]),
columns=['dhi', 'dni', 'ghi'],
index=times)
expected = expected[['ghi', 'dni', 'dhi']]
assert_frame_equal(expected, clearsky, check_less_precise=2)
def test_get_clearsky_simplified_solis_aod_pw():
tus = Location(32.2, -111, 'US/Arizona', 700, 'Tucson')
times = pd.DatetimeIndex(start='20160101T0600-0700',
end='20160101T1800-0700',
freq='3H')
clearsky = tus.get_clearsky(times, model='simplified_solis',
aod700=0.25, precipitable_water=2.)
expected = pd.DataFrame(data=np.
array([[ 0. , 0. , 0. ],
[ 85.77821205, 374.58084365, 179.48483117],
[ 143.52743364, 625.91745295, 490.06254157],
[ 114.63275842, 506.52275195, 312.24711495],
[ 0. , 0. , 0. ]]),
columns=['dhi', 'dni', 'ghi'],
index=times)
expected = expected[['ghi', 'dni', 'dhi']]
assert_frame_equal(expected, clearsky, check_less_precise=2)
def test_get_clearsky_valueerror():
tus = Location(32.2, -111, 'US/Arizona', 700, 'Tucson')
times = pd.DatetimeIndex(start='20160101T0600-0700',
end='20160101T1800-0700',
freq='3H')
with pytest.raises(ValueError):
clearsky = tus.get_clearsky(times, model='invalid_model')
def test_from_tmy_3():
from test_tmy import tmy3_testfile
from pvlib.tmy import readtmy3
data, meta = readtmy3(tmy3_testfile)
loc = Location.from_tmy(meta, data)
assert loc.name is not None
assert loc.altitude != 0
assert loc.tz != 'UTC'
| assert_frame_equal(loc.tmy_data, data) | pandas.util.testing.assert_frame_equal |
# Copyright 2019 <NAME> GmbH
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any
from typing import Dict
from typing import List
import unittest
from pandas import DataFrame
from pandas.testing import assert_frame_equal
from tracetools_analysis.processor import Processor
from tracetools_analysis.processor.profile import ProfileHandler
from tracetools_read import DictEvent
# TEST DATA
#
# + Threads:
# 0: does whatever
# 1: contains one instance of the functions of interest
# 2: contains another instance of the functions of interest
#
# + Functions structure
# function_a
# function_aa
# function_b
#
# + Timeline
# tid 1 2
# func a aa b a aa b
# time
# 0 : whatever
# 3 : sched_switch from tid 0 to tid 1
# 5 : tid 1, func_entry: function_a
# 7 : sched_switch from tid 1 to tid 0 2
# 10 : sched_switch from tid 0 to tid 2
# 11 : tid 2, func_entry: function_a
# 15 : sched_switch from tid 2 to tid 1 4
# 16 : tid 1, func_entry: function_aa 1
# 20 : sched_switch from tid 1 to tid 2 4 4
# 27 : tid 2, func_entry: function_aa 7
# 29 : sched_switch from tid 2 to tid 1 2 2
# 30 : tid 1, func_exit: (function_aa) 1 1
# 32 : sched_switch from tid 1 to tid 0 2
# 34 : sched_switch from tid 0 to tid 2
# 35 : tid 2, func_exit: (function_aa) 1 1
# 37 : tid 2, func_exit: (function_a) 2
# 39 : tid 2, func_entry: function_b
# 40 : tid 2, func_exit: (function_b) 1
# 41 : sched_switch from tid 2 to tid 1
# 42 : tid 1, func_exit: (function_a) 1
# 44 : tid 1, func_entry: function_b
# 47 : sched_switch from tid 1 to tid 0 3
# 49 : sched_switch from tid 0 to tid 1
# 60 : tid 1, func_exit: (function_b) 11
# 69 : sched_switch from tid 1 to tid 0
#
# total 11 5 14 16 3 1
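# Worked example of the two duration figures tallied above (tid 1, function_aa):
# func_entry at t=16 and func_exit at t=30 give duration = 30 - 16 = 14, while
# tid 1 is scheduled out between t=20 and t=29, so the on-CPU time is
# (20 - 16) + (30 - 29) = 5, matching the 'actual_duration' expected below.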
input_events = [
{
'_name': 'sched_switch',
'_timestamp': 3,
'prev_tid': 0,
'next_tid': 1,
},
{
'_name': 'lttng_ust_cyg_profile_fast:func_entry',
'_timestamp': 5,
'vtid': 1,
'addr': '0xfA',
},
{
'_name': 'sched_switch',
'_timestamp': 7,
'prev_tid': 1,
'next_tid': 0,
},
{
'_name': 'sched_switch',
'_timestamp': 10,
'prev_tid': 0,
'next_tid': 2,
},
{
'_name': 'lttng_ust_cyg_profile_fast:func_entry',
'_timestamp': 11,
'vtid': 2,
'addr': '0xfA',
},
{
'_name': 'sched_switch',
'_timestamp': 15,
'prev_tid': 2,
'next_tid': 1,
},
{
'_name': 'lttng_ust_cyg_profile_fast:func_entry',
'_timestamp': 16,
'vtid': 1,
'addr': '0xfAA',
},
{
'_name': 'sched_switch',
'_timestamp': 20,
'prev_tid': 1,
'next_tid': 2,
},
{
'_name': 'lttng_ust_cyg_profile_fast:func_entry',
'_timestamp': 27,
'vtid': 2,
'addr': '0xfAA',
},
{
'_name': 'sched_switch',
'_timestamp': 29,
'prev_tid': 2,
'next_tid': 1,
},
{
'_name': 'lttng_ust_cyg_profile_fast:func_exit',
'_timestamp': 30,
'vtid': 1,
},
{
'_name': 'sched_switch',
'_timestamp': 32,
'prev_tid': 1,
'next_tid': 0,
},
{
'_name': 'sched_switch',
'_timestamp': 34,
'prev_tid': 0,
'next_tid': 2,
},
{
'_name': 'lttng_ust_cyg_profile_fast:func_exit',
'_timestamp': 35,
'vtid': 2,
},
{
'_name': 'lttng_ust_cyg_profile_fast:func_exit',
'_timestamp': 37,
'vtid': 2,
},
{
'_name': 'lttng_ust_cyg_profile_fast:func_entry',
'_timestamp': 39,
'vtid': 2,
'addr': '0xfB',
},
{
'_name': 'lttng_ust_cyg_profile_fast:func_exit',
'_timestamp': 40,
'vtid': 2,
},
{
'_name': 'sched_switch',
'_timestamp': 41,
'prev_tid': 2,
'next_tid': 1,
},
{
'_name': 'lttng_ust_cyg_profile_fast:func_exit',
'_timestamp': 42,
'vtid': 1,
},
{
'_name': 'lttng_ust_cyg_profile_fast:func_entry',
'_timestamp': 44,
'vtid': 1,
'addr': '0xfB',
},
{
'_name': 'sched_switch',
'_timestamp': 47,
'prev_tid': 1,
'next_tid': 0,
},
{
'_name': 'sched_switch',
'_timestamp': 49,
'prev_tid': 0,
'next_tid': 1,
},
{
'_name': 'lttng_ust_cyg_profile_fast:func_exit',
'_timestamp': 60,
'vtid': 1,
},
{
'_name': 'sched_switch',
'_timestamp': 69,
'prev_tid': 1,
'next_tid': 0,
},
]
expected = [
{
'tid': 1,
'depth': 1,
'function_name': '0xfAA',
'parent_name': '0xfA',
'start_timestamp': 16,
'duration': 14,
'actual_duration': 5,
},
{
'tid': 2,
'depth': 1,
'function_name': '0xfAA',
'parent_name': '0xfA',
'start_timestamp': 27,
'duration': 8,
'actual_duration': 3,
},
{
'tid': 2,
'depth': 0,
'function_name': '0xfA',
'parent_name': None,
'start_timestamp': 11,
'duration': 26,
'actual_duration': 16,
},
{
'tid': 2,
'depth': 0,
'function_name': '0xfB',
'parent_name': None,
'start_timestamp': 39,
'duration': 1,
'actual_duration': 1,
},
{
'tid': 1,
'depth': 0,
'function_name': '0xfA',
'parent_name': None,
'start_timestamp': 5,
'duration': 37,
'actual_duration': 11,
},
{
'tid': 1,
'depth': 0,
'function_name': '0xfB',
'parent_name': None,
'start_timestamp': 44,
'duration': 16,
'actual_duration': 14,
},
]
address_to_func = {
'0xfA': '0xfA',
'0xfAA': '0xfAA',
'0xfB': '0xfB',
}
class TestProfileHandler(unittest.TestCase):
def __init__(self, *args) -> None:
super().__init__(
*args,
)
@staticmethod
def build_expected_df(expected_data: List[Dict[str, Any]]) -> DataFrame:
# Columns should be in the same order
return | DataFrame.from_dict(expected_data) | pandas.DataFrame.from_dict |
import pandas as pd
import numpy as np
import argparse
import os
import time
import six
from datetime import datetime, timedelta
if os.name == "nt":
import win32com.client as wincl
def _say(sentence, sleepseconds=0.5):
try:
speaker = wincl.Dispatch("SAPI.SpVoice")
speaker.Speak(sentence)
time.sleep(sleepseconds)
except Exception as ex:
print("Error in speaking: ".format(ex.msg))
else:
def _say(sentence, sleepseconds=0.5):
os.system("say {0}".format(sentence))
time.sleep(sleepseconds)
CORRECT_RES = ["Thats Correct", "Correct", "Thats right. Way to go.", "Good Job.", "Excellent", "Thats correct. Good Effort"]
def get_words_to_reveiw(wordlist):
now = datetime.now()
selected_word = [word for word in wordlist if word.due_date < now]
no_words = len(selected_word)
# if more than 20 words are due, show only 10-15 of them
if no_words > 20:
selected_word = selected_word[:np.random.randint(10,16)]
if not selected_word:
print("Nothing to review.")
_say("Nothing to review.")
else:
print("{} words selected out of {}".format(len(selected_word), no_words))
return selected_word
THESHOLDS = [timedelta(seconds=0), timedelta(hours=1), timedelta(hours=3), timedelta(hours=7), timedelta(hours=24) , timedelta(days=2), timedelta(days=3), timedelta(days=7), timedelta(days=14), timedelta(days=30), timedelta(days=90)]
class wordline:
def __init__(self, word, num=0, due_date=datetime.now(), active=True):
self.word = word
self.num = num
self.due_date = due_date
self.active = active
def increment(self):
if self.num < len(THESHOLDS):
self.num = self.num + 1
else:
self.num = len(THESHOLDS)
def decrement(self):
# punish if wrong after 30 days
if self.num > 8:
self.num -=4
elif self.num > 0:  # never drop below level 0; a negative index would wrap to the longest interval
self.num = self.num - 1
else:
self.num = 0
def update_due_date(self):
try:
self.due_date = datetime.now() + THESHOLDS[self.num]
except Exception as ex:
self.due_date = datetime.now() + THESHOLDS[self.num-1]
def __repr__(self):
return "{0} {1} {2} {3}".format(self.word, self.num, self.active, self.due_date)
def ask(text):
return six.moves.input(text)
def confirm(text):
while True:
choice = input(text.strip(' ') + ' ').lower()
if choice in ('yes', 'y', 'ye', 'yep', 'yeah'):
return True
elif choice in ('no', 'n', 'nah', 'nay'):
return False
else:
print("Please respond with 'yes' or 'no'")
def format_timedelta(delta):
seconds = abs(int(delta.total_seconds()))
periods = [
(60 * 60 * 24 * 365, 'year'),
(60 * 60 * 24 * 30, 'month'),
(60 * 60 * 24, 'day'),
(60 * 60, 'hour'),
(60, 'minute'),
(1, 'second')
]
parts = []
for period_seconds, period_name in periods:
if seconds > period_seconds:
period_value, seconds = divmod(seconds, period_seconds)
part = '%s %s' % (period_value, period_name)
if period_value > 1:
part += 's'
parts.append(part)
ret = ', '.join(parts)
if delta.total_seconds() < 0:
ret = '-' + ret
return ret
def do_review_one(word):
while True:
print("Answer: ")
_say_question(word.word)
answer_text = ask("")
if answer_text.strip().lower() == word.word.lower():
is_correct = True
else:
is_correct = False
if is_correct:
word.increment()
else:
word.decrement()
word.update_due_date()
return word, is_correct, answer_text
def _say_question(word,sleepseconds=0.0):
_say(word, sleepseconds)
_say("The word is {}".format(word), sleepseconds=sleepseconds)
def do_review(wordslist):
np.random.shuffle(wordslist)
words_done = []
while True:
if not wordslist:
break
print("\n{0} words to go. ".format(len(wordslist)))
word = np.random.choice(wordslist)
word_, is_correct, ans = do_review_one(word)
if is_correct:
wordslist.remove(word)
print('Correct')
_say(np.random.choice(CORRECT_RES))
else:
correct_word = " ".join(word.word)
print('Incorrect. The Answer is : %s' % correct_word.upper())
_say("Incorrect. You spelled {}".format(ans))
_say("The Correct Answer is : ")
_say(correct_word)
words_done.append(word_)
return words_done
def get_words(fname="words.csv"):
if os.path.exists(fname):
df = pd.read_csv(fname, infer_datetime_format=True, parse_dates=["due_date"])
wordlists = [wordline(row.word, num=row.num, due_date=row.due_date, active=row.active) for _, row in df.iterrows()]
else:
wordlists=[]
return wordlists
def save_words(wordslist, fname="words.csv"):
| pd.DataFrame(data=[(word.word, word.due_date, word.num, word.active) for word in wordslist], columns=["word","due_date","num","active"]) | pandas.DataFrame |
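# Minimal driver sketch (the original entry point is cut off above, so the flow
# below is an assumption based on the helpers defined in this file):
# words = get_words("words.csv")
# due = get_words_to_reveiw(words)
# if due:
#     do_review(due)              # mutates the wordline objects in place
#     save_words(words, "words.csv")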
import logging
from typing import List, Optional, Tuple
import numpy as np
import pandas as pd
from temporis.dataset.ts_dataset import AbstractTimeSeriesDataset
import antropy as ant
from itertools import combinations
logger = logging.getLogger(__name__)
def numerical_features(ds: AbstractTimeSeriesDataset) -> pd.DataFrame:
features = ds.numeric_features()
data = {c: {"minimum": [], "maximum": [], 'mean':[]} for c in features}
for life in ds:
for c in features:
data[c]['minimum'].append(life[c].min())
data[c]['maximum'].append(life[c].max())
data[c]['mean'].append(life[c].mean())
for k in data.keys():
data[k]['minimum'] = np.mean(data[k]['minimum'])
data[k]['maximum'] = np.mean(data[k]['maximum'])
data[k]['mean'] = np.mean(data[k]['mean'])
df= pd.DataFrame(data)
return df.T
def sample_rate(ds: AbstractTimeSeriesDataset, unit:Optional[str] = 's') -> np.ndarray:
"""Obtain an array of time difference between two consecutive samples
If the index it's a timestamp, the time difference will be converted to the provided
unit
Parameters
----------
ds : AbstractTimeSeriesDataset
The dataset
unit : Optional[str], optional
Unit to convert the timestamps differences, by default 's'
Returns
-------
np.ndarray
"""
time_diff = []
for life in ds:
diff = np.diff(life.index.values)
if | pd.api.types.is_timedelta64_ns_dtype(diff.dtype) | pandas.api.types.is_timedelta64_ns_dtype |
"""
DatabaseRates object created from rates downloaded from the URDB,
https://openei.org.
"""
#public
import sys
import glob
import logging
import warnings
import numpy as np
import pandas as pd
from datetime import datetime
sys.path.append('../')
import config as config
import lcoc.readwrite as readwrite
import lcoc.helpers as helpers
#settings
pd.options.mode.chained_assignment = None
class DatabaseRates(object):
"""
Object for working with data downloaded from NREL's Utility Rate
Database (URDB). Rates in the URDB are checked and updated annually by NREL
under funding from the U.S. Department of Energy's Solar Energy
Technologies Program, in partnership with Illinois State University's
Institute for Regulatory Policy Studies.
Attributes
-----------
source:
URL used to download URDB data
rate_data:
pandas.DataFrame where each row represents a unique utility rate,
unfiltered & unprocessed from the URDB.
res_rate_data:
pandas.DataFrame where each row represents a unique residential utility
rate
com_rate_data:
pandas.DataFrame where each row represents a unique commercial utility
rate
prev_exists:
Boolean indicating whether this version of the dataset has been run previously
"""
def __init__(self, urdb_file=None):
# Download URDB data
self.source='https://openei.org/apps/USURDB/download/usurdb.csv.gz'
# Load URDB data
if urdb_file is not None:
self.rate_data = pd.read_csv(urdb_file, low_memory=False)
else:
self.rate_data = readwrite.read_urdb_data(self.source)
# Assign rate_id
self.rate_data['rate_id'] = list(range(1, len(self.rate_data)+1))
# Save copy of URDB data (if unique) to data/urdb
self.prev_exists = readwrite.write_urdb_rate_data(self.rate_data)
# Separate residential & commercial rates into separate dfs
self.res_rate_data = self.rate_data[self.rate_data['sector']=='Residential']
self.com_rate_data = self.rate_data[self.rate_data['sector'].isin(['Commercial', 'Industrial'])]
def filter_stale_rates(self, industry):
"""
Removes rates w/ specified end date, so that only rates without
end dates remain (active rates). The industry arg must be "residential"
or "commercial".
"""
industry = industry.lower()
if industry == 'residential':
df = self.res_rate_data
elif industry == 'commercial':
df = self.com_rate_data
else:
raise ValueError("Industry must be 'residential' or 'commercial'!")
df = df[df.enddate.isnull()]
if industry == 'residential':
self.res_rate_data = df
elif industry == 'commercial':
self.com_rate_data = df
def classify_rate_structures(self, industry, ev_rate_words_file='filters/urdb_res_ev_specific_rate_words.txt'):
"""
Adds five columns to self.res_rate_data and four to self.com_rate_data,
['is_ev_rate' (residential only), 'is_demand_rate', 'is_tier_rate',
'is_seasonal_rate', 'is_tou_rate'], that are binary classifiers
indicating whether a rate is a demand rate, tier rate, seasonal rate, and/or
TOU rate. Note that rates may be combinations of 1+ rate structure.
Tier rates and flat rates are mutually exclusive, meaning when
'is_tier_rate'==0, it is a flat rate.
"""
industry = industry.lower()
if industry == 'residential':
df = self.res_rate_data
with open(ev_rate_words_file, 'r') as f:
filters = f.read().splitlines()
df['is_ev_rate'] = ((df.name.apply(lambda x: helpers.contains_filter_phrases(x, filters)==True))|
(df.description.apply(lambda x: helpers.contains_filter_phrases(x, filters)))==True).map(int)
elif industry == 'commercial':
df = self.com_rate_data
else:
raise ValueError("industry must be 'residential' or 'commercial'!")
# Classify by rate structure
is_demand, is_tier, is_seasonal, is_tou = [], [], [], []
tier_cols = []
for tier in range(1,11): #period 0
tier_cols.append('energyratestructure/period0/tier{}rate'.format(tier))
for tier in range(1,8): #period 1
tier_cols.append('energyratestructure/period1/tier{}rate'.format(tier))
for per in range(2,6): #period 2-5
for tier in range(1,5):
tier_cols.append('energyratestructure/period{0}/tier{1}rate'.format(per, tier))
for _, row in df.iterrows():
# Demand rate check
if (np.isnan(float(row['flatdemandstructure/period0/tier0rate']))
and np.isnan(float(row['demandratestructure/period0/tier0rate']))):
is_demand.append(0)
else:
is_demand.append(1)
# Tier rate check
tier_check = int(row[tier_cols].isnull().all()==False)
is_tier.append(tier_check)
# Seasonal & TOU rate check
try:
year_wkdays = [ls.replace('[', '').replace(',', '').replace(' ', '') for ls in str(row['energyweekdayschedule']).split(']')][:-2]
year_wknds = [ls.replace('[', '').replace(',', '').replace(' ', '') for ls in str(row['energyweekendschedule']).split(']')][:-2]
#seasonal
if (len(set(year_wkdays))>1) or (len(set(year_wknds))>1):
seasonal=1
else:
seasonal=0
is_seasonal.append(seasonal)
#TOU
tous =[]
for wkday_month, wknd_month in zip(year_wkdays, year_wknds):
if (len(set(wkday_month))>1) or (len(set(wknd_month))>1):
tous.append(1)
else:
tous.append(0)
if np.array(tous).sum()==0:
tou=0
else:
tou=1
is_tou.append(tou)
except:
is_seasonal.append(np.nan)
is_tou.append(np.nan)
df['is_demand_rate'] = is_demand
df['is_tier_rate'] = is_tier
df['is_seasonal_rate'] = is_seasonal
df['is_tou_rate'] = is_tou
if industry == 'residential':
self.res_rate_data = df
elif industry == 'commercial':
self.com_rate_data = df
def generate_classification_tree_values(self, industry):
"""
Returns dictionary of branch name: number of rates for each branch
in the rate structure classification tree.
"""
industry = industry.lower()
if industry == 'residential':
df = self.res_rate_data
elif industry == 'commercial':
df = self.com_rate_data
else:
raise ValueError("industry must be 'residential' or 'commercial'!")
class_tree_cnts = {}
class_tree_cnts['demand'] = len(df[df.is_demand_rate==1])
class_tree_cnts['no_demand'] = len(df[df.is_demand_rate==0])
class_tree_cnts['demand/tier'] = len(df[(df.is_demand_rate==1)&(df.is_tier_rate==1)])
class_tree_cnts['demand/fixed'] = len(df[(df.is_demand_rate==1)&(df.is_tier_rate==0)])
class_tree_cnts['no_demand/tier'] = len(df[(df.is_demand_rate==0)&(df.is_tier_rate==1)])
class_tree_cnts['no_demand/fixed'] = len(df[(df.is_demand_rate==0)&(df.is_tier_rate==0)])
class_tree_cnts['demand/tier/seasonal'] = len(df[(df.is_demand_rate==1)&(df.is_tier_rate==1)&(df.is_seasonal_rate==1)])
class_tree_cnts['demand/tier/no_seasonal'] = len(df[(df.is_demand_rate==1)&(df.is_tier_rate==1)&(df.is_seasonal_rate==0)])
class_tree_cnts['demand/fixed/seasonal'] = len(df[(df.is_demand_rate==1)&(df.is_tier_rate==0)&(df.is_seasonal_rate==1)])
class_tree_cnts['demand/fixed/no_seasonal'] = len(df[(df.is_demand_rate==1)&(df.is_tier_rate==0)&(df.is_seasonal_rate==0)])
class_tree_cnts['no_demand/tier/seasonal'] = len(df[(df.is_demand_rate==0)&(df.is_tier_rate==1)&(df.is_seasonal_rate==1)])
class_tree_cnts['no_demand/tier/no_seasonal'] = len(df[(df.is_demand_rate==0)&(df.is_tier_rate==1)&(df.is_seasonal_rate==0)])
class_tree_cnts['no_demand/fixed/seasonal'] = len(df[(df.is_demand_rate==0)&(df.is_tier_rate==0)&(df.is_seasonal_rate==1)])
class_tree_cnts['no_demand/fixed/no_seasonal'] = len(df[(df.is_demand_rate==0)&(df.is_tier_rate==0)&(df.is_seasonal_rate==0)])
class_tree_cnts['demand/tier/seasonal/tou'] = len(df[(df.is_demand_rate==1)&(df.is_tier_rate==1)&(df.is_seasonal_rate==1)&(df.is_tou_rate==1)])
class_tree_cnts['demand/tier/seasonal/no_tou'] = len(df[(df.is_demand_rate==1)&(df.is_tier_rate==1)&(df.is_seasonal_rate==1)&(df.is_tou_rate==0)])
class_tree_cnts['demand/tier/no_seasonal/tou'] = len(df[(df.is_demand_rate==1)&(df.is_tier_rate==1)&(df.is_seasonal_rate==0)&(df.is_tou_rate==1)])
class_tree_cnts['demand/tier/no_seasonal/no_tou'] = len(df[(df.is_demand_rate==1)&(df.is_tier_rate==1)&(df.is_seasonal_rate==0)&(df.is_tou_rate==0)])
class_tree_cnts['demand/fixed/seasonal/tou'] = len(df[(df.is_demand_rate==1)&(df.is_tier_rate==0)&(df.is_seasonal_rate==1)&(df.is_tou_rate==1)])
class_tree_cnts['demand/fixed/seasonal/no_tou'] = len(df[(df.is_demand_rate==1)&(df.is_tier_rate==0)&(df.is_seasonal_rate==1)&(df.is_tou_rate==0)])
class_tree_cnts['demand/fixed/no_seasonal/tou'] = len(df[(df.is_demand_rate==1)&(df.is_tier_rate==0)&(df.is_seasonal_rate==0)&(df.is_tou_rate==1)])
class_tree_cnts['demand/fixed/no_seasonal/no_tou'] = len(df[(df.is_demand_rate==1)&(df.is_tier_rate==0)&(df.is_seasonal_rate==0)&(df.is_tou_rate==0)])
class_tree_cnts['no_demand/tier/seasonal/tou'] = len(df[(df.is_demand_rate==0)&(df.is_tier_rate==1)&(df.is_seasonal_rate==1)&(df.is_tou_rate==1)])
class_tree_cnts['no_demand/tier/seasonal/no_tou'] = len(df[(df.is_demand_rate==0)&(df.is_tier_rate==1)&(df.is_seasonal_rate==1)&(df.is_tou_rate==0)])
class_tree_cnts['no_demand/tier/no_seasonal/tou'] = len(df[(df.is_demand_rate==0)&(df.is_tier_rate==1)&(df.is_seasonal_rate==0)&(df.is_tou_rate==1)])
class_tree_cnts['no_demand/tier/no_seasonal/no_tou'] = len(df[(df.is_demand_rate==0)&(df.is_tier_rate==1)&(df.is_seasonal_rate==0)&(df.is_tou_rate==0)])
class_tree_cnts['no_demand/fixed/seasonal/tou'] = len(df[(df.is_demand_rate==0)&(df.is_tier_rate==0)&(df.is_seasonal_rate==1)&(df.is_tou_rate==1)])
class_tree_cnts['no_demand/fixed/seasonal/no_tou'] = len(df[(df.is_demand_rate==0)&(df.is_tier_rate==0)&(df.is_seasonal_rate==1)&(df.is_tou_rate==0)])
class_tree_cnts['no_demand/fixed/no_seasonal/tou'] = len(df[(df.is_demand_rate==0)&(df.is_tier_rate==0)&(df.is_seasonal_rate==0)&(df.is_tou_rate==1)])
class_tree_cnts['no_demand/fixed/no_seasonal/no_tou'] = len(df[(df.is_demand_rate==0)&(df.is_tier_rate==0)&(df.is_seasonal_rate==0)&(df.is_tou_rate==0)])
return class_tree_cnts
def filter_demand_rates(self, industry):
"""
Filters rates w/ demand charges.
"""
industry = industry.lower()
if industry == 'residential':
df = self.res_rate_data
elif industry == 'commercial':
df = self.com_rate_data
else:
raise ValueError("industry must be 'residential' or 'commercial'!")
df = df[df.is_demand_rate==0]
if industry == 'residential':
self.res_rate_data = df
elif industry == 'commercial':
self.com_rate_data = df
def filter_on_phrases(self, industry, filters_path='filters/'):
"""Filters rates on lists of filter phrases:
filters/urdb_res_filters.txt for residential rates and
filters/urdb_dcfc_filters.txt for commercial rates.
"""
industry = industry.lower()
if industry == 'residential':
df = self.res_rate_data
filters_file = filters_path + 'urdb_res_filters.txt'
elif industry == 'commercial':
df = self.com_rate_data
filters_file = filters_path + 'urdb_dcfc_filters.txt'
else:
raise ValueError("industry must be 'residential' or 'commercial'!")
with open(filters_file, 'r') as f:
filters = f.read().splitlines()
df = df[((df.name.apply(lambda x: helpers.contains_filter_phrases(x, filters)))==False)&
((df.description.apply(lambda x: helpers.contains_filter_phrases(x, filters)))==False)]
if industry == 'residential':
self.res_rate_data = df
elif industry == 'commercial':
self.com_rate_data = df
def additional_com_rate_filters(self):
"""
Filters commercial rates missing critical fields for approximating the
cost of electricity.
"""
df = self.com_rate_data
# Filter rates that don't use kW, kWh
df = df[(df.demandunits == 'kW')&
(df.flatdemandunits == 'kW')&
(df.demandunits == 'kW')]
# Filter rates in $/day (fixed charge)
df = df[df.fixedchargeunits != '$/day']
# Filter rates w/ min voltages higher than 900 V
df = df[(df.voltageminimum <= 900)|(df.voltageminimum.isnull())]
# Filter rates w/ coincident demand structure (can't predict utility peak dmnd)
df = df[df['coincidentratestructure/period0/tier0rate'].isnull()]
# Filter rates w/o energy rate information
df = df[~df['energyratestructure/period0/tier0rate'].isnull()]
self.com_rate_data = df
def com_rate_preprocessing(self):
"""
Standardizes units and reporting for commercial rates.
"""
df = self.com_rate_data
# Set: fixed charge = 0 when fixed charge == NULL
df['fixedchargefirstmeter'] = df['fixedchargefirstmeter'].fillna(0)
df['fixedchargeunits'] = df['fixedchargeunits'].fillna('$/month')
# Sub-function for converting fixed charges to $/month
def convert_to_dollars_per_month(units, charges):
monthly_charges = []
for unit, charge in zip(units, charges):
if unit=='$/month':
monthly_charges.append(charge)
elif unit=='$/day':
monthly_charges.append(charge*30)
else:
raise ValueError('"{0}" unit not recognized'.format(unit))
return monthly_charges
df['fixedchargefirstmeter'] = convert_to_dollars_per_month(df['fixedchargeunits'], df['fixedchargefirstmeter'])
df['fixedchargeunits'] = '$/month'
# Min demand constraint = 0 when NULL
df['peakkwcapacitymin'] = df['peakkwcapacitymin'].fillna(0)
df['peakkwhusagemin'] = df['peakkwhusagemin'].fillna(0)
# Max demand constraint = inf when NULL
df['peakkwcapacitymax'] = df['peakkwcapacitymax'].fillna(np.inf)
df['peakkwhusagemax'] = df['peakkwhusagemax'].fillna(np.inf)
self.com_rate_data = df
def combine_rates(self, industry):
"""
Adds 57 columns to self.res_rate_data and self.com_rate_data that are the
sum of the base rate and adjusted rate.
"""
industry = industry.lower()
if industry == 'residential':
df = self.res_rate_data
elif industry == 'commercial':
df = self.com_rate_data
else:
raise ValueError("industry must be 'residential' or 'commercial'!")
#period 0 (11 tiers)
df['energyrate/period0/tier0'] = df['energyratestructure/period0/tier0rate'] + df['energyratestructure/period0/tier0adj'].fillna(0)
df['energyrate/period0/tier1'] = df['energyratestructure/period0/tier1rate'] + df['energyratestructure/period0/tier1adj'].fillna(0)
df['energyrate/period0/tier2'] = df['energyratestructure/period0/tier2rate'] + df['energyratestructure/period0/tier2adj'].fillna(0)
df['energyrate/period0/tier3'] = df['energyratestructure/period0/tier3rate'] + df['energyratestructure/period0/tier3adj'].fillna(0)
df['energyrate/period0/tier4'] = df['energyratestructure/period0/tier4rate'] + df['energyratestructure/period0/tier4adj'].fillna(0)
df['energyrate/period0/tier5'] = df['energyratestructure/period0/tier5rate'] + df['energyratestructure/period0/tier5adj'].fillna(0)
df['energyrate/period0/tier6'] = df['energyratestructure/period0/tier6rate'] + df['energyratestructure/period0/tier6adj'].fillna(0)
df['energyrate/period0/tier7'] = df['energyratestructure/period0/tier7rate'] + df['energyratestructure/period0/tier7adj'].fillna(0)
df['energyrate/period0/tier8'] = df['energyratestructure/period0/tier8rate'] + df['energyratestructure/period0/tier8adj'].fillna(0)
df['energyrate/period0/tier9'] = df['energyratestructure/period0/tier9rate'] + df['energyratestructure/period0/tier9adj'].fillna(0)
df['energyrate/period0/tier10'] = df['energyratestructure/period0/tier10rate'] + df['energyratestructure/period0/tier10adj'].fillna(0)
#period 1 (8 tiers)
df['energyrate/period1/tier0'] = df['energyratestructure/period1/tier0rate'] + df['energyratestructure/period1/tier0adj'].fillna(0)
df['energyrate/period1/tier1'] = df['energyratestructure/period1/tier1rate'] + df['energyratestructure/period1/tier1adj'].fillna(0)
df['energyrate/period1/tier2'] = df['energyratestructure/period1/tier2rate'] + df['energyratestructure/period1/tier2adj'].fillna(0)
df['energyrate/period1/tier3'] = df['energyratestructure/period1/tier3rate'] + df['energyratestructure/period1/tier3adj'].fillna(0)
df['energyrate/period1/tier4'] = df['energyratestructure/period1/tier4rate'] + df['energyratestructure/period1/tier4adj'].fillna(0)
df['energyrate/period1/tier5'] = df['energyratestructure/period1/tier5rate'] + df['energyratestructure/period1/tier5adj'].fillna(0)
df['energyrate/period1/tier6'] = df['energyratestructure/period1/tier6rate'] + df['energyratestructure/period1/tier6adj'].fillna(0)
df['energyrate/period1/tier7'] = df['energyratestructure/period1/tier7rate'] + df['energyratestructure/period1/tier7adj'].fillna(0)
#period 2 (5 tiers)
df['energyrate/period2/tier0'] = df['energyratestructure/period2/tier0rate'] + df['energyratestructure/period2/tier0adj'].fillna(0)
df['energyrate/period2/tier1'] = df['energyratestructure/period2/tier1rate'] + df['energyratestructure/period2/tier1adj'].fillna(0)
df['energyrate/period2/tier2'] = df['energyratestructure/period2/tier2rate'] + df['energyratestructure/period2/tier2adj'].fillna(0)
df['energyrate/period2/tier3'] = df['energyratestructure/period2/tier3rate'] + df['energyratestructure/period2/tier3adj'].fillna(0)
df['energyrate/period2/tier4'] = df['energyratestructure/period2/tier4rate'] + df['energyratestructure/period2/tier4adj'].fillna(0)
#period 3 (5 tiers)
df['energyrate/period3/tier0'] = df['energyratestructure/period3/tier0rate'] + df['energyratestructure/period3/tier0adj'].fillna(0)
df['energyrate/period3/tier1'] = df['energyratestructure/period3/tier1rate'] + df['energyratestructure/period3/tier1adj'].fillna(0)
df['energyrate/period3/tier2'] = df['energyratestructure/period3/tier2rate'] + df['energyratestructure/period3/tier2adj'].fillna(0)
df['energyrate/period3/tier3'] = df['energyratestructure/period3/tier3rate'] + df['energyratestructure/period3/tier3adj'].fillna(0)
df['energyrate/period3/tier4'] = df['energyratestructure/period3/tier4rate'] + df['energyratestructure/period3/tier4adj'].fillna(0)
#period 4 (5 tiers)
df['energyrate/period4/tier0'] = df['energyratestructure/period4/tier0rate'] + df['energyratestructure/period4/tier0adj'].fillna(0)
df['energyrate/period4/tier1'] = df['energyratestructure/period4/tier1rate'] + df['energyratestructure/period4/tier1adj'].fillna(0)
df['energyrate/period4/tier2'] = df['energyratestructure/period4/tier2rate'] + df['energyratestructure/period4/tier2adj'].fillna(0)
df['energyrate/period4/tier3'] = df['energyratestructure/period4/tier3rate'] + df['energyratestructure/period4/tier3adj'].fillna(0)
df['energyrate/period4/tier4'] = df['energyratestructure/period4/tier4rate'] + df['energyratestructure/period4/tier4adj'].fillna(0)
#period 5 (5 tiers)
df['energyrate/period5/tier0'] = df['energyratestructure/period5/tier0rate'] + df['energyratestructure/period5/tier0adj'].fillna(0)
df['energyrate/period5/tier1'] = df['energyratestructure/period5/tier1rate'] + df['energyratestructure/period5/tier1adj'].fillna(0)
df['energyrate/period5/tier2'] = df['energyratestructure/period5/tier2rate'] + df['energyratestructure/period5/tier2adj'].fillna(0)
df['energyrate/period5/tier3'] = df['energyratestructure/period5/tier3rate'] + df['energyratestructure/period5/tier3adj'].fillna(0)
df['energyrate/period5/tier4'] = df['energyratestructure/period5/tier4rate'] + df['energyratestructure/period5/tier4adj'].fillna(0)
#period 6-23
df['energyrate/period6/tier0'] = df['energyratestructure/period6/tier0rate'] + df['energyratestructure/period6/tier0adj'].fillna(0)
df['energyrate/period7/tier0'] = df['energyratestructure/period7/tier0rate'] + df['energyratestructure/period7/tier0adj'].fillna(0)
df['energyrate/period8/tier0'] = df['energyratestructure/period8/tier0rate'] + df['energyratestructure/period8/tier0adj'].fillna(0)
df['energyrate/period9/tier0'] = df['energyratestructure/period9/tier0rate'] + df['energyratestructure/period9/tier0adj'].fillna(0)
df['energyrate/period10/tier0'] = df['energyratestructure/period10/tier0rate'] + df['energyratestructure/period10/tier0adj'].fillna(0)
df['energyrate/period11/tier0'] = df['energyratestructure/period11/tier0rate'] + df['energyratestructure/period11/tier0adj'].fillna(0)
df['energyrate/period12/tier0'] = df['energyratestructure/period12/tier0rate'] + df['energyratestructure/period12/tier0adj'].fillna(0)
df['energyrate/period13/tier0'] = df['energyratestructure/period13/tier0rate'] + df['energyratestructure/period13/tier0adj'].fillna(0)
df['energyrate/period14/tier0'] = df['energyratestructure/period14/tier0rate'] + df['energyratestructure/period14/tier0adj'].fillna(0)
df['energyrate/period15/tier0'] = df['energyratestructure/period15/tier0rate'] + df['energyratestructure/period15/tier0adj'].fillna(0)
df['energyrate/period16/tier0'] = df['energyratestructure/period16/tier0rate'] + df['energyratestructure/period16/tier0adj'].fillna(0)
df['energyrate/period17/tier0'] = df['energyratestructure/period17/tier0rate'] + df['energyratestructure/period17/tier0adj'].fillna(0)
df['energyrate/period18/tier0'] = df['energyratestructure/period18/tier0rate'] + df['energyratestructure/period18/tier0adj'].fillna(0)
df['energyrate/period19/tier0'] = df['energyratestructure/period19/tier0rate'] + df['energyratestructure/period19/tier0adj'].fillna(0)
df['energyrate/period20/tier0'] = df['energyratestructure/period20/tier0rate'] + df['energyratestructure/period20/tier0adj'].fillna(0)
df['energyrate/period21/tier0'] = df['energyratestructure/period21/tier0rate'] + df['energyratestructure/period21/tier0adj'].fillna(0)
df['energyrate/period22/tier0'] = df['energyratestructure/period22/tier0rate'] + df['energyratestructure/period22/tier0adj'].fillna(0)
df['energyrate/period23/tier0'] = df['energyratestructure/period23/tier0rate'] + df['energyratestructure/period23/tier0adj'].fillna(0)
if industry == 'residential':
self.res_rate_data = df
elif industry == 'commercial':
self.com_rate_data = df
def filter_null_rates(self, industry):
"""
Filters rates with no cost information.
"""
industry = industry.lower()
if industry == 'residential':
df = self.res_rate_data
elif industry == 'commercial':
df = self.com_rate_data
else:
raise ValueError("industry must be 'residential' or 'commercial'!")
df = df.dropna(subset=['energyrate/period0/tier0'])
if industry == 'residential':
self.res_rate_data = df
elif industry == 'commercial':
self.com_rate_data = df
def calculate_annual_energy_cost_residential(self, outpath='outputs/cost-of-electricity/urdb-res-rates/'):
"""
Calculates the annualized energy costs for residential rates. Estimates
account for seasonal, tier, and TOU rate structures. Key assumptions
include: 1) Charging occurs with the same freqency irregardless of
weekday vs. weekend or season (time of year); 2) Charging occurs with
the same frequency across rate tiers; 3) For TOU rates, charging will
always occur when it is cheapest to do so (off-peak). Adds
'electricity_cost_per_kwh' col to self.res_rate_data.
"""
# Fixed Rates - incl. seasonal & TOU
res_rates_fixed = self.res_rate_data[self.res_rate_data.is_tier_rate==0]
avg_costs = []
for i in range(len(res_rates_fixed)):
month_rates = []
#weekday
for month in [ls.replace('[', '').replace(',', '').replace(' ', '') for ls in str(res_rates_fixed.iloc[i]['energyweekdayschedule']).split(']')][:-2]: #seasonal
periods = (list(set(month)))
day_rates = []
for per in periods: #TOU
rate_str = 'energyrate/period{}/tier0'.format(per)
rate = res_rates_fixed.iloc[i][rate_str]
day_rates.append(rate)
min_day_rate = min(np.array(day_rates))
month_rates.extend([min_day_rate]*5)
#weekend
for month in [ls.replace('[', '').replace(',', '').replace(' ', '') for ls in str(res_rates_fixed.iloc[i]['energyweekendschedule']).split(']')][:-2]: #seasonal
periods = (list(set(month)))
day_rates = []
for per in periods: #TOU
rate_str = 'energyrate/period{}/tier0'.format(per)
rate = res_rates_fixed.iloc[i][rate_str]
day_rates.append(rate)
min_day_rate = min(np.array(day_rates))
month_rates.extend([min_day_rate]*2)
avg_cost = np.array(month_rates).mean() #dow-weighted cost
avg_costs.append(avg_cost)
res_rates_fixed['electricity_cost_per_kwh'] = avg_costs
# Tier Rates - incl. seasonal & TOU
res_rates_tier = self.res_rate_data[self.res_rate_data.is_tier_rate==1]
avg_costs = []
for i in range(len(res_rates_tier)): #tier rate = avg of all tiers
avg_tier_rates = []
avg_tier_month_rates = []
for p in range(24):
if p==0:
tier_rates = []
for t in range(11):
rate_str = 'energyrate/period{0}/tier{1}'.format(p,t)
rate = res_rates_tier.iloc[i][rate_str]
tier_rates.append(rate)
with warnings.catch_warnings(): #supress warnings
warnings.simplefilter("ignore", category=RuntimeWarning)
avg_tier_rate = np.nanmean(np.array(tier_rates))
avg_tier_rates.append(avg_tier_rate)
elif p==1:
tier_rates = []
for t in range(8):
rate_str = 'energyrate/period{0}/tier{1}'.format(p,t)
rate = res_rates_tier.iloc[i][rate_str]
tier_rates.append(rate)
with warnings.catch_warnings(): #supress warnings
warnings.simplefilter("ignore", category=RuntimeWarning)
avg_tier_rate = np.nanmean(np.array(tier_rates))
avg_tier_rates.append(avg_tier_rate)
elif p>=2 and p<6:
tier_rates = []
for t in range(5):
rate_str = 'energyrate/period{0}/tier{1}'.format(p,t)
rate = res_rates_tier.iloc[i][rate_str]
tier_rates.append(rate)
with warnings.catch_warnings(): #supress warnings
warnings.simplefilter("ignore", category=RuntimeWarning)
avg_tier_rate = np.nanmean(np.array(tier_rates))
avg_tier_rates.append(avg_tier_rate)
else:
rate_str = 'energyrate/period{0}/tier0'.format(p)
rate = res_rates_tier.iloc[i][rate_str]
avg_tier_rates.append(rate)
#weekday rates
months = [ls.replace('[', '').replace(',', '').replace(' ', '') for ls in str(res_rates_tier.iloc[i]['energyweekdayschedule']).split(']')][:-2]
for month in months: #seasonal
periods = (list(set(month)))
avg_rates = []
for per in periods: #TOU
per = int(per)
avg_tier_rate = avg_tier_rates[per]
avg_rates.append(avg_tier_rate)
min_avg_tier_day_rate = min(np.array(avg_rates))
avg_tier_month_rates.extend([min_avg_tier_day_rate]*5)
#weekend rates
months = [ls.replace('[', '').replace(',', '').replace(' ', '') for ls in str(res_rates_tier.iloc[i]['energyweekendschedule']).split(']')][:-2]
for month in months:
periods = (list(set(month)))
avg_rates = []
for per in periods:
per = int(per)
avg_tier_rate = avg_tier_rates[per]
avg_rates.append(avg_tier_rate)
min_avg_tier_day_rate = min(np.array(avg_rates))
avg_tier_month_rates.extend([min_avg_tier_day_rate]*2)
avg_cost = np.array(avg_tier_month_rates).mean() #dow-weighted cost
avg_costs.append(avg_cost)
res_rates_tier['electricity_cost_per_kwh'] = avg_costs
res_df = pd.concat([res_rates_fixed, res_rates_tier], sort=False)
res_df = res_df[res_df.electricity_cost_per_kwh>=0] #remove negative rates
self.res_rate_data = res_df
self.res_rate_data.to_csv(outpath+'res_rates.csv', index=False)
print("Complete, {} rates included.".format(len(self.res_rate_data)))
def calculate_annual_cost_dcfc(self,
dcfc_load_profiles = config.DCFC_PROFILES_DICT,
outpath = 'outputs/cost-of-electricity/urdb-dcfc-rates/',
log_lvl = 1):
"""
Calculates the annualized average daily cost to charge for
commercial rates under an annual dcfc_load_profile. Estimates account
for demand, seasonal, tier, and TOU rate structures. Due to its
significant runtime, the function outputs a .csv at outpath for each profile
in dcfc_load_profiles. The log_lvl parameter must be in [0,1,2] where higher
levels reflect more verbose logs.
"""
assert log_lvl in [0,1,2], "Unexpected log_lvl, must be in [0,1,2]"
if log_lvl == 0:
log_lbl = logging.WARNING
elif log_lvl == 1:
log_lbl = logging.INFO
elif log_lvl == 2:
log_lbl = logging.DEBUG
logging.basicConfig(level=log_lbl)
for p in dcfc_load_profiles.keys():
# Load profile
profile_path = dcfc_load_profiles[p]
profile_df = | pd.read_csv(profile_path, index_col=0, parse_dates=True) | pandas.read_csv |
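# Hedged usage sketch for the class above (call order follows the method
# docstrings; the file path is an assumption, not a path from this repo):
# rates = DatabaseRates(urdb_file='data/urdb/usurdb.csv')
# for sector in ('residential', 'commercial'):
#     rates.filter_stale_rates(sector)
#     rates.classify_rate_structures(sector)
#     rates.filter_demand_rates(sector)
#     rates.filter_on_phrases(sector)
#     rates.combine_rates(sector)
#     rates.filter_null_rates(sector)
# rates.additional_com_rate_filters()
# rates.com_rate_preprocessing()
# rates.calculate_annual_energy_cost_residential()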
"""based on https://github.com/quintel/etdataset-
public/tree/master/curves/demand/buildings/space_heating by Quintel"""
from pathlib import Path
import pandas as pd
class BuildingsModel:
"""Class to describe a heating model of a building"""
@classmethod
def from_defaults(cls):
# load G2A parameters
file = Path(__file__).parent / 'data/G2A_parameters.csv'
parameters = pd.read_csv(file)
return cls(parameters)
def __init__(self, parameters):
# assign datetime to parameters
self.parameters = self._assign_datetime_index(parameters)
def _assign_datetime_index(self, dataframe):
# make periodindex
start = f'01-01-01 00:00'
dataframe.index = pd.period_range(start=start, periods=8760, freq='H')
return dataframe
def make_heat_demand_profile(self, temperature, wind_speed):
"""
effective temperature is defined as daily average temperature in C minus
daily average wind speed in m/s divided by 1.5.
"""
if len(temperature) != 8760:
raise ValueError('temperature must contain 8760 values')
if len(wind_speed) != 8760:
raise ValueError('wind_speed must contain 8760 values')
# merge datapoints
profile = pd.concat([temperature, wind_speed], axis=1)
profile.columns = ['temperature', 'wind_speed']
# assign periodindex
profile = self._assign_datetime_index(profile)
# evaluate daily averages
grouper = | pd.Grouper(freq='D') | pandas.Grouper |
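# Sketch of the effective-temperature step the docstring describes (the rest of
# make_heat_demand_profile is cut off above; column names follow the code):
# daily = profile.groupby(pd.Grouper(freq='D')).mean()
# daily['effective'] = daily['temperature'] - daily['wind_speed'] / 1.5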
# coding: utf-8
# Import libraries
import pandas as pd
from pandas import ExcelWriter
import numpy as np
import pickle
from sklearn.linear_model import LinearRegression
from sklearn import preprocessing
from sklearn.linear_model import LassoCV
from mlxtend.feature_selection import SequentialFeatureSelector as SFS
def feature_selection(gene_set, n_data_matrix, type):
"""
The FEATURE_SELECTION operation executes the feature selection procedure per gene set and per data matrix, according to the specified type of selection: it takes as input the name of the gene set to consider and the number of the model to build (i.e., the number of the data matrix to consider) and performs the specified feature selection for all the genes of interest in the selected set. Results are exported locally either in Excel or text files.
:param gene_set: the set of genes of interest to analyze
:param n_data_matrix: number identifying the data matrix to analyze (only 2,3 and 5 values are permitted)
:param type: the type of feature selection to perform (possible values are {'ffs_default','ffs_no_reval','all','lasso','lasso_all'})
Example::
import genereg as gr
gr.FeatureSelection.feature_selection(gene_set='DNA_REPAIR', n_data_matrix=2, type=ffs_default)
gr.FeatureSelection.feature_selection(gene_set='DNA_REPAIR', n_data_matrix=3, type=ffs_default)
gr.FeatureSelection.feature_selection(gene_set='DNA_REPAIR', n_data_matrix=5, type=ffs_default)
"""
# Check input parameters
if n_data_matrix not in [2, 3, 5]:
raise ValueError('Data Matrix ERROR! Possible values: {2,3,5}')
# Define the model to create
model = str(n_data_matrix)
# Import the list of genes of interest and extract in a list the Gene Symbols of all the genes belonging to the current gene set
EntrezConversion_df = pd.read_excel('./Genes_of_Interest.xlsx',sheetname='Sheet1',header=0,converters={'GENE_SYMBOL':str,'ENTREZ_GENE_ID':str,'GENE_SET':str})
SYMs_current_pathway = []
for index, row in EntrezConversion_df.iterrows():
sym = row['GENE_SYMBOL']
path = row['GENE_SET']
if path == gene_set:
SYMs_current_pathway.append(sym)
if (type == 'ffs_default') or (type == 'ffs_no_reval') or (type == 'all'):
# Create a dataframe to store results of feature selection for each gene
if ((type == 'ffs_default') or (type == 'ffs_no_reval')) and (model == '2'):
summary_results_df = pd.DataFrame(index=SYMs_current_pathway, columns=['TOT Inital N° Features','Discarded Features','N° Features Selected'])
elif (type == 'all'):
summary_results_df = | pd.DataFrame(index=SYMs_current_pathway, columns=['TOT Inital N° Features','Discarded Features','N° Features Selected']) | pandas.DataFrame |
import datetime as dt
import re
import numpy as np
import pandas as pd
import pytest
import scipy.interpolate
from base import _DataBaseCruncherTester
from pyam import IamDataFrame
from silicone.database_crunchers import (
Interpolation,
LinearInterpolation,
ScenarioAndModelSpecificInterpolate,
)
_ma = "model_a"
_mb = "model_b"
_mc = "model_c"
_sa = "scen_a"
_sb = "scen_b"
_sc = "scen_c"
_sd = "scen_d"
_se = "scen_e"
_sf = "scen_e"
_eco2 = "Emissions|CO2"
_gtc = "Gt C/yr"
_ech4 = "Emissions|CH4"
_mtch4 = "Mt CH4/yr"
_ec5f12 = "Emissions|HFC|C5F12"
_ktc5f12 = "kt C5F12/yr"
_ec2f6 = "Emissions|HFC|C2F6"
_ktc2f6 = "kt C2F6/yr"
_msrvu = ["model", "scenario", "region", "variable", "unit"]
class TestDatabaseCruncherScenarioAndModelSpecificInterpolate(_DataBaseCruncherTester):
tclass = ScenarioAndModelSpecificInterpolate
# The units in this dataframe are intentionally illogical for C5F12
tdb = pd.DataFrame(
[
[_ma, _sa, "World", _eco2, _gtc, 1, 2, 3, 4],
[_ma, _sb, "World", _eco2, _gtc, 1, 2, 2, 1],
[_mb, _sa, "World", _eco2, _gtc, 0.5, 3.5, 3.5, 0.5],
[_mb, _sb, "World", _eco2, _gtc, 3.5, 0.5, 0.5, 3.5],
[_ma, _sa, "World", _ech4, _mtch4, 100, 200, 300, 400],
[_ma, _sb, "World", _ech4, _mtch4, 100, 200, 250, 300],
[_mb, _sa, "World", _ech4, _mtch4, 220, 260, 250, 230],
[_mb, _sb, "World", _ech4, _mtch4, 50, 200, 500, 800],
[_ma, _sa, "World", _ec5f12, _mtch4, 3.14, 4, 5, 6],
[_ma, _sa, "World", _ec2f6, _ktc2f6, 1.2, 1.5, 1, 0.5],
],
columns=_msrvu + [2010, 2030, 2050, 2070],
)
large_db = pd.DataFrame(
[
[_ma, _sa, "World", _eco2, _gtc, 1],
[_ma, _sb, "World", _eco2, _gtc, 5],
[_mb, _sc, "World", _eco2, _gtc, 0.5],
[_mb, _sd, "World", _eco2, _gtc, 3.5],
[_mb, _se, "World", _eco2, _gtc, 0.5],
[_ma, _sa, "World", _ech4, _mtch4, 100],
[_ma, _sb, "World", _ech4, _mtch4, 170],
[_mb, _sc, "World", _ech4, _mtch4, 220],
[_mb, _sd, "World", _ech4, _mtch4, 50],
[_mb, _se, "World", _ech4, _mtch4, 150],
],
columns=_msrvu + [2010],
)
small_db = pd.DataFrame(
[[_mb, _sa, "World", _eco2, _gtc, 1.2], [_ma, _sb, "World", _eco2, _gtc, 2.3]],
columns=_msrvu + [2010],
)
tdownscale_df = pd.DataFrame(
[
[_mc, _sa, "World", _eco2, _gtc, 1, 2, 3, 4],
[_mc, _sb, "World", _eco2, _gtc, 0.5, 0.5, 0.5, 0.5],
[_mc, _sc, "World", _eco2, _gtc, 5, 5, 5, 5],
[_ma, _sc, "World", _eco2, _gtc, 1.5, 2.5, 2.8, 1.8],
],
columns=_msrvu + [2010, 2030, 2050, 2070],
)
simple_df = pd.DataFrame(
[
[_mc, _sa, "World", _eco2, _gtc, 0, 1000, 5000],
[_mc, _sb, "World", _eco2, _gtc, 1, 1000, 5000],
[_mc, _sa, "World", _ech4, _mtch4, 0, 300, 500],
[_mc, _sb, "World", _ech4, _mtch4, 1, 300, 500],
],
columns=_msrvu + [2010, 2030, 2050],
)
def test_database_crunchers_with_filters(self, test_db, simple_df):
test_db = self._adjust_time_style_to_match(test_db, simple_df)
tcruncher_filtered = self.tclass(test_db)
tcruncher_generic = Interpolation(test_db)
tcruncher_linear = LinearInterpolation(test_db)
filtered_cruncher = tcruncher_filtered.derive_relationship(
"Emissions|CO2", ["Emissions|CH4"]
)
infilled_filter = filtered_cruncher(simple_df)
linear_cruncher = tcruncher_linear.derive_relationship(
"Emissions|CO2", ["Emissions|CH4"]
)
infilled_linear = linear_cruncher(simple_df)
generic_cruncher = tcruncher_generic.derive_relationship(
"Emissions|CO2", ["Emissions|CH4"], interpkind="linear"
)
infilled_generic = generic_cruncher(simple_df)
pd.testing.assert_frame_equal(infilled_filter.data, infilled_linear.data)
| pd.testing.assert_frame_equal(infilled_generic.data, infilled_linear.data) | pandas.testing.assert_frame_equal |
import pandas as pd
from sklearn.metrics.pairwise import haversine_distances
from math import radians, degrees
from decouple import config
import numpy as np
from datetime import datetime, timedelta
from models.reoptimization_config import *
from main_config import *
# Sets
num_nodes = 2 * n
num_nodes_and_depots = (
2 * num_vehicles + 2 * n
) # num_vehicles is fetched from reopt config
# Costs and penalties
C_D = 1 # per vehicle
C_F = 60
C_T = 60
# Capacity per vehicle
Q_S = 5
Q_W = 1
# Allowed excess ride time
F = 0.5
# Weighting of Operational Costs vs Quality of Service
alpha = 0.5
# Different parameters per node
df = pd.read_csv(initial_events_path, nrows=n)
# Load for each request
L_S = df["Number of Passengers"].tolist()
L_W = df["Wheelchair"].tolist()
# Lat and lon for each request
origin_lat_lon = list(zip(np.deg2rad(df["Origin Lat"]), np.deg2rad(df["Origin Lng"])))
destination_lat_lon = list(
zip(np.deg2rad(df["Destination Lat"]), np.deg2rad(df["Destination Lng"]))
)
request_lat_lon = origin_lat_lon + destination_lat_lon
# Positions in degrees
origin_lat_lon_deg = list(zip(df["Origin Lat"], df["Origin Lng"]))
destination_lat_lon_deg = list(zip(df["Destination Lat"], df["Destination Lng"]))
request_lat_lon_deg = origin_lat_lon_deg + destination_lat_lon_deg
vehicle_lat_lon = []
vehicle_lat_lon_deg = []
# Origins for each vehicle
for i in range(num_vehicles):
vehicle_lat_lon.append((radians(59.946829115276145), radians(10.779841653639243)))
vehicle_lat_lon_deg.append((59.946829115276145, 10.779841653639243))
# Destinations for each vehicle
for i in range(num_vehicles):
vehicle_lat_lon.append((radians(59.946829115276145), radians(10.779841653639243)))
vehicle_lat_lon_deg.append((59.946829115276145, 10.779841653639243))
# Positions
lat_lon = request_lat_lon + vehicle_lat_lon
Position = request_lat_lon_deg + vehicle_lat_lon_deg
# Distance matrix
D_ij = haversine_distances(lat_lon, lat_lon) * 6371
# Travel time matrix
speed = 40
T_ij = np.empty(shape=(num_nodes_and_depots, num_nodes_and_depots), dtype=timedelta)
for i in range(num_nodes_and_depots):
for j in range(num_nodes_and_depots):
T_ij[i][j] = timedelta(hours=(D_ij[i][j] / speed)).total_seconds() / 3600
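# Note: with D_ij in km and speed in km/h, the loop above reduces to the
# vectorized equivalent T_ij = D_ij / speed (travel time in hours).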
# Time windows
T_S_L = pd.to_datetime(df["T_S_L_P"]).tolist() + pd.to_datetime(df["T_S_L_D"]).tolist()
T_S_U = pd.to_datetime(df["T_S_U_P"]).tolist() + | pd.to_datetime(df["T_S_U_D"]) | pandas.to_datetime |
from flask import Flask, escape, request
from flask_cors import cross_origin
from flask import current_app, flash, jsonify, make_response, redirect, request, url_for
import time
from math import sin, cos, sqrt, atan2, radians
from networkx.readwrite import json_graph
import networkx as nx
from functools import reduce
import pandas as pd
import functions
app = Flask(__name__)
allowed_keys = ['device_key' , 'lat', 'long', 'duration', 'state', 'zipcode', 'observation_count' ]
@app.route('/get_data', methods=['POST'])
@cross_origin()
def get_data():
global df,filtered_df
#pdb.set_trace()
try:
print(request.json)
selections = request.json.get('filters','')
def appendQuotes(key,value,df):
if df.dtypes[key] == 'O':
value = f'"{str(value)}"'
return value
filters = '(' + ') & ('.join([ ' | '.join([k + '==' + appendQuotes(k,i,df) for i in v]) for k,v in selections.items() if k in allowed_keys and k != '']) + ')'
filters = filters.strip()
expr = ''
#pdb.set_trace()
if selections.get("index"):
expr = 'index < ' + selections.get("index") + ' & '
if filters != '()':
expr += filters
#pdb.set_trace()
if '&' not in expr:
expr = expr.strip('()')
filtered_df = df
print(expr)
if expr.strip():
filtered_df = df.query(expr)
filtered_df = filtered_df.iloc[:int(selections.get("max_items",[200])[0])]
#pdb.set_trace()
filtered_df['t0'] = filtered_df.groupby('device_key')['unix_time'].transform(lambda x: x.iat[0])
filtered_df['lat0'] = filtered_df.groupby('device_key')['lat'].transform(lambda x: x.iat[0])
filtered_df['lon0'] = filtered_df.groupby('device_key')['long'].transform(lambda x: x.iat[0])
filtered_df['dist_m'] = filtered_df.apply(
lambda row: getDistanceFromLatLonInKm(
lat1=row['lat'],
lon1=row['long'],
lat2=row['lat0'],
lon2=row['lon0']
),
axis=1
)
# create a new column for velocity
filtered_df['velocity_mps'] = filtered_df.apply(
lambda row: calc_velocity(
dist_m=row['dist_m'],
time_start=row['t0'],
time_end=row['unix_time']
),
axis=1
)
filtered_df['coordinates'] = filtered_df[['long','lat']].values.tolist()
#filtered_df.drop(columns=['long','lat'],inplace=True)
fdf = filtered_df.groupby('device_key').apply(lambda x : x.to_dict(orient='list')).to_list()
#pdb.set_trace()
response = {'data': fdf } #recur_dictify(fdf.reset_index())
except Exception as e:
response = {'error':'Exception while fetching data:'+str(e)}
return response
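# Example of the query string built in get_data (illustrative values): with
# selections = {'state': ['NY', 'NJ'], 'zipcode': ['10001']} the expression
# becomes '(state=="NY" | state=="NJ") & (zipcode=="10001")', which is then
# evaluated by DataFrame.query() on the loaded parquet data.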
def recur_dictify(frame):
if len(frame.columns) == 1:
if frame.values.size == 1: return frame.values[0][0]
return frame.values.squeeze()
grouped = frame.groupby(frame.columns[0])
d = {k: recur_dictify(g.iloc[:,1:]) for k,g in grouped}
return d
@app.route('/get_form_fields', methods=['GET'])
@cross_origin()
def get_form_fields():
global df,filtered_df
print(request.args)
#pdb.set_trace()
col = request.args.get("column")
key = request.args.get("key")
if not col or not key:
return {'error':'Specify column as the column name from which to search and key as the search value. Can only be done on str columns'}
if ((df[col].dtype != 'object')):
return {'error': 'This endpoint can only return searches on str dataypes'}
retlist = df[df[col].str.contains(key,case=False)][col].unique()[:int(request.args.get("max_items",10))].tolist()
response={col:retlist}
print(response)
return response
"""
@app.route('/get_graph_metrics', methods=['GET'])
@cross_origin()
def get_graph_metrics():
global G
damping_factor=0.85
personalization={}
json_g = json_graph.node_link_data(G)
pagerank = nx.pagerank(G, alpha=damping_factor,personalization=personalization)
harmonic_centrality = nx.harmonic_centrality(G,distance='weight')
pdb.set_trace()
response = {}
if json_g['nodes']:
response['overlap'] = 'success'
else:
response['overlap'] = 'error'
return response
"""
@app.route('/get_graph', methods=['GET'])
@cross_origin()
def get_graph_data():
global df, filtered_df, G
#pdb.set_trace()
print(filtered_df.head(1000).describe())
print("Start time: " + str(time.time()))
cdf, centroids = functions.get_clusters(filtered_df.head(1000))
graphdf = functions.get_places(cdf,overlap_threshold=900)
nodes = cdf['device_key'].unique()
G = functions.get_graph(graphdf)
json_g = json_graph.node_link_data(G)
    pagerank = nx.pagerank(G, alpha=0.85)  # an empty personalization dict would raise in networkx; use the uniform default
harmonic_centrality = nx.harmonic_centrality(G,distance='weight')
response = {}
#pdb.set_trace()
if json_g['nodes']:
response['graph'] = json_g
for k in json_g['nodes']:
id = k['id']
k['pagerank'] = pagerank[id]
k['harmonic_centrality'] = harmonic_centrality[id]
response['overlap'] = 'success'
else:
response['overlap'] = "No data returned. There is no overlap between the nodes."
print("End time:" + str(time.time()))
print(response)
return response
def getDistanceFromLatLonInKm(lat1,lon1,lat2,lon2):
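    """Great-circle distance between two lat/lon points via the haversine formula.

    Note: with R = 6371 the value returned is in kilometres, even though the
    downstream columns are named `dist_m` / `velocity_mps`.
    """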
    R = 6371 # Radius of the earth in km
dLat = radians(lat2-lat1)
dLon = radians(lon2-lon1)
rLat1 = radians(lat1)
rLat2 = radians(lat2)
a = sin(dLat/2) * sin(dLat/2) + cos(rLat1) * cos(rLat2) * sin(dLon/2) * sin(dLon/2)
c = 2 * atan2(sqrt(a), sqrt(1-a))
    d = R * c # Distance in km
return d
def calc_velocity(dist_m, time_start, time_end):
"""Return 0 if time_start == time_end, avoid dividing by 0"""
return dist_m / (time_end - time_start) if time_end > time_start else 0
import pdb
if __name__ == "__main__":
lst_str_cols = ['device_key','zipcode']
    df = pd.read_parquet('../../data/tmp.parquet', engine='pyarrow')
"""
author: <NAME> & <NAME>
Implementation of the climate data-utils for our training framework (i.e. on
synthetic SDE data).
This is mainly a copy of the data_utils.py file from the official implementation
of GRU-ODE-Bayes: https://github.com/edebrouwer/gru_ode_bayes.
"""
import torch
import pandas as pd
import numpy as np
import math
from torch.utils.data import Dataset, DataLoader
from scipy import special
class ODE_DatasetNumpy(Dataset):
"""Dataset class for ODE type of data. Fed from numpy arrays.
Args:
times array of times
ids ids (ints) of patients (samples)
values value matrix, each line is one observation
masks observation mask (1.0 means observed, 0.0 missing)
"""
def __init__(self, times, ids, values, masks):
assert times.shape[0] == ids.shape[0]
assert times.shape[0] == values.shape[0]
assert values.shape == masks.shape
times = times.astype(np.float32)
values = values.astype(np.float32)
masks = masks.astype(np.float32)
df_values = pd.DataFrame(values, columns=[f"Value_{i}" for i in range(values.shape[1])])
df_masks = pd.DataFrame(masks, columns=[f"Mask_{i}" for i in range(masks.shape[1])])
self.df = pd.concat([
pd.Series(times, name="Time"),
pd.Series(ids, name="ID"),
df_values,
df_masks,
], axis=1)
self.df.sort_values("Time", inplace=True)
self.length = self.df["ID"].nunique()
self.df.set_index("ID", inplace=True)
def __len__(self):
return self.length
def __getitem__(self, idx):
subset = self.df.loc[idx]
covs = self.df.loc[idx,"Time"] # To change !! TODO: this does not return cov from data
## returning also idx to allow empty samples
return {"idx": idx, "y": 0, "path": subset, "cov": covs }
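# Illustrative usage sketch (not part of the original module): build a tiny
# ODE_DatasetNumpy from synthetic observations and iterate over it once.
# All names and sizes below are arbitrary examples, not values from the paper.
def _example_ode_dataset_numpy():
    n_samples, obs_per_sample, n_dims = 4, 5, 2
    n_obs = n_samples * obs_per_sample
    times = np.sort(np.random.rand(n_obs))
    ids = np.repeat(np.arange(n_samples), obs_per_sample)   # every ID 0..3 is present
    values = np.random.randn(n_obs, n_dims)
    masks = (np.random.rand(n_obs, n_dims) > 0.5).astype(float)
    dataset = ODE_DatasetNumpy(times, ids, values, masks)
    # Each item is a dict whose "path" entry is the per-sample observation DataFrame,
    # so a pass-through collate_fn keeps the batch as a plain list of dicts.
    loader = DataLoader(dataset, batch_size=2, collate_fn=lambda batch: batch)
    return next(iter(loader))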
class ODE_Dataset(Dataset):
"""
Dataset class for ODE type of data. With 2 values.
Can be fed with either a csv file containg the dataframe or directly with a panda dataframe.
One can further provide samples idx that will be used (for training / validation split purposes.)
"""
def __init__(self, csv_file=None, cov_file=None, label_file=None, panda_df=None, cov_df=None, label_df=None, root_dir="./", t_mult=1.0, idx=None, jitter_time=0, validation = False, val_options = None):
"""
Args:
csv_file CSV file to load the dataset from
panda_df alternatively use pandas df instead of CSV file
root_dir directory of the CSV file
t_mult multiplier for time values (1.0 default)
jitter_time jitter size (0 means no jitter), to add randomly to Time.
Jitter is added before multiplying with t_mult
validation boolean. True if this dataset is for validation purposes
            val_options dictionary with validation dataset options.
T_val : Time after which observations are considered as test samples
max_val_samples : maximum number of test observations per trajectory.
"""
self.validation = validation
if panda_df is not None:
assert (csv_file is None), "Only one feeding option should be provided, not both"
self.df = panda_df
self.cov_df = cov_df
self.label_df = label_df
else:
assert (csv_file is not None) , "At least one feeding option required !"
self.df = pd.read_csv(root_dir + "/" + csv_file)
assert self.df.columns[0]=="ID"
if label_file is None:
self.label_df = None
else:
self.label_df = pd.read_csv(root_dir + "/" + label_file)
assert self.label_df.columns[0]=="ID"
assert self.label_df.columns[1]=="label"
if cov_file is None :
self.cov_df = None
else:
            self.cov_df = pd.read_csv(root_dir + "/" + cov_file)
# %%
import datetime
import pandas
import altair
from plot_shared import plot_points_average_and_trend
# %%
df = pandas.read_csv('https://api.coronavirus.data.gov.uk/v2/data?areaType=nation&metric=cumPeopleVaccinatedFirstDoseByPublishDate&metric=cumPeopleVaccinatedSecondDoseByPublishDate&format=csv')
df.rename(columns={
'cumPeopleVaccinatedFirstDoseByPublishDate': 'First',
'cumPeopleVaccinatedSecondDoseByPublishDate': 'Second',
'areaName': 'Nation',
'date': 'Publication Date'
}, inplace=True)
df = df.drop(columns=['areaCode','areaType']).melt(id_vars=['Publication Date','Nation'], var_name='Dose', value_name='People')
# %%
ni = pandas.read_csv('../sam/doses.csv')
ni['Dose'] = ni['Dose'].str.replace('Dose 1', 'First')
ni['Dose'] = ni['Dose'].str.replace('Dose 2', 'Second')
ni['Dose'] = ni['Dose'].str.replace('Dose 3', 'Third')
# %%
history = df[df['Nation']=='Northern Ireland'][['Publication Date','Dose','People']]
ni.rename(columns={'Date':'Publication Date','Total':'People'}, inplace=True)
all = history.merge(ni, on=['Publication Date','Dose'], how='outer', suffixes=('','_bot'))
all['People'] = all['People'].fillna(all['People_bot'])
all = all[['Publication Date','Dose','People']]
# %%
boosters = all[all['Dose']=='Booster'][['Publication Date','People']]
boosters['Publication Date'] = pandas.to_datetime(boosters['Publication Date'])
r"""Plots figures and tables for the paper.
## Overview
The experiments output logging directories which are large and difficult to
manage. This script first gathers the relevant data from these directories into
one file, `figure_data.json`. `figure_data.json` is then passed around in order
to make the figures.
## Generating `figure_data.json` from logging directories
(If you already have `figure_data.json`, skip this section)
After running your experiments, arrange your logging directories as follows:
logs/ # Any name is okay; you could even put it in `.` but that is messy.
- manifest.yaml
- logging-directory-1
- logging-directory-2
- ...
`manifest.yaml` lists the directories that were generated from your experiments.
It must be located in the same directory as all the logging directories, and it
must have the following format:
Paper: # Top-level object.
Environment 1:
old_min_obj: -8765.4321...
min_obj: -1234.5678...
max_obj: 3210.5678...
archive_size: 1024
algorithms:
Algorithm 1:
- dir: logging-directory-1...
seed: 1
- dir: logging-directory-2...
seed: 3 # Make sure this matches the seed for the experiment.
...
Algorithm 2:
- exclude # Causes this algorithm to be excluded.
- dir: logging-directory-3...
seed: 1
...
...
...
The fields are as follows:
- `old_min_obj` and `min_obj` are used for the QD score calculations --
`old_min_obj` is the min that was used for calculating the QD score during the
experiment, and after the experiment, we recalculate the QD score with the
`min_obj`. This is necessary since the final QD score offset is based on the
lowest objective value that was ever inserted into the archive (see the
`find_min` function below), and we do not know this value during the
experiments.
- `max_obj` is the maximum objective in the environment
- `archive_size` is the number of cells in the archive grid
You can leave `min_obj` blank for now. We'll generate it in the next step.
Once you have this manifest, run the following commands (replace
`logs/manifest.yaml` with the path to your manifest). Run all Python commands in
the Singularity container associated with this project, e.g. run `make shell`
to start a shell in the container and run the commands in that shell.
# Collect min objectives for each environment with the following command,
# and manually add these under the min_obj field in the manifest.
python -m src.analysis.figures find_min logs/manifest.yaml
# Run robustness calculations. See analysis/robustness.py for more details.
# This command will take a while and probably requires a multi-core machine.
bash scripts/run_robustness_local.sh logs/manifest.yaml 42 8
# Generate `figure_data.json`
python -m src.analysis.figures collect logs/manifest.yaml
For reference, figure_data.json looks like this:
{
"Env 1": {
# List of entries for the algorithm, where each entry contains data
# from a logging directory.
"Algo 1": [
{
# Number of evaluations completed after each iteration. Used
# on x-axis.
"Evaluations": [...],
# Metrics with a series of values from each generation. Some
# lists have length `gens + 1` because they include a 0 at
# the start, and others only have length `gens`.
"series_metrics": {
"QD Score": [...],
# QD Score divided by max QD score. Only used in
# statistical analysis.
"Normalized QD Score": [...],
"Archive Coverage": [...],
"Best Performance": [...],
}
# Metrics that only have a final value.
"point_metrics": {
# Average robustness of elites in final archive.
"Mean Elite Robustness": XXX,
# Mean Elite Robustness divided by objective range (max_obj -
# min_obj). Only used in statistical analysis.
"Normalized Mean Elite Robustness": XXX,
# Total runtime in hours.
"Runtime (Hours)": XXX,
},
},
...
],
...
},
...
}
## Generating figures
Run these commands to generate all figures associated with the paper (replace
`figure_data.json` with the path to your figure_data). The default values are
set such that these commands generate the versions shown in the paper. Run all
Python commands in the Singularity container associated with this project, e.g.
run `make shell` to start a shell in the container and run the commands in that
shell.
# For the comparison figure.
python -m src.analysis.figures comparison figure_data.json
# For the higher-res version of the comparison figure.
python -m src.analysis.figures comparison_high_res figure_data.json
# To generate the latex source for the tables in the paper.
python -m src.analysis.figures table figure_data.json
# To generate statistical test results.
python -m src.analysis.figures tests figure_data.json
If including the Latex files output by these commands, make sure to also put
these commands in your paper:
\usepackage{booktabs}
\usepackage{multirow}
\usepackage{array}
\newcolumntype{L}[1]
{>{\raggedright\let\newline\\\arraybackslash\hspace{0pt}}m{#1}}
\newcolumntype{C}[1]
{>{\centering\let\newline\\\arraybackslash\hspace{0pt}}m{#1}}
\newcolumntype{R}[1]
{>{\raggedleft\let\newline\\\arraybackslash\hspace{0pt}}m{#1}}
## Help
Run the following for more help:
python -m src.analysis.figures COMMAND --help
"""
import itertools
import json
import pickle as pkl
import shutil
from collections import OrderedDict, defaultdict
from pathlib import Path
from typing import Iterable, List
import fire
import gin
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import pingouin
import scipy.stats
import slugify
from logdir import LogDir
from loguru import logger
from ruamel.yaml import YAML
from statsmodels.graphics.factorplots import interaction_plot
import seaborn as sns
from src.analysis.utils import (is_me_es, load_experiment, load_me_es_objs,
load_metrics)
from src.mpl_styles import QUALITATIVE_COLORS
from src.mpl_styles.utils import mpl_style_file
# Metrics which we record in figure_data.json but do not plot.
METRIC_BLACKLIST = [
"Normalized QD Score",
"Normalized Mean Elite Robustness",
"Normalized QD Score AUC",
]
# Reordered version of Seaborn colorblind palette.
COLORBLIND_REORDERED = np.array(
sns.color_palette("colorblind"))[[0, 1, 2, 8, 4, 3, 5, 6, 7, 9]]
def load_manifest(manifest: str, entry: str = "Paper"):
"""Loads the data and root directory of the manifest."""
manifest = Path(manifest)
data = YAML().load(manifest)[entry]
root_dir = manifest.parent
return data, root_dir
def exclude_from_manifest(paper_data, env, algo):
"""Whether to exclude the given algo."""
return "exclude" in paper_data[env]["algorithms"][algo]
def verify_manifest(paper_data, root_dir: Path, reps: int):
"""Checks logging directories for correctness."""
for env in paper_data:
for algo in paper_data[env]["algorithms"]:
if exclude_from_manifest(paper_data, env, algo):
continue
results = paper_data[env]["algorithms"][algo]
name = f"(Env: {env} Algo: {algo})"
if reps is not None:
# Check that the reps are correct.
assert len(results) == reps, \
f"{name} {len(results)} dirs, needs {reps}"
# Check logging dirs are unique.
logdirs = [d["dir"] for d in results]
assert len(logdirs) == len(set(logdirs)), \
f"{name} {len(logdirs)} dirs listed, {len(set(logdirs))} unique"
# Check seeds are unique.
seeds = [d["seed"] for d in results]
assert len(seeds) == len(set(seeds)), \
f"{name} {len(seeds)} seeds listed, {len(set(seeds))} unique"
# Check seeds match.
for d in results:
logdir = LogDir("tmp", custom_dir=root_dir / d["dir"])
actual_seed = int(logdir.pfile("seed").open("r").read())
assert actual_seed == d["seed"], \
(f"{name} Seed for {logdir} ({d['seed']}) does not match "
f"actual seed ({actual_seed})")
def find_min(manifest: str):
"""Finds the min obj that was inserted into the archive in any experiment.
This minimum objective is calculated for each of the environments. It is
then used e.g. in computing the QD score.
Args:
manifest: Path to YAML file holding paths to logging directories.
"""
paper_data, root_dir = load_manifest(manifest)
data = defaultdict(list)
for env in paper_data:
for algo in paper_data[env]["algorithms"]:
if exclude_from_manifest(paper_data, env, algo):
continue
data[env].extend(
[d["dir"] for d in paper_data[env]["algorithms"][algo]])
for env in data:
logdirs = data[env]
min_score = np.inf
min_logdir = None
for d in logdirs:
logdir = load_experiment(root_dir / d)
if is_me_es():
archive_history = load_me_es_objs(
logdir, return_archive=True).history()
else:
archive_type = str(gin.query_parameter("Manager.archive_type"))
if archive_type != "@GridArchive":
print(logdir.logdir)
raise TypeError(f"Unknown archive type {archive_type}")
archive_history_path = logdir.pfile("archive_history.pkl")
with archive_history_path.open("rb") as file:
archive_history = pkl.load(file)
for gen_history in archive_history:
for obj, _ in gen_history:
min_score = min(min_score, obj)
min_logdir = root_dir / d
logger.info("{}: {} ({})", env, min_score, min_logdir)
def collect(manifest: str, reps: int = 5, output: str = "figure_data.json"):
"""Collects data from logging directories and aggregates into a single JSON.
Args:
manifest: Path to YAML file holding paths to logging directories.
reps: Number of times each experiment should be repeated.
output: Path to save results.
"""
logger.info("Loading manifest")
paper_data, root_dir = load_manifest(manifest)
logger.info("Verifying logdirs")
verify_manifest(paper_data, root_dir, reps)
# Mapping from the name in the raw metrics to the name in the output.
metric_names = {
"Actual QD Score": "QD Score",
# "Normalized QD Score" gets added when we calculate "QD Score".
"Archive Coverage": "Archive Coverage",
"Best Performance": "Best Performance",
}
figure_data = {}
logger.info("Loading Plot Data")
for env in paper_data:
figure_data[env] = OrderedDict()
env_data = paper_data[env]
for algo in env_data["algorithms"]:
if exclude_from_manifest(paper_data, env, algo):
continue
figure_data[env][algo] = []
for entry in env_data["algorithms"][algo]:
figure_data[env][algo].append(cur_data := {})
logdir = load_experiment(root_dir / entry["dir"])
metrics = load_metrics(logdir)
total_evals = metrics.get_single("Total Evals")["y"]
cur_data["Evaluations"] = total_evals
cur_data["series_metrics"] = {}
qd_score_auc, norm_qd_score_auc = None, None
for actual_name, figure_name in metric_names.items():
data = metrics.get_single(actual_name)
if actual_name == "Actual QD Score":
# Adjust QD score with new min -- we exclude the first
# element since it is always 0.
archive_size = np.array(
metrics.get_single("Archive Size")["y"])[1:]
qd_score = np.array(data["y"])[1:]
qd_score = (
qd_score + archive_size *
(env_data["old_min_obj"] - env_data["min_obj"]))
data["y"][1:] = qd_score.tolist()
cur_data["series_metrics"][figure_name] = data["y"]
# Also add in normalized QD score.
max_qd_score = env_data["archive_size"] * (
env_data["max_obj"] - env_data["min_obj"])
norm_qd_score = (np.array(data["y"]) /
max_qd_score).tolist()
cur_data["series_metrics"][
"Normalized QD Score"] = norm_qd_score
# Finally, add in AUCs - for this paper, it's reasonable
# to assume that every generation has the same number of
# evals.
evals_per_gen = total_evals[-1] / (len(total_evals) - 1)
qd_score_auc = sum(qd_score) * evals_per_gen
norm_qd_score_auc = sum(norm_qd_score) * evals_per_gen
else:
cur_data["series_metrics"][figure_name] = data["y"]
# Load robustness from the data created by robustness.py.
robustness_file = logdir.file(
f"archive/archive_{len(total_evals) - 1}_robustness.pkl")
robustness = pd.read_pickle(
robustness_file)["robustness"].mean()
cur_data["point_metrics"] = {
"Mean Elite Robustness":
robustness,
"Normalized Mean Elite Robustness":
robustness /
(env_data["max_obj"] - env_data["min_obj"]),
"Runtime (Hours)":
metrics.get_single("Cum Time")["y"][-1] / 3600,
"QD Score AUC":
qd_score_auc,
"Normalized QD Score AUC":
norm_qd_score_auc,
}
logger.info("Saving to {}", output)
with open(output, "w") as file:
json.dump(figure_data, file)
logger.info("Done")
def legend_info(names: Iterable, palette: dict, markers: dict):
"""Creates legend handles and labels for the given palette and markers.
Yes, this is kind of a hack.
"""
_, ax = plt.subplots(1, 1)
for name in names:
# We just need the legend handles, so the plot itself is bogus.
ax.plot(
[0],
[0],
label=name,
color=palette[name],
marker=markers[name],
markeredgewidth="0.75",
markeredgecolor="white",
)
return ax.get_legend_handles_labels()
def load_figure_data(figure_data: str):
with open(figure_data, "r") as file:
return json.load(file)
def metric_from_entry(entry, metric):
"""Retrieves the metric from either series_metrics or point_metrics.
entry is a dict in the list associated with figure_data[env][algo]
"""
return (entry["series_metrics"][metric][-1] if metric
in entry["series_metrics"] else entry["point_metrics"][metric])
def comparison(
figure_data: str = "./figure_data.json",
outputs: List[str] = ("comparison.pdf", "comparison.png"),
palette: str = "colorblind_reordered",
height: float = 1.9,
plot_every: int = 25,
):
"""Plots the figure comparing metrics of all algorithms.
Args:
figure_data: Path to JSON file with figure data.
outputs: File to output the figure to.
palette: Either a Seaborn color palette, "qualitative_colors" for
QUALITATIVE_COLORS, or "colorblind_reordered" for
COLORBLIND_REORDERED.
height: Height in inches of each plot.
plot_every: How frequently to plot points, e.g. plot every 100th point.
"""
figure_data = load_figure_data(figure_data)
plot_data = {
"Environment": [],
"Algorithm": [],
"Metric": [],
"Evaluations": [],
"Score": [],
}
logger.info("Loading Plot Data")
all_algos = OrderedDict() # Set of all algorithms, ordered by listing.
for env in figure_data:
for algo in figure_data[env]:
all_algos[algo] = None
for entry in figure_data[env][algo]:
# Has a length of generations + 1, since we add an extra 0 at
# the start.
evals = np.asarray(entry["Evaluations"])
entry_metrics = entry["series_metrics"]
for metric in entry_metrics:
if metric in METRIC_BLACKLIST:
continue
# Plot fewer data points to reduce file size.
# Metrics may have length of generations or generations + 1,
# as only some metrics (like archive size) add a 0 at the
# start.
metric_data = entry_metrics[metric]
raw_len = len(metric_data)
not_use_zero = int(len(metric_data) != len(evals))
gens = len(evals) - 1
# Start at 0 or 1 and end at gens.
x_data = np.arange(not_use_zero, gens + 1)
idx = list(range(0, raw_len, plot_every))
if idx[-1] != raw_len - 1:
# Make sure last index is included.
idx.append(raw_len - 1)
indexed_x_data = x_data[idx]
indexed_evals = evals[indexed_x_data]
indexed_metric_data = np.asarray(entry_metrics[metric])[idx]
data_len = len(indexed_evals)
plot_data["Environment"].append(np.full(data_len, env))
plot_data["Algorithm"].append(np.full(data_len, algo))
plot_data["Metric"].append(np.full(data_len, metric))
plot_data["Evaluations"].append(indexed_evals)
plot_data["Score"].append(indexed_metric_data)
# Flatten everything so that Seaborn understands it.
for d in plot_data:
plot_data[d] = np.concatenate(plot_data[d])
logger.info("Generating Plot")
with mpl_style_file("simple.mplstyle") as f:
with plt.style.context(f):
if palette == "qualitative_colors":
colors = QUALITATIVE_COLORS
elif palette == "colorblind_reordered":
# Rearrange the color-blind template.
colors = COLORBLIND_REORDERED
else:
colors = sns.color_palette(palette)
palette = dict(zip(all_algos, colors))
markers = dict(zip(all_algos, itertools.cycle("oD^vXP")))
# This takes a while since it has to generate the bootstrapped
# confidence intervals.
grid = sns.relplot(
data=plot_data,
x="Evaluations",
y="Score",
hue="Algorithm",
style="Algorithm",
row="Metric",
col="Environment",
kind="line",
# TODO: Remove this note when Seaborn 0.12.0 is released.
# errorbar is a feature in the development version of Seaborn.
errorbar="se",
markers=markers,
markevery=(0.5, 10.0),
dashes=False,
height=height,
aspect=1.61803, # Golden ratio.
facet_kws={"sharey": False},
palette=palette,
legend=False,
)
# Set titles to be the env name.
grid.set_titles("{col_name}")
# Turn off titles below top row (no need to repeat).
for ax in grid.axes[1:].ravel():
ax.set_title("")
# Set the labels along the left column to be the name of the metric.
left_col = list(figure_data)[0]
for (row_val, col_val), ax in grid.axes_dict.items():
ax.set_axisbelow(True)
if col_val == left_col:
ax.set_ylabel(row_val, labelpad=10.0)
# Add legend and resize figure to fit it.
grid.fig.legend(*legend_info(all_algos, palette, markers),
bbox_to_anchor=[0.5, 1.0],
loc="upper center",
ncol=(len(palette) + 1) // 2)
fig_width, fig_height = grid.fig.get_size_inches()
legend_height = 0.55
grid.fig.set_size_inches(fig_width, fig_height + legend_height)
# Save the figure.
grid.fig.tight_layout(rect=(0, 0, 1, fig_height /
(fig_height + legend_height)))
for output in outputs:
grid.fig.savefig(output, dpi=300)
logger.info("Done")
def comparison_high_res(
figure_data: str = "./figure_data.json",
outputs: List[str] = ("comparison_high_res.pdf", "comparison_high_res.png"),
):
"""Generates the larger version of the figure for the supplemental material.
Simply calls comparison with the appropriate args.
"""
return comparison(figure_data, outputs, height=4, plot_every=5)
# Header lines for table files.
TABLE_HEADER = r"""
% THIS FILE IS AUTO-GENERATED. DO NOT MODIFY THIS FILE DIRECTLY.
"""
def table(figure_data: str = "figure_data.json",
transpose: bool = True,
output: str = "results_table.tex"):
"""Creates Latex tables showing final values of metrics.
Make sure to include the "booktabs" and "array" package in your Latex
document.
With transpose=False, a table is generated for each environment. Each table
has the algorithms as rows and the metrics as columns.
With transpose=True, a table is generated for each metric. Each table has
the algorithms as rows and the environments as columns.
Args:
figure_data: Path to JSON file with figure data.
transpose: See above.
output: Path to save Latex table.
"""
figure_data = load_figure_data(figure_data)
# Safe to assume all envs have same metrics.
first_env = list(figure_data)[0]
first_algo = list(figure_data[first_env])[0]
first_entry = figure_data[first_env][first_algo][0]
metric_names = (list(first_entry["series_metrics"]) +
list(first_entry["point_metrics"]))
for name in METRIC_BLACKLIST:
metric_names.remove(name)
if "QD Score AUC" in metric_names:
# Move QD Score AUC immediately after QD Score.
metric_names.remove("QD Score AUC")
metric_names.insert(metric_names.index("QD Score") + 1, "QD Score AUC")
logger.info("Metric names: {}", metric_names)
table_data = {}
logger.info("Gathering table data")
for env in figure_data:
table_data[env] = pd.DataFrame(index=list(figure_data[env]),
columns=metric_names,
dtype=str)
for algo in figure_data[env]:
for metric in metric_names:
if metric in first_entry["series_metrics"]:
final_metric_vals = np.array([
entry["series_metrics"][metric][-1]
for entry in figure_data[env][algo]
])
else:
final_metric_vals = np.array([
entry["point_metrics"][metric]
for entry in figure_data[env][algo]
])
if metric == "QD Score AUC":
# Special case because these values are so large.
table_data[env][metric][algo] = (
f"{final_metric_vals.mean() / 1e12:,.2f}")
else:
table_data[env][metric][algo] = (
f"{final_metric_vals.mean():,.2f}")
if transpose:
# "Invert" table_data.
table_data = {
metric:
pd.DataFrame({env: df[metric] for env, df in table_data.items()})
for metric in metric_names
}
logger.info("Writing to {}", output)
with open(output, "w") as file:
file.write(TABLE_HEADER)
for name, df in table_data.items():
if name == "QD Score AUC":
caption = name + " (multiple of $10^{12}$)"
else:
caption = name
file.write("\\begin{table*}[t]\n")
file.write("\\caption{" + caption + "}\n")
file.write("\\label{table:" + slugify.slugify(name) + "}\n")
file.write("\\begin{center}\n")
file.write(
df.to_latex(
column_format="l" + " R{0.9in}" * len(df.columns),
escape=False,
))
file.write("\\end{center}\n")
file.write("\\end{table*}\n")
file.write("\n")
logger.info("Done")
def calc_simple_main_effects(figure_data, anova_res, metric):
"""Calculates simple main effects in each environment.
Reference:
http://www.cee.uma.pt/ron/Discovering%20Statistics%20Using%20SPSS,%20Second%20Edition%20CD-ROM/Calculating%20Simple%20Effects.pdf
"""
df_residual = anova_res["DF"][3]
ms_residual = anova_res["MS"][3]
data = {
"Environment": ["Residual"],
"SS": [anova_res["SS"][3]],
"DF": [df_residual],
"MS": [ms_residual],
"F": [np.nan],
"p-unc": [np.nan],
"significant": [False],
}
for env in figure_data:
data["Environment"].append(env)
algos, metric_vals = [], []
for algo in figure_data[env]:
entry_metrics = [
metric_from_entry(entry, metric)
for entry in figure_data[env][algo]
]
algos.extend([algo] * len(entry_metrics))
metric_vals.extend(entry_metrics)
one_way = pingouin.anova(
dv=metric,
between=["Algorithm"],
data=pd.DataFrame({
"Algorithm": algos,
metric: metric_vals,
}),
detailed=True,
)
f_val = one_way["MS"][0] / ms_residual
p_unc = scipy.stats.f(one_way["DF"][0], df_residual).sf(f_val)
sig = p_unc < 0.05
data["SS"].append(one_way["SS"][0])
data["DF"].append(one_way["DF"][0])
data["MS"].append(one_way["MS"][0])
data["F"].append(f_val)
data["p-unc"].append(p_unc)
data["significant"].append(sig)
    return pd.DataFrame(data)
##############################################################################
#Copyright 2019 Google LLC
#
#Licensed under the Apache License, Version 2.0 (the "License");
#you may not use this file except in compliance with the License.
#You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
#Unless required by applicable law or agreed to in writing, software
#distributed under the License is distributed on an "AS IS" BASIS,
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#See the License for the specific language governing permissions and
#limitations under the License.
################################################################################
from sklearn.ensemble import ExtraTreesClassifier, ExtraTreesRegressor
import warnings
warnings.filterwarnings("ignore")
from sklearn.exceptions import DataConversionWarning
warnings.filterwarnings(action='ignore', category=DataConversionWarning)
with warnings.catch_warnings():
warnings.simplefilter("ignore")
from numpy import inf
from sklearn.linear_model import LassoCV, RidgeCV
from sklearn.linear_model import Lasso, Ridge
from sklearn.model_selection import StratifiedShuffleSplit
import matplotlib.pylab as plt
try:
    get_ipython().magic(u'matplotlib inline')   # only available inside IPython/Jupyter
except NameError:
    pass  # running as a plain Python module; skip the inline magic
from matplotlib.pylab import rcParams
rcParams['figure.figsize'] = 10, 6
from sklearn.metrics import classification_report, confusion_matrix
from functools import reduce
from sklearn.preprocessing import MinMaxScaler, StandardScaler
from sklearn.metrics import make_scorer
from sklearn.metrics import accuracy_score
from sklearn.metrics import precision_score
from sklearn.metrics import roc_auc_score
from sklearn.model_selection import RandomizedSearchCV
from sklearn.preprocessing import OneHotEncoder, LabelEncoder
from sklearn.model_selection import RepeatedKFold, RepeatedStratifiedKFold
from sklearn.ensemble import RandomForestRegressor, RandomForestClassifier
from sklearn.calibration import CalibratedClassifierCV
from autoviml.QuickML_Stacking import QuickML_Stacking
from autoviml.Transform_KM_Features import Transform_KM_Features
from autoviml.QuickML_Ensembling import QuickML_Ensembling
from autoviml.Auto_NLP import Auto_NLP, select_top_features_from_SVD
import xgboost as xgb
import sys
# additional imports required by the functions below
import copy
import time
import numpy as np
import pandas as pd
from collections import Counter, OrderedDict, defaultdict
##################################################################################
def find_rare_class(classes, verbose=0):
######### Print the % count of each class in a Target variable #####
"""
Works on Multi Class too. Prints class percentages count of target variable.
It returns the name of the Rare class (the one with the minimum class member count).
This can also be helpful in using it as pos_label in Binary and Multi Class problems.
"""
counts = OrderedDict(Counter(classes))
total = sum(counts.values())
if verbose >= 1:
print(' Class -> Counts -> Percent')
for cls in counts.keys():
print("%6s: % 7d -> % 5.1f%%" % (cls, counts[cls], counts[cls]/total*100))
if type(pd.Series(counts).idxmin())==str:
return pd.Series(counts).idxmin()
else:
return int(pd.Series(counts).idxmin())
###############################################################################
def return_factorized_dict(ls):
"""
###### Factorize any list of values in a data frame using this neat function
if your data has any NaN's it automatically marks it as -1 and returns that for NaN's
Returns a dictionary mapping previous values with new values.
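    Illustrative example (values below assume pandas.factorize semantics):
        return_factorized_dict(['a', 'b', 'a', np.nan])  ->  {'a': 0, 'b': 1, nan: -1}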
"""
factos = pd.unique(pd.factorize(ls)[0])
categs = pd.unique(pd.factorize(ls)[1])
if -1 in factos:
categs = np.insert(categs,np.where(factos==-1)[0][0],np.nan)
return dict(zip(categs,factos))
#############################################################################################
from sklearn.metrics import confusion_matrix
def balanced_accuracy_score(y_true, y_pred, sample_weight=None,
adjusted=False):
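    """Balanced accuracy = mean of per-class recall (diagonal of the confusion
    matrix divided by row sums); `adjusted=True` rescales the score so that
    chance level maps to 0. This mirrors scikit-learn's balanced_accuracy_score."""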
C = confusion_matrix(y_true, y_pred, sample_weight=sample_weight)
with np.errstate(divide='ignore', invalid='ignore'):
per_class = np.diag(C) / C.sum(axis=1)
if np.any(np.isnan(per_class)):
warnings.warn('y_pred contains classes not in y_true')
per_class = per_class[~np.isnan(per_class)]
score = np.mean(per_class)
if adjusted:
n_classes = len(per_class)
chance = 1 / n_classes
score -= chance
score /= 1 - chance
return score
#############################################################################################
import os
def check_if_GPU_exists():
GPU_exists = False
try:
from tensorflow.python.client import device_lib
dev_list = device_lib.list_local_devices()
print('Number of GPUs = %d' %len(dev_list))
for i in range(len(dev_list)):
if 'GPU' == dev_list[i].device_type:
GPU_exists = True
print('%s available' %dev_list[i].device_type)
except:
print('')
if not GPU_exists:
try:
os.environ['NVIDIA_VISIBLE_DEVICES']
print('GPU available on this device')
return True
except:
print('No GPU available on this device')
return False
else:
return True
#############################################################################################
def analyze_problem_type(train, targ,verbose=0):
"""
This module analyzes a Target Variable and finds out whether it is a
Regression or Classification type problem
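    Illustrative usage (hypothetical frame/column names):
        analyze_problem_type(train_df, 'target')  # -> 'Regression', 'Binary_Classification' or 'Multi_Classification'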
"""
if train[targ].dtype != 'int64' and train[targ].dtype != float :
if train[targ].dtype == object:
if len(train[targ].unique()) > 1 and len(train[targ].unique()) <= 2:
model_class = 'Binary_Classification'
else:
model_class = 'Multi_Classification'
else:
if len(train[targ].unique()) == 2:
model_class = 'Binary_Classification'
elif len(train[targ].unique()) > 1 and len(train[targ].unique()) <= 30:
model_class = 'Multi_Classification'
elif train[targ].dtype == 'int64' or train[targ].dtype == float :
if len(train[targ].unique()) == 1:
print('Error in data set: Only one class in Target variable. Check input and try again')
sys.exit()
elif len(train[targ].unique()) == 2:
model_class = 'Binary_Classification'
elif len(train[targ].unique()) > 1 and len(train[targ].unique()) <= 30:
model_class = 'Multi_Classification'
else:
model_class = 'Regression'
elif train[targ].dtype == object:
if len(train[targ].unique()) > 1 and len(train[targ].unique()) <= 2:
model_class = 'Binary_Classification'
else:
model_class = 'Multi_Classification'
elif train[targ].dtype == bool:
model_class = 'Binary_Classification'
elif train[targ].dtype == 'int64':
if len(train[targ].unique()) == 2:
model_class = 'Binary_Classification'
elif len(train[targ].unique()) > 1 and len(train[targ].unique()) <= 30:
model_class = 'Multi_Classification'
else:
model_class = 'Regression'
else :
model_class = 'Regression'
return model_class
#######
def convert_train_test_cat_col_to_numeric(start_train, start_test, col,str_flag=True):
"""
#### This is the easiest way to label encode object variables in both train and test
#### This takes care of some categories that are present in train and not in test
### and vice versa
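    Illustrative usage (hypothetical frames and column name):
        train, test, missing_flag, new_missing_col = convert_train_test_cat_col_to_numeric(train, test, 'city')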
"""
start_train = copy.deepcopy(start_train)
start_test = copy.deepcopy(start_test)
missing_flag = False
new_missing_col = ''
if start_train[col].isnull().sum() > 0:
missing_flag = True
if str_flag:
new_missing_col = col + '_Missing_Flag'
start_train[new_missing_col] = 0
start_train.loc[start_train[col].isnull(),new_missing_col]=1
start_train[col] = start_train[col].fillna("NA", inplace=False).astype(str)
else:
new_missing_col = col + '_Missing_Flag'
start_train[new_missing_col] = 0
start_train.loc[start_train[col].isnull(),new_missing_col]=1
start_train[col] = start_train[col].fillna("NA", inplace=False).astype('category')
if len(start_train[col].apply(type).value_counts()) > 1:
print(' Alert! Mixed Data Types in Train data set %s column with %d data types. Fixing it...' %(
col, len(start_train[col].apply(type).value_counts())))
train_categs = start_train[col].value_counts().index.tolist()
else:
train_categs = np.unique(start_train[col]).tolist()
if not isinstance(start_test,str) :
if start_test[col].isnull().sum() > 0:
            #### In some rare cases, Test data has missing values while Train data doesn't.
#### This section is take care of those rare cases. We need to create a missing col
#### We need to create that missing flag column in both train and test in that case
if not missing_flag:
missing_flag = True
new_missing_col = col + '_Missing_Flag'
start_train[new_missing_col] = 0
##### THis is to take care of Missing_Flag in start_test data set!!
start_test[new_missing_col] = 0
start_test.loc[start_test[col].isnull(),new_missing_col]=1
if str_flag:
start_test[col] = start_test[col].fillna("NA", inplace=False).astype(str)
else:
start_test[col] = start_test[col].fillna("NA", inplace=False).astype('category')
else:
#### In some rare cases, there is missing values in train but not in test data!
#### In those cases, we need to create a new_missing_col in test data in addition to train
start_test[new_missing_col] = 0
if len(start_test[col].apply(type).value_counts()) > 1:
print(' Alert! Mixed Data Types in Test data set %s column with %d data types. Fixing it...' %(
col, len(start_test[col].apply(type).value_counts())))
test_categs = start_test[col].value_counts().index.tolist()
test_categs = [x if isinstance(x,str) else str(x) for x in test_categs]
start_test[col] = start_test[col].astype(str).values
else:
test_categs = np.unique(start_test[col]).tolist()
if not isinstance(start_test,str) :
categs_all = np.unique( train_categs + test_categs).tolist()
dict_all = return_factorized_dict(categs_all)
else:
dict_all = return_factorized_dict(train_categs)
start_train[col] = start_train[col].map(dict_all)
if not isinstance(start_test,str) :
start_test[col] = start_test[col].map(dict_all)
return start_train, start_test, missing_flag, new_missing_col
#############################################################################################################
def flatten_list(list_of_lists):
final_ls = []
for each_item in list_of_lists:
if isinstance(each_item,list):
final_ls += each_item
else:
final_ls.append(each_item)
return final_ls
#############################################################################################################
import scipy as sp
def Auto_ViML(train, target, test='',sample_submission='',hyper_param='RS', feature_reduction=True,
scoring_parameter='logloss', Boosting_Flag=None, KMeans_Featurizer=False,
Add_Poly=0, Stacking_Flag=False, Binning_Flag=False,
Imbalanced_Flag=False, verbose=0):
"""
#########################################################################################################
############# This is not an Officially Supported Google Product! #########################
#########################################################################################################
#### Automatically Build Variant Interpretable Machine Learning Models (Auto_ViML) ######
#### Developed by <NAME> ######
###### Version 0.1.652 #######
##### GPU UPGRADE!! Now with Auto_NLP. Best Version to Download or Upgrade. May 15,2020 ######
    ######      Auto_ViML with Auto_NLP combines structured data with NLP for Predictions.      #######
#########################################################################################################
#Copyright 2019 Google LLC #######
# #######
#Licensed under the Apache License, Version 2.0 (the "License"); #######
#you may not use this file except in compliance with the License. #######
#You may obtain a copy of the License at #######
# #######
# https://www.apache.org/licenses/LICENSE-2.0 #######
# #######
#Unless required by applicable law or agreed to in writing, software #######
#distributed under the License is distributed on an "AS IS" BASIS, #######
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #######
#See the License for the specific language governing permissions and #######
#limitations under the License. #######
#########################################################################################################
#### Auto_ViML was designed for building a High Performance Interpretable Model With Fewest Vars. ###
#### The "V" in Auto_ViML stands for Variant because it tries Multiple Models and Multiple Features ###
    #### to find the Best Performing Model for any data set. The "i" in Auto_ViML stands for "Interpretable" ###
#### since it selects the fewest Features to build a simpler, more interpretable model. This is key. ##
#### Auto_ViML is built mostly using Scikit-Learn, Numpy, Pandas and Matplotlib. Hence it should run ##
#### on any Python 2 or Python 3 Anaconda installations. You won't have to import any special ####
#### Libraries other than "SHAP" library for SHAP values which provides more interpretability. #####
#### But if you don't have it, Auto_ViML will skip it and show you the regular feature importances. ###
#########################################################################################################
#### INPUTS: ###
#########################################################################################################
#### train: could be a datapath+filename or a dataframe. It will detect which is which and load it.####
#### test: could be a datapath+filename or a dataframe. If you don't have any, just leave it as "". ###
#### submission: must be a datapath+filename. If you don't have any, just leave it as empty string.####
#### target: name of the target variable in the data set. ####
    ####   sep: if you have a separator in the file such as "," or "\t" mention it here. Default is ",". ####
#### scoring_parameter: if you want your own scoring parameter such as "f1" give it here. If not, #####
#### it will assume the appropriate scoring param for the problem and it will build the model.#####
#### hyper_param: Tuning options are GridSearch ('GS'), RandomizedSearch ('RS')and now HyperOpt ('HO')#
#### Default setting is 'GS'. Auto_ViML with HyperOpt is approximately 3X Faster than Auto_ViML###
#### feature_reduction: Default = 'True' but it can be set to False if you don't want automatic ####
#### feature_reduction since in Image data sets like digits and MNIST, you get better #####
#### results when you don't reduce features automatically. You can always try both and see. #####
#### KMeans_Featurizer = True: Adds a cluster label to features based on KMeans. Use for Linear. #####
#### False (default) = For Random Forests or XGB models, leave it False since it may overfit.####
#### Boosting Flag: you have 3 possible choices (default is False): #####
#### None = This will build a Linear Model #####
#### False = This will build a Random Forest or Extra Trees model (also known as Bagging) #####
#### True = This will build an XGBoost model #####
#### Add_Poly: Default is 0. It has 2 additional settings: #####
#### 1 = Add interaction variables only such as x1*x2, x2*x3,...x9*10 etc. #####
#### 2 = Add Interactions and Squared variables such as x1**2, x2**2, etc. #####
#### Stacking_Flag: Default is False. If set to True, it will add an additional feature which #####
#### is derived from predictions of another model. This is used in some cases but may result#####
#### in overfitting. So be careful turning this flag "on". #####
#### Binning_Flag: Default is False. It set to True, it will convert the top numeric variables #####
#### into binned variables through a technique known as "Entropy" binning. This is very #####
#### helpful for certain datasets (especially hard to build models). #####
#### Imbalanced_Flag: Default is False. If set to True, it will downsample the "Majority Class" #####
#### in an imbalanced dataset and make the "Rare" class at least 5% of the data set. This #####
    ####    is the ideal threshold in my mind to make a model learn. Do it for Highly Imbalanced data. #####
#### verbose: This has 3 possible states: #####
#### 0 = limited output. Great for running this silently and getting fast results. #####
    ####    1 = more charts. Great for reviewing results and deciding how to change the input flags. #####
#### 2 = lots of charts and output. Great for reproducing what Auto_ViML does on your own. #####
#########################################################################################################
#### OUTPUTS: #####
#########################################################################################################
#### model: It will return your trained model #####
#### features: the fewest number of features in your model to make it perform well #####
#### train_modified: this is the modified train dataframe after removing and adding features #####
#### test_modified: this is the modified test dataframe with the same transformations as train #####
################# A D D I T I O N A L N O T E S ###########
#### Finally, it writes your submission file to disk in the current directory called "mysubmission.csv"
#### This submission file is ready for you to show it clients or submit it to competitions. #####
#### If no submission file was given but as long as you give it a test file name, it will create #####
#### a submission file for you named "mySubmission.csv". #####
#### Auto_ViML works on any Multi-Class, Multi-Label Data Set. So you can have many target labels #####
#### You don't have to tell Auto_ViML whether it is a Regression or Classification problem. #####
#### Suggestions for a Scoring Metric: #####
#### If you have Binary Class and Multi-Class in a Single Label, Choose Accuracy. It will ######
#### do very well. If you want something better, try roc_auc even for Multi-Class which works. ######
#### You can try F1 or Weighted F1 if you want something complex or for Multi-Class. ######
#### Note that For Imbalanced Classes (<=5% classes), it automatically adds Class Weights. ######
#### Also, Note that it handles Multi-Label automatically so you can send Train data ######
#### with multiple Labels (Targets) and it will automatically predict for each Label. ######
#### Finally this is Meant to Be a Fast Algorithm, so use it for just quick POCs ######
#### This is Not Meant for Production Problems. It produces great models but it is not Perfect! ######
######################### HELP OTHERS! PLEASE CONTRIBUTE! OPEN A PULL REQUEST! ##########################
    #########################################################################################################
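    ####  Illustrative call (argument values here are examples, not defaults):                         ######
    ####  model, features, train_mod, test_mod = Auto_ViML(train_df, 'target', test_df, '',            ######
    ####          scoring_parameter='balanced_accuracy', hyper_param='RS',                             ######
    ####          Boosting_Flag=True, verbose=1)                                                       ######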
"""
##### These copies are to make sure that the originals are not destroyed ####
CPU_count = os.cpu_count()
test = copy.deepcopy(test)
orig_train = copy.deepcopy(train)
orig_test = copy.deepcopy(test)
train_index = train.index
if not isinstance(test, str):
test_index = test.index
start_test = copy.deepcopy(orig_test)
####### These are Global Settings. If you change them here, it will ripple across the whole code ###
corr_limit = 0.70 #### This decides what the cut-off for defining highly correlated vars to remove is.
scaling = 'MinMax' ### This decides whether to use MinMax scaling or Standard Scaling ("Std").
first_flag = 0 ## This is just a setting to detect which is
seed= 99 ### this maintains repeatability of the whole ML pipeline here ###
subsample=0.7 #### Leave this low so the models generalize better. Increase it if you want overfit models
col_sub_sample = 0.7 ### Leave this low for the same reason above
poly_degree = 2 ### this create 2-degree polynomial variables in Add_Poly. Increase if you want more degrees
booster = 'gbtree' ### this is the booster for XGBoost. The other option is "Linear".
n_splits = 5 ### This controls the number of splits for Cross Validation. Increasing will take longer time.
matplotlib_flag = True #(default) This is for drawing SHAP values. If this is False, initJS is used.
early_stopping = 20 #### Early stopping rounds for XGBoost ######
encoded = '_Label_Encoded' ### This is the tag we add to feature names in the end to indicate they are label encoded
    catboost_limit = 0.4 #### The catboost_limit represents the percentage of num vars in data. Any lower, CatBoost is used.
    cat_code_limit = 100 #### If the number of dummy variables to create in a data set exceeds this, CatBoost is the default Algorithm used
    one_hot_size = 500 #### This determines the max length of one_hot_max_size parameter of CatBoost algorithm
Alpha_min = -3 #### The lowest value of Alpha in LOGSPACE that is used in CatBoost
Alpha_max = 2 #### The highest value of Alpha in LOGSPACE that is used in Lasso or Ridge Regression
Cs = [0.001,0.005,0.01,0.05,0.1,0.25,0.5,1,2,4,6,10,20,30,40,50,100,150,200,400,800,1000,2000]
#Cs = np.logspace(-4,3,40) ### The list of values of C used in Logistic Regression
tolerance = 0.001 #### This tolerance is needed to speed up Logistic Regression. Otherwise, SAGA takes too long!!
    #### 'lbfgs' is the fastest one but doesn't provide accurate results. Newton-CG is slower but accurate!
#### SAGA is extremely slow. Even slower than Newton-CG. Liblinear is the fastest and as accurate as Newton-CG!
solvers = ['liblinear'] ### Other solvers for Logistic Regression model: ['newton-cg','lbfgs','saga','liblinear']
solver = 'liblinear' ### This is the next fastest solver after liblinear. Useful for Multi-class problems!
penalties = ['l2','l1'] ### This is to determine the penalties for LogisticRegression
n_steps = 6 ### number of estimator steps between 100 and max_estims
max_depth = 10 ##### This limits the max_depth used in decision trees and other classifiers
max_features = 10 #### maximum number of features in a random forest model or extra trees model
warm_start = True ### This is to set the warm_start flag for the ExtraTrees models
bootstrap = True #### Set this flag to control whether to bootstrap variables or not.
n_repeats = 1 #### This is for repeated KFold and StratifiedKFold - this changes the folds every time
Bins = 30 ### This is for plotting probabilities in a histogram. For small data sets, 30 is enough.
top_nlp_features = 100 ### This sets a limit on the number of features added by each NLP transformer!
removed_features_threshold = 5 #### This triggers the Truncated_SVD if number of removed features from XGB exceeds this!
calibrator_flag = False ### In Multi-class data sets, a CalibratedClassifier works better than regular classifiers!
max_class_length = 1 ### It turns out the number of classes is directly correlated to Estimated Time. Hence this!
print('############## D A T A S E T A N A L Y S I S #######################')
########## I F CATBOOST IS REQUESTED, THEN CHECK IF IT IS INSTALLED #######################
if isinstance(Boosting_Flag,str):
if Boosting_Flag.lower() == 'catboost':
from catboost import CatBoostClassifier, CatBoostRegressor
#### Similarly for Random Forests Model, it takes too long with Grid Search, so MAKE IT RandomizedSearch!
if not Boosting_Flag: ### there is also a chance Boosting_Flag is None - This is to eliminate that chance!
if orig_train.shape[0] >= 10000:
hyper_param = 'RS'
print('Changing hyperparameter search to RS. Otherwise, Random Forests will take too long for 10,000+ rows')
elif Boosting_Flag: ### there is also a chance Boosting_Flag is None - This is to eliminate that chance!
if not isinstance(Boosting_Flag, str):
if orig_train.shape[0] >= 10000:
hyper_param = 'RS'
print('Changing hyperparameter search to RS. Otherwise XGBoost will take too long for 10,000+ rows.')
########### T H I S I S W H E R E H Y P E R O P T P A R A M S A R E S E T #########
if hyper_param == 'HO':
########### HyperOpt related objective functions are defined here #################
from hyperopt import hp, tpe
from hyperopt.fmin import fmin
from hyperopt import Trials
from autoviml.custom_scores_HO import accu, rmse, gini_sklearn, gini_meae
from autoviml.custom_scores_HO import gini_msle, gini_mae, gini_mse, gini_rmse
from autoviml.custom_scores_HO import gini_accuracy, gini_bal_accuracy, gini_roc
from autoviml.custom_scores_HO import gini_precision, gini_average_precision, gini_weighted_precision
from autoviml.custom_scores_HO import gini_macro_precision, gini_micro_precision
from autoviml.custom_scores_HO import gini_samples_precision, gini_f1, gini_weighted_f1
from autoviml.custom_scores_HO import gini_macro_f1, gini_micro_f1, gini_samples_f1,f2_measure
from autoviml.custom_scores_HO import gini_log_loss, gini_recall, gini_weighted_recall
from autoviml.custom_scores_HO import gini_samples_recall, gini_macro_recall, gini_micro_recall
else:
from autoviml.custom_scores import accu, rmse, gini_sklearn, gini_meae
from autoviml.custom_scores import gini_msle, gini_mae, gini_mse, gini_rmse
from autoviml.custom_scores import gini_accuracy, gini_bal_accuracy, gini_roc
from autoviml.custom_scores import gini_precision, gini_average_precision, gini_weighted_precision
from autoviml.custom_scores import gini_macro_precision, gini_micro_precision
from autoviml.custom_scores import gini_samples_precision, gini_f1, gini_weighted_f1
from autoviml.custom_scores import gini_macro_f1, gini_micro_f1, gini_samples_f1,f2_measure
from autoviml.custom_scores import gini_log_loss, gini_recall, gini_weighted_recall
from autoviml.custom_scores import gini_samples_recall, gini_macro_recall, gini_micro_recall
###### If hyper_param = 'GS', it takes a LOOOONG TIME with "SAGA" solver for LogisticRegression.
#### Hence to speed it up you need to change the tolerance threshold to something bigger
if hyper_param == 'GS':
tolerance = 0.01 #### This tolerance is bigger to speed up Logistic Regression. Otherwise, SAGA takes too long!!
########## This is where some more default parameters are set up ######
data_dimension = orig_train.shape[0]*orig_train.shape[1] ### number of cells in the entire data set .
if data_dimension > 1000000:
### if data dimension exceeds 1 million, then reduce no of params
no_iter=30
early_stopping = 10
test_size = 0.20
max_iter = 10000
Bins = 100
top_nlp_features = 300
if isinstance(Boosting_Flag,str):
if Boosting_Flag.lower() == 'catboost':
max_estims = 5000
else:
max_estims = 400
else:
max_estims = 400
else:
if orig_train.shape[0] <= 1000:
no_iter=20
test_size = 0.1
max_iter = 4000
top_nlp_features = 250
if isinstance(Boosting_Flag,str):
if Boosting_Flag.lower() == 'catboost':
max_estims = 3000
else:
max_estims = 300
else:
max_estims = 300
early_stopping = 4
else:
no_iter=30
test_size = 0.15
max_iter = 7000
top_nlp_features = 200
if isinstance(Boosting_Flag,str):
if Boosting_Flag.lower() == 'catboost':
max_estims = 4000
else:
max_estims = 350
else:
max_estims = 350
early_stopping = 6
#### The warnings from Sklearn are so annoying that I have to shut it off ####
import warnings
warnings.filterwarnings("ignore")
def warn(*args, **kwargs):
pass
warnings.warn = warn
### First_Flag is merely a flag for the first time you want to set values of variables
if scaling == 'MinMax':
SS = MinMaxScaler()
elif scaling == 'Std':
SS = StandardScaler()
else:
SS = MinMaxScaler()
### Make target into a list so that we can uniformly process the target label
if not isinstance(target, list):
target = [target]
model_label = 'Single_Label'
elif isinstance(target, list):
if len(target)==1:
model_label = 'Single_Label'
elif len(target) > 1:
model_label = 'Multi_Label'
else:
print('Target variable is neither a string nor a list. Please check input and try again!')
return
##### This is where we run the Traditional models to compare them to XGB #####
start_time = time.time()
####################################################################################
##### Set up your Target Labels and Classes Properly Here #### Label Encoding #####
#### This is for Classification Problems Only where you do Label Encoding of Target
mldict = lambda: defaultdict(mldict)
label_dict = mldict()
first_time = True
print('Training Set Shape = {}'.format(orig_train.shape))
print(' Training Set Memory Usage = {:.2f} MB'.format(orig_train.memory_usage().sum() / 1024**2))
if not isinstance(orig_test,str):
print('Test Set Shape = {}'.format(orig_test.shape))
print(' Test Set Memory Usage = {:.2f} MB'.format(orig_test.memory_usage().sum() / 1024**2))
print('%s Target: %s' %(model_label,target))
###### Now analyze what problem we have here ####
try:
modeltype = analyze_problem_type(train, target[0],verbose)
except:
print('Cannot find the Target variable in data set. Please check input and try again')
return
for each_target in target:
#### Make sure you don't move these 2 lines: they need to be reset for every target!
#### HyperOpt will not do Trials beyond max_evals - so only if you reset here, it will do it again.
if hyper_param == 'HO':
params_dict = {}
bayes_trials = Trials()
############ THIS IS WHERE OTHER DEFAULT PARAMS ARE SET ###############
c_params = dict()
r_params = dict()
if modeltype == 'Regression':
scv = RepeatedKFold(n_splits=n_splits, n_repeats=n_repeats, random_state=seed)
eval_metric = 'rmse'
objective = 'reg:squarederror'
model_class = 'Regression'
start_train = copy.deepcopy(orig_train)
else:
if len(np.unique(train[each_target])) == 2:
model_class = 'Binary-Class'
elif len(np.unique(train[each_target])) > 2:
model_class = 'Multi-Class'
##### If multi-class happens, then you absolutely need to do SMOTE. Otherwise, you don't get good results!
#### Unfortunately SMOTE blows up when the data set is large -> so better to turn it off!
print('ALERT! Setting Imbalanced_Flag to True in Auto_ViML for Multi_Classification problems improves results!')
#Imbalanced_Flag = True
else:
print('Target label %s has less than 2 classes. Stopping' %each_target)
return
### This is for Classification Problems Only ########
print('Shuffling the data set before training')
start_train = orig_train.sample(frac=1.0, random_state=seed)
scv = RepeatedStratifiedKFold(n_splits=n_splits, n_repeats=n_repeats, random_state=seed)
if modeltype != 'Regression':
rare_class_orig = find_rare_class(orig_train[each_target].values,verbose=1)
            ### Perform Label Transformation only for Classification Problems ####
classes = np.unique(orig_train[each_target])
if first_time:
if hyper_param == 'GS':
print('Using GridSearchCV for Hyper Parameter Tuning. This is slow. Switch to RS for faster tuning...')
elif hyper_param == 'RS':
print('Using RandomizedSearchCV for Hyper Parameter Tuning. This is 3X faster than GridSearchCV...')
else:
print('Using HyperOpt which is approximately 3X Faster than GridSearchCV but results vary...')
first_time = False
if len(classes) > 2:
                ##### If Boosting_Flag = True, warn the user here since Multi-Class XGB is VERY SLOW!
max_class_length = len(classes)
if Boosting_Flag:
                    print('CAUTION: In Multi-Class Boosting (more than 2 classes), TRAINING WILL TAKE A LOT OF TIME!')
objective = 'multi:softmax'
eval_metric = "mlogloss"
else:
max_class_length = 2
eval_metric="logloss"
objective = 'binary:logistic'
### Do Label Encoding when the Target Classes in each Label are Strings or Multi Class ###
if type(start_train[each_target].values[0])==str or str(start_train[each_target].dtype
)=='category' or sorted(np.unique(start_train[each_target].values))[0] != 0:
### if the class is a string or if it has more than 2 classes, then use Factorizer!
label_dict[each_target]['values'] = start_train[each_target].values
#### Factorizer is the easiest way to convert target in train and predictions in test
#### This takes care of some classes that are present in train and not in predictions
                ### and vice versa. Hence it is better than Label Encoders, which break when the above happens.
train_targ_categs = list(start_train[each_target].value_counts().index)
if len(train_targ_categs) == 2:
majority_class = [x for x in train_targ_categs if x != rare_class_orig]
dict_targ_all = {majority_class[0]: 0, rare_class_orig: 1}
else:
dict_targ_all = return_factorized_dict(train_targ_categs)
start_train[each_target] = start_train[each_target].map(dict_targ_all)
label_dict[each_target]['dictionary'] = copy.deepcopy(dict_targ_all)
label_dict[each_target]['transformer'] = dict([(v,k) for (k,v) in dict_targ_all.items()])
label_dict[each_target]['classes'] = copy.deepcopy(train_targ_categs)
class_nums = list(dict_targ_all.values())
label_dict[each_target]['class_nums'] = copy.deepcopy(class_nums)
print('String or Multi Class target: %s transformed as follows: %s' %(each_target,dict_targ_all))
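                ### Illustrative (hypothetical) example: for a target with classes ['no', 'yes'] where 'yes'
                ### is the rare class, dict_targ_all becomes {'no': 0, 'yes': 1}; the reverse mapping stored in
                ### label_dict[...]['transformer'] ({0: 'no', 1: 'yes'}) is used later to restore original labels.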
rare_class = find_rare_class(start_train[each_target].values)
else:
### Since the each_target here is already numeric, you don't have to modify it
start_train[each_target] = start_train[each_target].astype(int).values
rare_class = find_rare_class(start_train[each_target].values)
label_dict[each_target]['values'] = start_train[each_target].values
label_dict[each_target]['classes'] = np.unique(start_train[each_target].values)
class_nums = np.unique(start_train[each_target].values)
label_dict[each_target]['class_nums'] = copy.deepcopy(class_nums)
label_dict[each_target]['transformer'] = []
label_dict[each_target]['dictionary'] = dict(zip(classes,classes))
print(' Target %s is already numeric. No transformation done.' %each_target)
if rare_class != 1:
print('Alert! Rare Class is not 1 but %s in this data set' %rare_class)
else:
#### In Regression problems, max_class_length is artificially set to one.
#### It turns out that Estimated Time is correlated to number of classes in data set. Hence we use this!
max_class_length = 1
###########################################################################################
#### This is where we start doing the iterative hyper tuning parameters #####
params_dict = defaultdict(list)
accu_mean = []
error_rate = []
###### This is where we do the training and hyper parameter tuning ########
orig_preds = [x for x in list(orig_train) if x not in target]
count = 0
################# CLASSIFY COLUMNS HERE ######################
var_df = classify_columns(orig_train[orig_preds], verbose)
##### Classify Columns ################
id_cols = var_df['id_vars']
nlp_columns = var_df['nlp_vars']
date_cols = var_df['date_vars']
del_cols = var_df['cols_delete']
factor_cols = var_df['factor_vars']
numvars = var_df['continuous_vars']+var_df['int_vars']
cat_vars = var_df['string_bool_vars']+var_df['discrete_string_vars']+var_df[
'cat_vars']+var_df['factor_vars']+var_df['num_bool_vars']
num_bool_vars = var_df['num_bool_vars']
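    ### classify_columns() buckets every predictor into one of these groups (IDs, NLP text, dates,
    ### columns to delete, categorical/factor, numeric, boolean) so each group can be processed differently below.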
#######################################################################################
preds = [x for x in orig_preds if x not in id_cols+del_cols+date_cols+target]
if len(id_cols+del_cols+date_cols)== 0:
print(' No variables removed since no ID or low-information variables found in data set')
else:
print(' %d variables removed since they were ID or low-information variables'
%len(id_cols+del_cols+date_cols))
################## This is where real code begins ###################################################
GPU_exists = check_if_GPU_exists()
###### This is where we set the CPU and GPU parameters for XGBoost
param = {}
if Boosting_Flag:
if isinstance(Boosting_Flag,str):
if Boosting_Flag.lower() == 'catboost':
model_name = 'CatBoost'
hyper_param = None
else:
model_name = 'XGBoost'
else:
model_name = 'XGBoost'
elif Boosting_Flag is None:
model_name = 'Linear'
else:
model_name = 'Forests'
##### Set the Scoring Parameters here based on each model and preferences of user ##############
cpu_params = {}
if model_name == 'XGBoost':
        ##### We should keep CPU params as a backup in case the GPU fails!
cpu_params['nthread'] = -1
cpu_params['tree_method'] = 'hist'
cpu_params['grow_policy'] = 'depthwise'
cpu_params['max_depth'] = max_depth
cpu_params['max_leaves'] = 0
cpu_params['verbosity'] = 0
cpu_params['gpu_id'] = 0
cpu_params['updater'] = 'grow_colmaker'
cpu_params['predictor'] = 'cpu_predictor'
cpu_params['num_parallel_tree'] = 1
if GPU_exists:
param['nthread'] = -1
param['tree_method'] = 'gpu_hist'
param['grow_policy'] = 'depthwise'
param['max_depth'] = max_depth
param['max_leaves'] = 0
param['verbosity'] = 0
param['gpu_id'] = 0
param['updater'] = 'grow_gpu_hist' #'prune'
param['predictor'] = 'gpu_predictor'
param['num_parallel_tree'] = 1
else:
param = copy.deepcopy(cpu_params)
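        ### Design note: the GPU parameter set mirrors the CPU one except for tree_method/updater/predictor,
        ### so if a GPU call fails later, the model can be refit with cpu_params without any other changes.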
validation_metric = copy.deepcopy(scoring_parameter)
elif model_name.lower() == 'catboost':
if model_class == 'Binary-Class':
catboost_scoring = 'Accuracy'
validation_metric = 'Accuracy'
loss_function='Logloss'
elif model_class == 'Multi-Class':
catboost_scoring = 'AUC'
validation_metric = 'AUC:type=Mu'
loss_function='MultiClass'
else:
loss_function = 'RMSE'
validation_metric = 'RMSE'
catboost_scoring = 'RMSE'
else:
validation_metric = copy.deepcopy(scoring_parameter)
########## D A T A P R E P R O C E S S I N G H E R E ##########################
print('############# D A T A P R E P A R A T I O N #############')
if start_train.isnull().sum().sum() > 0:
print('Filling missing values with "missing" placeholder and adding a column for missing_flags')
else:
print('No Missing Values in train data set')
copy_preds = copy.deepcopy(preds)
missing_flag_cols = []
if len(copy_preds) > 0:
dict_train = {}
for f in copy_preds:
if f in nlp_columns:
                #### You have to skip this for NLP columns ##############
continue
missing_flag = False
if start_train[f].dtype == object:
#### This is the easiest way to label encode object variables in both train and test
#### This takes care of some categories that are present in train and not in test
### and vice versa
start_train, start_test,missing_flag,new_missing_col = convert_train_test_cat_col_to_numeric(start_train, start_test,f,True)
if missing_flag:
cat_vars.append(new_missing_col)
num_bool_vars.append(new_missing_col)
preds.append(new_missing_col)
missing_flag_cols.append(new_missing_col)
elif start_train[f].dtype == np.int64 or start_train[f].dtype == np.int32 or start_train[f].dtype == np.int16:
### if there are integer variables, don't scale them. Leave them as is.
fill_num = start_train[f].min() - 1
if start_train[f].isnull().sum() > 0:
missing_flag = True
new_missing_col = f + '_Missing_Flag'
start_train[new_missing_col] = 0
start_train.loc[start_train[f].isnull(),new_missing_col]=1
start_train[f] = start_train[f].fillna(fill_num).astype(int)
if type(orig_test) != str:
if missing_flag:
start_test[new_missing_col] = 0
if start_test[f].isnull().sum() > 0:
start_test.loc[start_test[f].isnull(),new_missing_col]=1
start_test[f] = start_test[f].fillna(fill_num).astype(int)
if missing_flag:
cat_vars.append(new_missing_col)
num_bool_vars.append(new_missing_col)
preds.append(new_missing_col)
missing_flag_cols.append(new_missing_col)
elif f in factor_cols:
start_train, start_test,missing_flag,new_missing_col = convert_train_test_cat_col_to_numeric(start_train, start_test,f,False)
if missing_flag:
cat_vars.append(new_missing_col)
num_bool_vars.append(new_missing_col)
preds.append(new_missing_col)
missing_flag_cols.append(new_missing_col)
else:
### for all numeric variables, fill missing values with 1 less than min.
fill_num = start_train[f].min() - 1
if start_train[f].isnull().sum() > 0:
missing_flag = True
new_missing_col = f + '_Missing_Flag'
start_train[new_missing_col] = 0
start_train.loc[start_train[f].isnull(),new_missing_col]=1
start_train[f] = start_train[f].fillna(fill_num)
if type(orig_test) != str:
if missing_flag:
start_test[new_missing_col] = 0
if start_test[f].isnull().sum() > 0:
start_test.loc[start_test[f].isnull(),new_missing_col]=1
start_test[f] = start_test[f].fillna(fill_num)
if missing_flag:
cat_vars.append(new_missing_col)
num_bool_vars.append(new_missing_col)
preds.append(new_missing_col)
missing_flag_cols.append(new_missing_col)
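        ### Summary of the imputation strategy above: object/factor columns are label-encoded with a
        ### "missing" placeholder, while numeric columns are filled with (column minimum - 1) so the filled
        ### value cannot collide with a real observation; every imputed column also gets a companion
        ### "<col>_Missing_Flag" 0/1 column that is appended to the predictor list.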
###########################################################################################
if orig_train.isnull().sum().sum() > 0:
### If there are missing values in remaining features print it here ####
top5 = orig_train.isnull().sum().sort_values(ascending=False).index.tolist()[:5]
print(' Columns with most missing values: %s' %(
[x for x in top5 if orig_train[x].isnull().sum()>0]))
print(' and their missing value totals: %s' %([orig_train[x].isnull().sum() for x in
top5 if orig_train[x].isnull().sum()>0]))
if start_train[copy_preds].isnull().sum().sum() == 0:
print('Completed missing value Imputation. No more missing values in train.')
if verbose >= 1:
print(' %d new missing value columns added: %s' %(len(missing_flag_cols),missing_flag_cols))
else:
print('Error: Unable to complete missing value imputation in train. Exiting...')
return
####################################################################################
if type(orig_test) != str:
if start_test[copy_preds].isnull().sum().sum() > 0:
print('Test data still has some missing values. Fix it. Exiting...')
return
else:
print('Test data has no missing values. Continuing...')
###########################################################################################
else:
print(' Could not find any variables in your data set. Please check your dataset and try again')
return
###########################################################################################
print('Completed Label Encoding and Filling of Missing Values for Train and Test Data')
### This is a minor test to make sure that Boolean vars are Integers if they are Numeric!
if len(num_bool_vars) > 0:
### Just make sure that numeric Boolean vars are set as Integer type -> otherwise CatBoost will blow up
for each_bool_num in var_df['num_bool_vars']:
start_train[each_bool_num] = start_train[each_bool_num].astype(int)
if type(start_test) != str:
start_test[each_bool_num] = start_test[each_bool_num].astype(int)
######################################################################################
######### Set your Refit Criterion here - if you want to maximize Precision or Recall do it here ##
if modeltype == 'Regression':
if scoring_parameter in ['log_loss', 'neg_mean_squared_error','mean_squared_error']:
refit_metric = 'rmse'
else:
refit_metric = 'mae'
else:
if scoring_parameter in ['precision', 'precision_score','average_precision']:
refit_metric = 'precision'
elif scoring_parameter in ['logloss', 'log_loss']:
refit_metric = 'log_loss'
elif scoring_parameter in ['recall', 'recall_score']:
refit_metric = 'recall'
elif scoring_parameter in ['f1', 'f1_score','f1_weighted']:
refit_metric = 'f1'
elif scoring_parameter in ['accuracy', 'balanced_accuracy','balanced-accuracy']:
refit_metric = 'balanced_accuracy'
else:
refit_metric = 'balanced_accuracy'
print('%s problem: hyperparameters are being optimized for %s' %(modeltype,refit_metric))
###########################################################################################
### Make sure you remove variables that are highly correlated within data set first
rem_vars = left_subtract(preds,numvars)
if len(numvars) > 0 and feature_reduction:
numvars = remove_variables_using_fast_correlation(start_train,numvars, 'pearson',
corr_limit,verbose)
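    ### remove_variables_using_fast_correlation() drops redundant variables from highly correlated pairs
    ### (absolute Pearson correlation above corr_limit), reducing multicollinearity before the
    ### polynomial-feature and model-building steps that follow.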
### Reduced Preds are now free of correlated variables and hence can be used for Poly adds
red_preds = rem_vars + numvars
#### You need to save a copy of this red_preds so you can later on create a start_train
#### with it after each_target cycle is completed. Very important!
orig_red_preds = copy.deepcopy(red_preds)
for each_target in target:
print('\n############# PROCESSING T A R G E T = %s ##########################' %each_target)
######## D E F I N I N G N E W T R A I N and N E W T E S T here #########################
#### This is where we set the orig train data set with multiple labels to the new start_train
#### start_train has the new features added or reduced with the multi targets in one cycle
### That way, we start each train with one target, and then reset it with multi target
#############################################################################################
train = start_train[[each_target]+red_preds]
if type(orig_test) != str:
test = start_test[red_preds]
###### Add Polynomial Variables and Interaction Variables to Train ######
if Add_Poly >= 1:
if Add_Poly == 1:
print('\nAdding only Interaction Variables. This may result in Overfitting!')
elif Add_Poly == 2:
print('\nAdding only Squared Variables. This may result in Overfitting!')
elif Add_Poly == 3:
print('\nAdding Both Interaction and Squared Variables. This may result in Overfitting!')
## Since the data is already scaled, we set scaling to None here ##
### For train data we have to set the fit_flag to True ####
if len(numvars) > 1:
#### train_red contains reduced numeric variables with original and substituted poly/intxn variables
train_sel, lm, train_red,md,fin_xvars,feature_xvar_dict = add_poly_vars_select(train,numvars,
each_target,modeltype,poly_degree,Add_Poly,md='',
corr_limit=corr_limit, scaling='None',
fit_flag=True,verbose=verbose)
#### train_red contains reduced numeric variables with original and substituted poly/intxn variables
if len(left_subtract(train_sel,numvars)) > 0:
#### This means that new intxn and poly vars were added. In that case, you can use them as is
                    #### Since these vars were already tested for correlation, there should be no high correlation!
### SO you can take train_sel as the new list of numeric vars (numvars) going forward!
addl_vars = left_subtract(train_sel,numvars)
#numvars = list(set(numvars).intersection(set(train_sel)))
##### Print the additional Interxn and Poly variables here #######
if verbose >= 1:
print(' Intxn and Poly Vars are: %s' %addl_vars)
train = train_red[train_sel].join(train[rem_vars+[each_target]])
red_preds = [x for x in list(train) if x not in [each_target]]
if type(test) != str:
######### Add Polynomial and Interaction variables to Test ################
## Since the data is already scaled, we set scaling to None here ##
### For Test data we have to set the fit_flag to False ####
_, _, test_x_df,_,_,_ = add_poly_vars_select(test,numvars,each_target,
modeltype,poly_degree,Add_Poly,md,
corr_limit, scaling='None', fit_flag=False,
verbose=verbose)
### we need to convert x_vars into text_vars in test_x_df using feature_xvar_dict
test_x_vars = test_x_df.columns.tolist()
test_text_vars = [feature_xvar_dict[x] for x in test_x_vars]
test_x_df.columns = test_text_vars
#### test_red contains reduced variables with orig and substituted poly/intxn variables
test_red = test_x_df[train_sel]
#### we should now combined test_red with rem_vars so that it is the same shape as train
test = test_red.join(test[rem_vars])
#### Now we should change train_sel to subst_vars since that is the new list of vars going forward
numvars = copy.deepcopy(train_sel)
else:
#### NO new variables were added. so we can skip the rest of the stuff now ###
#### This means the train_sel is the new set of numeric features selected by add_poly algorithm
red_preds = train_sel+rem_vars
print(' No new variable was added by polynomial features...')
else:
print('\nAdding Polynomial vars ignored since no numeric vars in data')
train_sel = copy.deepcopy(numvars)
else:
### if there are no Polynomial vars, then all numeric variables are selected
train_sel = copy.deepcopy(numvars)
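        ### At this point train_sel holds the numeric features that move forward: either the original
        ### numvars, or (when Add_Poly is used) the selected mix of original, squared and interaction
        ### variables returned by add_poly_vars_select().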
################ A U T O N L P P R O C E S S I N G B E G I N S H E R E !!! ####
if len(nlp_columns) > 0:
for nlp_column in nlp_columns:
nlp_column_train = train[nlp_column].values
if not isinstance(orig_test, str):
nlp_column_test = test[nlp_column].values
train1, test1, best_nlp_transformer,max_features_limit = Auto_NLP(nlp_column,
train, test, each_target, refit_metric,
modeltype, top_nlp_features, verbose,
build_model=False)
########################################################################
if KMeans_Featurizer:
start_time1 = time.time()
##### Do a clustering of word vectors from each NLP_column. This gives great results!
tfidf_term_array = create_tfidf_terms(nlp_column_train, best_nlp_transformer,
is_train=True, max_features_limit=max_features_limit)
print ('Creating word clusters using term matrix of size: %d for Train data set...' %len(tfidf_term_array['terms']))
num_clusters = int(np.sqrt(len(tfidf_term_array['terms']))/2)
if num_clusters < 2:
num_clusters = 2
                    ##### Always set verbose to 0 since KMeans output is otherwise too verbose!
km = KMeans(n_clusters=num_clusters, random_state=seed, verbose=0)
kme, cluster_labels = return_cluster_labels(km, tfidf_term_array, num_clusters,
is_train=True)
if isinstance(nlp_column, str):
cluster_col = nlp_column + '_word_cluster_label'
else:
cluster_col = str(nlp_column) + '_word_cluster_label'
train1[cluster_col] = cluster_labels
print ('Created one new column: %s using selected NLP technique...' %cluster_col)
if not isinstance(orig_test, str):
tfidf_term_array_test = create_tfidf_terms(nlp_column_test, best_nlp_transformer,
is_train=False, max_features_limit=max_features_limit)
_, cluster_labels_test = return_cluster_labels(kme, tfidf_term_array_test, num_clusters,
is_train=False)
test1[cluster_col] = cluster_labels_test
print ('Created word clusters using same sized term matrix for Test data set...')
print(' Time Taken for creating word cluster labels = %0.0f seconds' %(time.time()-start_time1) )
####### Make sure you include the above new columns created in the predictor variables!
red_preds = [x for x in list(train1) if x not in [each_target]]
train = train1[red_preds+[each_target]]
if not isinstance(orig_test, str):
test = test1[red_preds]
################ A U T O N L P P R O C E S S I N G E N D S H E R E !!! ####
###### We have to detect float variables again since we have created new variables using Auto_NLP!!
train_sel = np.array(red_preds)[(train[red_preds].dtypes==float).values].tolist()
######### A D D D A T E T I M E F E A T U R E S ####################
if len(date_cols) > 0:
#### Do this only if date time columns exist in your data set!
for date_col in date_cols:
print('Processing %s column for date time features....' %date_col)
date_df_train = create_time_series_features(orig_train, date_col)
if not isinstance(date_df_train, str):
date_col_adds = date_df_train.columns.tolist()
print(' Adding %d columns from date time column %s' %(len(date_col_adds),date_col))
train = train.join(date_df_train)
else:
date_col_adds = []
if not isinstance(orig_test, str):
date_df_test = create_time_series_features(orig_test, date_col)
if not isinstance(date_df_test, str):
test = test.join(date_df_test)
red_preds = [x for x in list(train) if x not in [each_target]]
train_sel = train_sel + date_col_adds
######### SELECT IMPORTANT FEATURES HERE #############################
if feature_reduction:
important_features,num_vars, imp_cats = find_top_features_xgb(train,red_preds,train_sel,
each_target,
modeltype,corr_limit,verbose)
else:
important_features = copy.deepcopy(red_preds)
num_vars = copy.deepcopy(numvars)
#### we need to set the rem_vars in case there is no feature reduction #######
imp_cats = left_subtract(important_features,num_vars)
#####################################################################################
if len(important_features) == 0:
print('No important features found. Using all input features...')
important_features = copy.deepcopy(red_preds)
num_vars = copy.deepcopy(numvars)
#### we need to set the rem_vars in case there is no feature reduction #######
imp_cats = left_subtract(important_features,num_vars)
### Training an XGBoost model to find important features
train = train[important_features+[each_target]]
######################################################################
if type(orig_test) != str:
test = test[important_features]
############## F E A T U R E E N G I N E E R I N G S T A R T S N O W ##############
###### From here on we do some Feature Engg using Target Variable with Data Leakage ############
        ### To avoid data leakage, we now split the data into Train and CV so that the held-out data
        ## stays pure and is not influenced by learning from its own target (which would be data leakage).
###################################################################################################
print('Starting Feature Engineering now...')
X = train[important_features]
y = train[each_target]
################ I M P O R T A N T ##################################################
        ### The reason we don't use train_test_split is that we want to entropy-bin only a part of the train data
### If we use the whole of Train for entropy binning then there will be data leakage and our
### cross validation test scores will not be so accurate. So don't change the next 5 lines here!
################ I M P O R T A N T ##################################################
if modeltype == 'Regression':
            skf = KFold(n_splits=n_splits, shuffle=True, random_state=seed)  ### shuffle is required when random_state is set
else:
skf = StratifiedKFold(n_splits=n_splits, random_state=seed, shuffle=True)
cv_train_index, cv_index = next(skf.split(X, y))
################ TRAIN CV TEST SPLIT HERE ##################################################
try:
#### Sometimes this works but other times, it gives an error!
X_train, X_cv = X.loc[cv_train_index], X.loc[cv_index]
y_train, y_cv = y.loc[cv_train_index], y.loc[cv_index]
### The reason we don't use train_test_split is because we want only a partial train entropy binned
part_train = train.loc[cv_train_index]
part_cv = train.loc[cv_index]
except:
#### This works when the above method gives an error!
X_train, X_cv = X.iloc[cv_train_index], X.iloc[cv_index]
y_train, y_cv = y.iloc[cv_train_index], y.iloc[cv_index]
### The reason we don't use train_test_split is because we want only a partial train entropy binned
part_train = train.iloc[cv_train_index]
part_cv = train.iloc[cv_index]
print('Train CV Split completed with', "TRAIN rows:", cv_train_index.shape[0], "CV rows:", cv_index.shape[0])
################ IMPORTANT ENTROPY BINNING FIRST TIME #####################################
############ Add Entropy Binning of Continuous Variables Here ##############################
num_vars = np.array(important_features)[(train[important_features].dtypes==float)].tolist()
saved_important_features = copy.deepcopy(important_features) ### these are original features without '_bin' added
#### saved_num_vars is an important variable: it contains the orig_num_vars before they were binned
saved_num_vars = copy.deepcopy(num_vars) ### these are original numeric features without '_bin' added
############### BINNING FIRST TIME ##################################################
if Binning_Flag and len(saved_num_vars) > 0:
#### Do binning only when there are numeric features ####
            #### When we Bin the first time, we set the entropy_binning flag to False so that
            #### no numeric variables are removed. The next time around, they will be removed.
part_train, num_vars, important_features, part_cv = add_entropy_binning(part_train,
each_target, saved_num_vars,
saved_important_features, part_cv,
modeltype, entropy_binning=False,verbose=verbose)
#### In saved_num_vars we send in all the continuous_vars but we bin only the top few vars.
### Those that are binned are removed from saved_num_vars and the remaining become num_vars
### Our job is to find the names of those original numeric variables which were binned.
### orig_num_vars contains original num vars. num_vars contains binned versions of those vars.
### Those binned variables have now become categorical vars and must be added to imp_cats.
### you get the name of the original vars which were binned here in this orig_num_vars variable!
orig_num_vars = left_subtract(saved_num_vars,num_vars)
            #### you need to know the names of the binned variables. This is where you get them!
binned_num_vars = left_subtract(num_vars,saved_num_vars)
imp_cats += binned_num_vars
#### Also note that important_features does not contain orig_num_vars which have been erased.
else:
print(' Binning_Flag set to False or there are no numeric vars in data set to be binned')
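        ### When binning ran above, num_vars now holds the new "_bin" versions (treated as categorical via
        ### imp_cats) and orig_num_vars records which original numeric columns they replaced; otherwise
        ### num_vars is left unchanged.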
####################### KMEANS FIRST TIME ############################
### Now we add another Feature tied to KMeans clustering using Predictor and Target variables ###
if KMeans_Featurizer and len(saved_num_vars) > 0:
### DO KMeans Featurizer only if there are numeric features in the data set!
print(' Adding one Feature named "KMeans_Clusters" based on KMeans_Featurizer_Flag=True...')
km_label = 'KMeans_Clusters'
if modeltype != 'Regression':
                #### Set the number of clusters to roughly log10 of the number of rows in Train (minimum of 2)
num_clusters = int(np.round(max(2,np.log10(train.shape[0]))))
train_clusters, cv_clusters = Transform_KM_Features(part_train[
important_features], part_train[each_target],
part_cv[important_features], num_clusters)
else:
### If it is Regression, you don't have to specify the number of clusters
train_clusters, cv_clusters = Transform_KM_Features(part_train[
important_features], part_train[each_target],
part_cv[important_features])
#### Since this is returning the each_target in X_train, we need to drop it here ###
print(' Used KMeans to naturally cluster Train predictor variables into %d clusters' %num_clusters)
part_train[km_label] = train_clusters
part_cv[km_label] = cv_clusters
#X_train.drop(each_target,axis=1,inplace=True)
imp_cats.append(km_label)
for imp_cat in imp_cats:
part_train[imp_cat] = part_train[imp_cat].astype(int)
part_cv[imp_cat] = part_cv[imp_cat].astype(int)
####### The features are checked again once we add the cluster feature ####
important_features.append(km_label)
else:
print(' KMeans_Featurizer set to False or there are no numeric vars in data')
km_label = ''
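        ### The "KMeans_Clusters" feature assigns every row to a cluster learned (via Transform_KM_Features)
        ### from the predictors and target of the partial training split only, so the held-out part_cv rows
        ### receive cluster labels without ever being used to fit the clusterer.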
####################### STACKING FIRST TIME ############################
######### This is where you do Stacking of Multi Model Results into One Column ###
if Stacking_Flag:
#### In order to join, you need X_train to be a Pandas Series here ##
print('Alert! Stacking can produce Highly Overfit models on Training Data...')
### In order to avoid overfitting, we are going to learn from a small sample of data
            ### That is why we fit the stacker on part_train and use it to predict on both part_train and part_cv!
addcol, stacks1 = QuickML_Stacking(part_train[important_features],part_train[
each_target],part_train[important_features],
modeltype, Boosting_Flag, scoring_parameter,verbose)
addcol, stacks2 = QuickML_Stacking(part_train[important_features],part_train[
each_target],part_cv[important_features],
modeltype, Boosting_Flag, scoring_parameter,verbose)
part_train = part_train.join(pd.DataFrame(stacks1,index=cv_train_index,
columns=addcol))
##### Adding multiple columns for Stacking is best! Do not do the average of predictions!
part_cv = part_cv.join(pd.DataFrame(stacks2,index=cv_index,
columns=addcol))
print(' Adding %d Stacking feature(s) to training data' %len(addcol))
###### We make sure that we remove any new features that are highly correlated ! #####
#addcol = remove_variables_using_fast_correlation(X_train,addcol,corr_limit,verbose)
important_features += addcol
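        ### Stacking note: QuickML_Stacking() returns predictions from several quick helper models fitted on
        ### part_train; each returned column (addcol) is appended as a new predictor for the final model rather
        ### than being averaged into a single column.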
###############################################################################
#### part train contains the unscaled original train. It also contains binned and orig_num_vars!
        #### DO NOT TOUCH part_train and part_cv -> we need them to recreate train later!
####################### Now do Feature Scaling Here #################################
part_train_scaled, part_cv_scaled = perform_scaling_numeric_vars(part_train, important_features,
part_cv, model_name, SS)
#### part_train_scaled has both predictor and target variables. Target must be removed!
important_features = find_remove_duplicates(important_features)
X_train = part_train_scaled[important_features]
X_cv = part_cv_scaled[important_features]
#### Remember that the next 2 lines are crucial: if X and y are dataframes, then predict_proba
### will return dataframes or series. Otherwise it will return Numpy array's.
## Be consistent when using dataframes with XGB. That's the best way to keep feature names!
print('############### M O D E L B U I L D I N G B E G I N S ####################')
print('Rows in Train data set = %d' %X_train.shape[0])
print(' Features in Train data set = %d' %X_train.shape[1])
print(' Rows in held-out data set = %d' %X_cv.shape[0])
data_dim = X_train.shape[0]*X_train.shape[1]
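        ### data_dim (rows x columns) is used further below only to print a rough estimate of training time.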
### Setting up the Estimators for Single Label and Multi Label targets only
if modeltype == 'Regression':
metrics_list = ['neg_mean_absolute_error' ,'neg_mean_squared_error',
'neg_mean_squared_log_error','neg_median_absolute_error']
eval_metric = "rmse"
if scoring_parameter == 'neg_mean_absolute_error' or scoring_parameter =='mae':
meae_scorer = make_scorer(gini_meae, greater_is_better=False)
scorer = meae_scorer
elif scoring_parameter == 'neg_mean_squared_error' or scoring_parameter =='mse':
mse_scorer = make_scorer(gini_mse, greater_is_better=False)
scorer = mse_scorer
elif scoring_parameter == 'neg_mean_squared_log_error' or scoring_parameter == 'log_error':
msle_scorer = make_scorer(gini_msle, greater_is_better=False)
                print('    Log Error is not recommended since predicted values might be negative and cause an error. Using RMSE instead...')
rmse_scorer = make_scorer(gini_rmse, greater_is_better=False)
scorer = rmse_scorer
elif scoring_parameter == 'neg_median_absolute_error' or scoring_parameter == 'median_error':
mae_scorer = make_scorer(gini_mae, greater_is_better=False)
scorer = mae_scorer
elif scoring_parameter =='rmse' or scoring_parameter == 'root_mean_squared_error':
rmse_scorer = make_scorer(gini_rmse, greater_is_better=False)
scorer = rmse_scorer
else:
scoring_parameter = 'rmse'
rmse_scorer = make_scorer(gini_rmse, greater_is_better=False)
scorer = rmse_scorer
#### HYPER PARAMETERS FOR TUNING ARE SETUP HERE ###
if hyper_param == 'GS':
r_params = {
"Forests": {
"n_estimators" : np.linspace(100, max_estims, n_steps, dtype = "int"),
"max_depth": [3, 5, max_depth],
#"criterion" : ['mse','mae'],
},
"Linear": {
'alpha': np.logspace(-5,3),
},
"XGBoost": {
'learning_rate': np.linspace(0.1,0.5,5),
'gamma': np.linspace(0, 32,7).astype(int),
"max_depth": [3, 5, max_depth],
},
"CatBoost": {
'learning_rate': np.logspace(Alpha_min,Alpha_max,40),
},
}
else:
import scipy as sp
r_params = {
"Forests": {
'n_estimators': sp.stats.randint(100,max_estims),
"max_depth": sp.stats.randint(1, 10),
"min_samples_leaf": sp.stats.randint(1, 20),
#"criterion" : ['mse','mae'],
},
"Linear": {
'alpha': sp.stats.uniform(scale=1000),
},
"XGBoost": {
'learning_rate': sp.stats.uniform(scale=1),
'gamma': sp.stats.randint(0, 32),
'n_estimators': sp.stats.randint(100,max_estims),
"max_depth": sp.stats.randint(2, 10),
},
"CatBoost": {
'learning_rate': np.logspace(Alpha_min,Alpha_max,40),
},
}
if Boosting_Flag:
if model_name.lower() == 'catboost':
xgbm = CatBoostRegressor(verbose=1,iterations=max_estims,random_state=99,
one_hot_max_size=one_hot_size,
loss_function=loss_function, eval_metric=catboost_scoring,
subsample=0.7,bootstrap_type='Bernoulli',
metric_period = 100,
early_stopping_rounds=250,boosting_type='Plain')
else:
xgbm = XGBRegressor(seed=seed,n_jobs=-1,random_state=seed,subsample=subsample,
colsample_bytree=col_sub_sample,n_estimators=max_estims,
objective=objective)
xgbm.set_params(**param)
elif Boosting_Flag is None:
xgbm = Lasso(max_iter=max_iter,random_state=seed)
else:
xgbm = RandomForestRegressor(
**{
'bootstrap': bootstrap, 'n_jobs': -1, 'warm_start': warm_start,
'random_state':seed,'min_samples_leaf':2,
'max_features': "sqrt"
})
else:
#### This is for Binary Classification ##############################
classes = label_dict[each_target]['classes']
metrics_list = ['accuracy_score','roc_auc_score','logloss', 'precision','recall','f1']
# Create regularization hyperparameter distribution with 50 C values ####
if hyper_param == 'GS':
c_params['XGBoost'] = {
'learning_rate': np.linspace(0.1,0.5,5),
'gamma': np.linspace(0, 32,7).astype(int),
"max_depth": [3, 5, max_depth],
}
c_params["CatBoost"] = {
'learning_rate': np.logspace(Alpha_min,Alpha_max,40),
}
if Imbalanced_Flag:
c_params['Linear'] = {
'C': Cs,
'solver' : solvers,
'penalty' : penalties,
'class_weight':[None, 'balanced'],
}
else:
c_params['Linear'] = {
'C': Cs,
'class_weight':[None, 'balanced'],
'penalty' : penalties,
'solver' : solvers,
}
c_params["Forests"] = {
##### I have selected these to avoid Overfitting which is a problem for small data sets
"n_estimators" : np.linspace(100, max_estims, n_steps, dtype = "int"),
"max_depth": [3, 5, max_depth],
#'max_features': [1,2,5, max_features],
#"criterion":['gini','entropy'],
}
else:
import scipy as sp
c_params['XGBoost'] = {
'learning_rate': sp.stats.uniform(scale=1),
'gamma': sp.stats.randint(0, 32),
'n_estimators': sp.stats.randint(100,max_estims),
"max_depth": sp.stats.randint(1, 10),
}
c_params["CatBoost"] = {
'learning_rate': sp.stats.uniform(scale=1),
}
if Imbalanced_Flag:
c_params['Linear'] = {
'C': sp.stats.uniform(scale=100),
'solver' : solvers,
'penalty' : penalties,
'class_weight':[None, 'balanced'],
}
else:
c_params['Linear'] = {
'C': sp.stats.uniform(scale=100),
'penalty' : penalties,
'solver' : solvers,
}
c_params["Forests"] = {
##### I have selected these to avoid Overfitting which is a problem for small data sets
'n_estimators': sp.stats.randint(100,max_estims),
"max_depth": sp.stats.randint(1, 10),
"min_samples_leaf": sp.stats.randint(1, 20),
#"criterion":['gini','entropy'],
#'max_features': ['log', "sqrt"] ,
#'class_weight':[None,'balanced']
}
# Create regularization hyperparameter distribution using uniform distribution
if len(classes) == 2:
objective = 'binary:logistic'
if scoring_parameter == 'accuracy' or scoring_parameter == 'accuracy_score':
accuracy_scorer = make_scorer(gini_accuracy, greater_is_better=True, needs_proba=False)
scorer =accuracy_scorer
elif scoring_parameter == 'gini':
gini_scorer = make_scorer(gini_sklearn, greater_is_better=False, needs_proba=True)
scorer =gini_scorer
elif scoring_parameter == 'auc' or scoring_parameter == 'roc_auc' or scoring_parameter == 'roc_auc_score':
roc_scorer = make_scorer(gini_roc, greater_is_better=True, needs_threshold=True)
scorer =roc_scorer
elif scoring_parameter == 'log_loss' or scoring_parameter == 'logloss':
scoring_parameter = 'neg_log_loss'
logloss_scorer = make_scorer(gini_log_loss, greater_is_better=False, needs_proba=False)
scorer =logloss_scorer
elif scoring_parameter=='balanced_accuracy' or scoring_parameter=='balanced-accuracy' or scoring_parameter=='average_accuracy':
bal_accuracy_scorer = make_scorer(gini_bal_accuracy, greater_is_better=True,
needs_proba=False)
scorer = bal_accuracy_scorer
elif scoring_parameter == 'precision' or scoring_parameter == 'precision_score':
precision_scorer = make_scorer(gini_precision, greater_is_better=True, needs_proba=False,
pos_label=rare_class)
scorer =precision_scorer
elif scoring_parameter == 'recall' or scoring_parameter == 'recall_score':
recall_scorer = make_scorer(gini_recall, greater_is_better=True, needs_proba=False,
pos_label=rare_class)
scorer =recall_scorer
elif scoring_parameter == 'f1' or scoring_parameter == 'f1_score':
f1_scorer = make_scorer(gini_f1, greater_is_better=True, needs_proba=False,
pos_label=rare_class)
scorer =f1_scorer
elif scoring_parameter == 'f2' or scoring_parameter == 'f2_score':
f2_scorer = make_scorer(f2_measure, greater_is_better=True, needs_proba=False)
scorer =f2_scorer
else:
logloss_scorer = make_scorer(gini_log_loss, greater_is_better=False, needs_proba=False)
scorer =logloss_scorer
#f1_scorer = make_scorer(gini_f1, greater_is_better=True, needs_proba=False,
# pos_label=rare_class)
#scorer = f1_scorer
### DO NOT USE NUM CLASS WITH BINARY CLASSIFICATION ######
if Boosting_Flag:
if model_name.lower() == 'catboost':
xgbm = CatBoostClassifier(verbose=1,iterations=max_estims,
random_state=99,one_hot_max_size=one_hot_size,
loss_function=loss_function, eval_metric=catboost_scoring,
subsample=0.7,bootstrap_type='Bernoulli',
metric_period = 100,
early_stopping_rounds=250,boosting_type='Plain')
else:
xgbm = XGBClassifier(base_score=0.5, booster='gbtree', subsample=subsample,
colsample_bytree=col_sub_sample,gamma=1, learning_rate=0.1, max_delta_step=0,
max_depth=max_depth, min_child_weight=1, missing=-999, n_estimators=max_estims,
n_jobs=-1, nthread=None, objective=objective,
random_state=1, reg_alpha=0.5, reg_lambda=0.5, scale_pos_weight=1,
seed=1)
xgbm.set_params(**param)
elif Boosting_Flag is None:
#### I have set the Verbose to be False here since it produces too much output ###
xgbm = LogisticRegression(random_state=seed,verbose=False,n_jobs=-1,solver=solver,
fit_intercept=True, tol=tolerance,
warm_start=warm_start, max_iter=max_iter)
else:
xgbm = RandomForestClassifier(
**{
'bootstrap': bootstrap, 'n_jobs': -1, 'warm_start': warm_start,
'random_state':seed,'min_samples_leaf':2,'oob_score':True,
'max_features': "sqrt"
})
else:
##### This is for MULTI Classification ##########################
objective = 'multi:softmax'
eval_metric = "mlogloss"
if scoring_parameter == 'gini':
gini_scorer = make_scorer(gini_sklearn, greater_is_better=False, needs_proba=True)
scorer = gini_scorer
elif scoring_parameter=='balanced_accuracy' or scoring_parameter=='balanced-accuracy' or scoring_parameter=='average_accuracy':
bal_accuracy_scorer = make_scorer(gini_bal_accuracy, greater_is_better=True,
needs_proba=False)
scorer = bal_accuracy_scorer
elif scoring_parameter == 'roc_auc' or scoring_parameter == 'roc_auc_score':
roc_auc_scorer = make_scorer(gini_sklearn, greater_is_better=False, needs_proba=True)
scorer = roc_auc_scorer
elif scoring_parameter == 'average_precision' or scoring_parameter == 'mean_precision':
average_precision_scorer = make_scorer(gini_average_precision,
greater_is_better=True, needs_proba=True)
scorer = average_precision_scorer
elif scoring_parameter == 'samples_precision':
samples_precision_scorer = make_scorer(gini_samples_precision,
greater_is_better=True, needs_proba=True)
scorer = samples_precision_scorer
elif scoring_parameter == 'weighted_precision' or scoring_parameter == 'weighted-precision':
weighted_precision_scorer = make_scorer(gini_weighted_precision,
greater_is_better=True, needs_proba=True)
scorer = weighted_precision_scorer
elif scoring_parameter == 'macro_precision':
macro_precision_scorer = make_scorer(gini_macro_precision,
greater_is_better=True, needs_proba=True)
scorer = macro_precision_scorer
elif scoring_parameter == 'micro_precision':
                    micro_precision_scorer = make_scorer(gini_micro_precision,
                                             greater_is_better=True, needs_proba=True)
                    scorer = micro_precision_scorer
elif scoring_parameter == 'samples_recall':
samples_recall_scorer = make_scorer(gini_samples_recall, greater_is_better=True, needs_proba=True)
scorer = samples_recall_scorer
elif scoring_parameter == 'weighted_recall' or scoring_parameter == 'weighted-recall':
weighted_recall_scorer = make_scorer(gini_weighted_recall,
greater_is_better=True, needs_proba=True)
scorer = weighted_recall_scorer
elif scoring_parameter == 'macro_recall':
macro_recall_scorer = make_scorer(gini_macro_recall,
greater_is_better=True, needs_proba=True)
scorer = macro_recall_scorer
elif scoring_parameter == 'micro_recall':
micro_recall_scorer = make_scorer(gini_micro_recall, greater_is_better=True, needs_proba=True)
scorer = micro_recall_scorer
elif scoring_parameter == 'samples_f1':
samples_f1_scorer = make_scorer(gini_samples_f1,
greater_is_better=True, needs_proba=True)
scorer = samples_f1_scorer
elif scoring_parameter == 'weighted_f1' or scoring_parameter == 'weighted-f1':
weighted_f1_scorer = make_scorer(gini_weighted_f1,
greater_is_better=True, needs_proba=True)
scorer = weighted_f1_scorer
elif scoring_parameter == 'macro_f1':
macro_f1_scorer = make_scorer(gini_macro_f1,
greater_is_better=True, needs_proba=True)
scorer = macro_f1_scorer
elif scoring_parameter == 'micro_f1':
micro_f1_scorer = make_scorer(gini_micro_f1,
greater_is_better=True, needs_proba=True)
scorer = micro_f1_scorer
else:
weighted_f1_scorer = make_scorer(gini_weighted_f1,
greater_is_better=True, needs_proba=True)
scorer = weighted_f1_scorer
import scipy as sp
if Boosting_Flag:
# Create regularization hyperparameter distribution using uniform distribution
if hyper_param == 'GS':
c_params['XGBoost'] = {
'learning_rate': np.linspace(0.1,0.5,5),
'gamma': np.linspace(0, 32,7).astype(int),
"max_depth": [3, 5, max_depth],
}
c_params["CatBoost"] = {
'learning_rate': np.logspace(Alpha_min,Alpha_max,40),
}
else:
import scipy as sp
c_params['XGBoost'] = {
'learning_rate': sp.stats.uniform(scale=1),
'gamma': sp.stats.randint(0, 32),
'n_estimators': sp.stats.randint(100, max_estims),
'max_depth': sp.stats.randint(1, 10)
}
c_params['CatBoost'] = {
'learning_rate': sp.stats.uniform(scale=1),
}
if model_name.lower() == 'catboost':
xgbm = CatBoostClassifier(verbose=1,iterations=max_estims,
random_state=99,one_hot_max_size=one_hot_size,
loss_function=loss_function, eval_metric=catboost_scoring,
subsample=0.7,bootstrap_type='Bernoulli',
metric_period = 100,
early_stopping_rounds=250,boosting_type='Plain')
else:
xgbm = XGBClassifier(base_score=0.5, booster='gbtree', subsample=subsample,
colsample_bytree=col_sub_sample, gamma=1, learning_rate=0.1, max_delta_step=0,
max_depth=max_depth, min_child_weight=1, missing=-999, n_estimators=max_estims,
n_jobs=-1, nthread=None, objective=objective,
random_state=1, reg_alpha=0.5, reg_lambda=0.5, scale_pos_weight=1,
num_class= len(classes),
seed=1)
xgbm.set_params(**param)
elif Boosting_Flag is None:
if hyper_param == 'GS':
if Imbalanced_Flag:
c_params['Linear'] = {
'C': Cs,
'class_weight':[None, 'balanced'],
}
else:
c_params['Linear'] = {
'C': Cs,
}
else:
if Imbalanced_Flag:
c_params['Linear'] = {
'C': sp.stats.uniform(scale=100),
'class_weight':[None, 'balanced'],
}
else:
c_params['Linear'] = {
'C': sp.stats.uniform(scale=100),
}
#### I have set the Verbose to be False here since it produces too much output ###
xgbm = LogisticRegression(random_state=seed,verbose=False,n_jobs=-1,solver=solver,
fit_intercept=True, tol=tolerance, multi_class='auto',
max_iter=max_iter, warm_start=False,
)
else:
if hyper_param == 'GS':
c_params["Forests"] = {
##### I have selected these to avoid Overfitting which is a problem for small data sets
"n_estimators" : np.linspace(100, max_estims, n_steps, dtype = "int"),
"max_depth": [3, 5, max_depth],
#"criterion":['gini','entropy'],
}
else:
c_params["Forests"] = {
##### I have set these to avoid OverFitting which is a problem for small data sets ###
'n_estimators': sp.stats.randint(100,max_estims),
"max_depth": sp.stats.randint(1, 10),
"min_samples_leaf": sp.stats.randint(1, 20),
#"criterion":['gini','entropy'],
#'class_weight':[None,'balanced']
}
xgbm = RandomForestClassifier(bootstrap=bootstrap, oob_score=True,warm_start=warm_start,
n_estimators=100,max_depth=3,
                                   min_samples_leaf=2, max_features='sqrt',
random_state=seed,n_jobs=-1)
        ###### Now set up GridSearchCV / RandomizedSearchCV with Early-stopping ################
if modeltype == 'Regression':
#scoreFunction = {"mse": "neg_mean_squared_error", "mae": "neg_mean_absolute_error"}
#### I have set the Verbose to be False here since it produces too much output ###
if hyper_param == 'GS':
#### I have set the Verbose to be False here since it produces too much output ###
gs = GridSearchCV(xgbm,param_grid=r_params[model_name],
scoring = scorer,
n_jobs=-1,
cv = scv,
refit = refit_metric,
return_train_score = True,
verbose=0)
elif hyper_param == 'RS':
gs = RandomizedSearchCV(xgbm,
param_distributions = r_params[model_name],
n_iter = no_iter,
scoring = scorer,
refit = refit_metric,
return_train_score = True,
random_state = seed,
cv = scv,
n_jobs=-1,
verbose = 0)
else:
#### CatBoost does not need Hyper Parameter tuning => it's great out of the box!
gs = copy.deepcopy(xgbm)
else:
if hyper_param == 'GS':
#### I have set the Verbose to be False here since it produces too much output ###
gs = GridSearchCV(xgbm,param_grid=c_params[model_name],
scoring = scorer,
return_train_score = True,
n_jobs=-1,
refit = refit_metric,
cv = scv,
verbose=0)
elif hyper_param == 'RS':
#### I have set the Verbose to be False here since it produces too much output ###
gs = RandomizedSearchCV(xgbm,
param_distributions = c_params[model_name],
n_iter = no_iter,
scoring = scorer,
refit = refit_metric,
return_train_score = True,
random_state = seed,
n_jobs=-1,
cv = scv,
verbose = 0)
else:
#### CatBoost does not need Hyper Parameter tuning => it's great out of the box!
gs = copy.deepcopy(xgbm)
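        ### gs is now either a GridSearchCV / RandomizedSearchCV wrapper around xgbm, or (for CatBoost)
        ### the bare estimator itself, so the fitting code below treats all three cases uniformly.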
#trains and optimizes the model
eval_set = [(X_train,y_train),(X_cv,y_cv)]
print('Finding Best Model and Hyper Parameters for Target: %s...' %each_target)
##### Here is where we put the part_train and part_cv together ###########
if modeltype != 'Regression':
### Do this only for Binary Classes and Multi-Classes, both are okay
baseline_accu = 1-(train[each_target].value_counts(1).sort_values())[rare_class]
print(' Baseline Accuracy Needed for Model = %0.2f%%' %(baseline_accu*100))
print('CPU Count = %s in this device' %CPU_count)
if modeltype == 'Regression':
if Boosting_Flag:
if model_name.lower() == 'catboost':
data_dim = data_dim*one_hot_size/len(preds)
print('Using %s Model, Estimated Training time = %0.3f mins' %(model_name,data_dim*max_class_length/(3000000.*CPU_count)))
else:
print('Using %s Model, Estimated Training time = %0.3f mins' %(model_name,data_dim*max_class_length/(50000.*CPU_count)))
elif Boosting_Flag is None:
print('Using %s Model, Estimated Training time = %0.3f mins' %(model_name,data_dim*max_class_length/(80000.*CPU_count)))
else:
print('Using %s Model, Estimated Training time = %0.3f mins' %(model_name,data_dim*max_class_length/(40000.*CPU_count)))
else:
if hyper_param == 'GS':
if Boosting_Flag:
if model_name.lower() == 'catboost':
data_dim = data_dim*one_hot_size/len(preds)
print('Using %s Model, Estimated Training time = %0.3f mins' %(model_name,data_dim*max_class_length/(300000.*CPU_count)))
else:
print('Using %s Model, Estimated Training time = %0.3f mins' %(model_name,data_dim*max_class_length/(10000.*CPU_count)))
elif Boosting_Flag is None:
#### A Linear model is usually the fastest ###########
print('Using %s Model, Estimated Training time = %0.3f mins' %(model_name,data_dim*max_class_length/(50000.*CPU_count)))
else:
print('Using %s Model, Estimated Training time = %0.3f mins' %(model_name,data_dim*max_class_length/(16000.*CPU_count)))
else:
if Boosting_Flag:
if model_name.lower() == 'catboost':
data_dim = data_dim*one_hot_size/len(preds)
print('Using %s Model, Estimated Training time = %0.2f mins' %(model_name,data_dim*max_class_length/(3000000.*CPU_count)))
else:
print('Using %s Model, Estimated Training time = %0.2f mins' %(model_name,data_dim*max_class_length/(40000.*CPU_count)))
elif Boosting_Flag is None:
print('Using %s Model, Estimated Training time = %0.2f mins' %(model_name,data_dim*max_class_length/(100000.*CPU_count)))
else:
print('Using %s Model, Estimated Training time = %0.2f mins' %(model_name,data_dim*max_class_length/(25000.*CPU_count)))
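        ### The divisors above (e.g. 50000, 300000) are rough empirical throughput constants: the estimated
        ### minutes scale linearly with data_dim * number of classes and inversely with the CPU count.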
##### Since we are using Multiple Models each with its own quirks, we have to make sure it is done this way
##### ############ TRAINING MODEL FIRST TIME WITH X_TRAIN AND TESTING ON X_CV ############
model_start_time = time.time()
################################################################################################################################
##### BE VERY CAREFUL ABOUT MODIFYING THIS NEXT LINE JUST BECAUSE IT APPEARS TO BE A CODING MISTAKE. IT IS NOT!! #############
################################################################################################################################
#######
if Imbalanced_Flag:
if modeltype == 'Regression':
########### In case someone sets the Imbalanced_Flag mistakenly to True and it is Regression, you must set it to False ######
Imbalanced_Flag = False
else:
####### Imbalanced with Classification #################
try:
print('############## Imbalanced Flag on: Training model with SMOTE Oversampling method ###########')
                    #### The model returned here is trained on the SMOTE-resampled (oversampled) data set. ####
model, X_train, y_train = training_with_SMOTE(X_train,y_train,eval_set, gs,
Boosting_Flag, eval_metric,
modeltype, model_name,training=True,
minority_class=rare_class,imp_cats=imp_cats,
calibrator_flag=calibrator_flag,
GPU_exists=GPU_exists, params = cpu_params,
verbose=verbose)
if isinstance(model, str):
model = copy.deepcopy(gs)
                        #### If the SMOTE model failed, it will just be an empty string, so we fall back to the regular model ###
print('Error in training Imbalanced model first time. Trying regular model..')
Imbalanced_Flag = False
if Boosting_Flag:
if model_name == 'XGBoost':
#### Set the Verbose to 0 since we don't want too much output ##
try:
model.fit(X_train, y_train, early_stopping_rounds=early_stopping,
eval_metric=eval_metric,eval_set=eval_set,verbose=0)
except:
#### On Colab, even though GPU exists, many people don't turn it on.
#### In that case, XGBoost blows up when gpu_predictor is used.
#### This is to turn it back to cpu_predictor in case GPU errors!
if GPU_exists:
print('Error: GPU exists but it is not turned on. Using CPU for predictions...')
model.estimator.set_params(**cpu_params)
model.fit(X_train,y_train, early_stopping_rounds=early_stopping,
eval_metric=eval_metric,eval_set=eval_set,verbose=False)
else:
model.fit(X_train,y_train,
eval_metric=eval_metric, verbose=False)
else:
try:
model.fit(X_train, y_train,
cat_features=imp_cats,eval_set=(X_cv,y_cv), use_best_model=True,plot=True)
except:
model.fit(X_train, y_train, cat_features=imp_cats,use_best_model=False,plot=False)
else:
model.fit(X_train, y_train)
                    #### If SMOTE training succeeds, its model is used to get the best score below ##
if hyper_param == 'RS' or hyper_param == 'GS':
best_score = model.best_score_
else:
val_keys = list(model.best_score_.keys())
best_score = model.best_score_[val_keys[-1]][validation_metric]
except:
print('Error in training Imbalanced model first time. Trying regular model..')
Imbalanced_Flag = False
best_score = 0
################################################################################################################################
####### Though this next step looks like it is a Coding Mistake by Me, don't change it!!! ###################
####### This is for case when Imbalanced with Classification succeeds, this next step is skipped ############
################################################################################################################################
if not Imbalanced_Flag:
########### This is for both regular Regression and regular Classification Model Training. It is not a Mistake #############
########### In case Imbalanced training fails, this method is also tried. That's why we test the Flag here!! #############
try:
model = copy.deepcopy(gs)
if Boosting_Flag:
if model_name == 'XGBoost':
try:
#### Set the Verbose to 0 since we don't want too much output ##
model.fit(X_train, y_train, early_stopping_rounds=early_stopping,
eval_metric=eval_metric,eval_set=eval_set,verbose=0)
except:
#### On Colab, even though GPU exists, many people don't turn it on.
#### In that case, XGBoost blows up when gpu_predictor is used.
#### This is to turn it back to cpu_predictor in case GPU errors!
if GPU_exists:
print('Error: GPU exists but it is not turned on. Using CPU for predictions...')
model.estimator.set_params(**cpu_params)
model.fit(X_train,y_train, early_stopping_rounds=early_stopping,
eval_metric=eval_metric,eval_set=eval_set,verbose=False)
else:
model.fit(X_train,y_train,
eval_metric=eval_metric, verbose=False)
else:
try:
model.fit(X_train, y_train, cat_features=imp_cats,
eval_set=(X_cv,y_cv), use_best_model=True, plot=True)
except:
model.fit(X_train, y_train, cat_features=imp_cats, use_best_model=False, plot=False)
else:
model.fit(X_train, y_train)
except:
                print('Error training the regular model the first time: check if your input is correct...')
return
try:
if hyper_param == 'RS' or hyper_param == 'GS':
best_score = model.best_score_
validation_metric = copy.deepcopy(scoring_parameter)
else:
val_keys = list(model.best_score_.keys())
if 'validation' in val_keys:
validation_metric = list(model.best_score_['validation'].keys())[0]
best_score = model.best_score_['validation'][validation_metric]
else:
validation_metric = list(model.best_score_['learn'].keys())[0]
best_score = model.best_score_['learn'][validation_metric]
except:
print('Error: Not able to print validation metrics. Continuing...')
## TRAINING OF MODELS COMPLETED. NOW GET METRICS on CV DATA ################
print(' Actual training time (in seconds): %0.0f' %(time.time()-model_start_time))
print('########### S I N G L E M O D E L R E S U L T S #################')
if modeltype != 'Regression':
############## This is for Classification Only !! ########################
if scoring_parameter in ['logloss','neg_log_loss','log_loss','log-loss','']:
print('{}-fold Cross Validation {} = {}'.format(n_splits, 'logloss', best_score))
elif scoring_parameter in ['accuracy','balanced-accuracy','balanced_accuracy','roc_auc','roc-auc',
'f1','precision','recall','average-precision','average_precision',
'weighted_f1','weighted-f1','AUC']:
print('%d-fold Cross Validation %s = %0.1f%%' %(n_splits,scoring_parameter, best_score*100))
else:
print('%d-fold Cross Validation %s = %0.1f' %(n_splits,validation_metric, best_score))
else:
######### This is for Regression only ###############
if best_score < 0:
best_score = best_score*-1
if scoring_parameter == '':
print('%d-fold Cross Validation %s Score = %0.4f' %(n_splits,'RMSE', best_score))
else:
print('%d-fold Cross Validation %s Score = %0.4f' %(n_splits,validation_metric, best_score))
#### We now need to set the Best Parameters, Fit the Model on Full X_train and Predict on X_cv
### Find what the order of best params are and set the same as the original model ###
if hyper_param == 'RS' or hyper_param == 'GS':
best_params= model.best_params_
print(' Best Parameters for Model = %s' %model.best_params_)
else:
#### CatBoost does not need Hyper Parameter tuning => it's great out of the box!
#### CatBoost does not need too many iterations. Just make sure you set the iterations low after the first time!
if model.get_best_iteration() == 0:
### In some small data sets, the number of iterations becomes zero, hence we set it as a default number
best_params = dict(zip(['iterations','learning_rate'],[1000,model.get_all_params()['learning_rate']]))
else:
best_params = dict(zip(['iterations','learning_rate'],[model.get_best_iteration(),model.get_all_params()['learning_rate']]))
print(' %s Best Parameters for Model: Iterations = %s, learning_rate = %0.2f' %(
model_name, model.get_best_iteration(), model.get_all_params()['learning_rate']))
if hyper_param == 'RS' or hyper_param == 'GS':
            #### For RS/GS searches, extract the best estimator here (CatBoost skips hyper parameter tuning) #########
gs = copy.deepcopy(model)
model = gs.best_estimator_
if modeltype == 'Multi_Classification':
try:
if X_cv.shape[0] <= 1000:
                        # This works well for small data sets and is similar to a parametric approach
method= 'sigmoid' # 'isotonic' # #
else:
                        # This works well for large data sets and is non-parametric
method= 'isotonic'
model = CalibratedClassifierCV(model, method=method, cv="prefit")
model.fit(X_train, y_train)
print('Using a Calibrated Classifier in this Multi_Classification dataset to improve results...')
calibrator_flag = True
except:
calibrator_flag = False
pass
### Make sure you set this flag as False so that when ensembling is completed, this flag is True ##
if model_name.lower() == 'catboost':
print('Best Model selected and its parameters are:\n %s' %model.get_all_params())
else:
print('Best Model selected and its parameters are:\n %s' %model)
performed_ensembling = False
if modeltype != 'Regression':
m_thresh = 0.5
y_proba = model.predict_proba(X_cv)
y_pred = model.predict(X_cv)
if len(classes) <= 2:
print('Finding Best Threshold for Highest F1 Score...')
precision, recall, thresholds = precision_recall_curve(y_cv, y_proba[:,rare_class])
#precision, recall, thresholds = precision_recall_curve(y_cv, y_proba[:,1])
try:
f1 = (2*precision*recall)/(precision+recall)
f1 = np.nan_to_num(f1)
m_idx = np.argmax(f1)
m_thresh = thresholds[m_idx]
best_f1 = f1[m_idx]
except:
best_f1 = f1_score(y_cv, y_pred)
m_thresh = 0.5
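                # Illustrative sketch of the threshold search above (hypothetical names, not executed here):
                #   precision, recall, thresholds = precision_recall_curve(y_true, y_scores)
                #   f1 = np.nan_to_num((2 * precision * recall) / (precision + recall))
                #   m_thresh = thresholds[np.argmax(f1)]   # probability cut-off that maximizes F1
                # np.nan_to_num guards against precision+recall == 0, and the try/except falls back
                # to f1_score at the default 0.5 threshold if the search fails for any reason.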
# retrieve just the probabilities for the positive class
pos_probs = y_proba[:, rare_class]
if verbose >= 1:
# create a histogram of the predicted probabilities for the Rare Class since it will help decide threshold
plt.figure(figsize=(6,6))
plt.hist(pos_probs, bins=Bins, color='g')
plt.title("Model's Predictive Probability Histogram for Rare Class=%s with suggested threshold in red" %rare_class_orig)
plt.axvline(x=m_thresh, color='r', linestyle='--')
plt.show();
print(" Using threshold=0.5. However, %0.3f provides better F1=%0.2f for rare class..." %(m_thresh,best_f1))
###y_pred = (y_proba[:,rare_class]>=m_thresh).astype(int)
predicted = copy.deepcopy(y_proba)
predicted [:,0] = (predicted [:,0] >= (1-m_thresh)).astype('int')
predicted [:,1] = (predicted [:,1] > m_thresh).astype('int')
if m_thresh != 0.5:
y_pred = predicted[:,rare_class]
else:
y_proba = model.predict_proba(X_cv)
y_pred = model.predict(X_cv)
else:
y_pred = model.predict(X_cv)
### This is where you print out the First Model's Results ########
print('########################################################')
##### This next step is very important since some models give series, others give arrays. Very painful!
if isinstance(y_pred,pd.Series):
y_pred = y_pred.values
if isinstance(y_cv,pd.Series):
y_cv = y_cv.values
print('%s Model Prediction Results on Held Out CV Data Set:' %model_name)
if modeltype == 'Regression':
rmsle_calculated_m = rmse(y_cv, y_pred)
print_regression_model_stats(y_cv, y_pred,'%s Model: Predicted vs Actual for %s'%(model_name,each_target))
else:
if model_name == 'Forests':
if calibrator_flag:
print(' OOB Score = %0.3f' %model.base_estimator.oob_score_)
else:
print(' OOB Score = %0.3f' %model.oob_score_)
rmsle_calculated_m = balanced_accuracy_score(y_cv,y_pred)
if len(classes) == 2:
print(' Regular Accuracy Score = %0.1f%%' %(accuracy_score(y_cv,y_pred)*100))
y_probas = model.predict_proba(X_cv)
rmsle_calculated_m = print_classification_model_stats(y_cv, y_probas, m_thresh)
else:
###### Use a nice classification matrix printing module here #########
print(' Balanced Accuracy Score = %0.1f%%' %(rmsle_calculated_m*100))
print(classification_report(y_cv,y_pred))
print(confusion_matrix(y_cv, y_pred))
###### SET BEST PARAMETERS HERE ######
### Find what the order of best params are and set the same as the original model ###
## This is where we set the best parameters from training to the model ####
if modeltype == 'Regression':
if not Stacking_Flag:
print('################# E N S E M B L E M O D E L ##################')
try:
cols = []
subm = pd.DataFrame()
#### This is for Ensembling Only #####
models_list, cv_ensembles = QuickML_Ensembling(X_train, y_train, X_cv, y_cv,
modeltype=modeltype, Boosting_Flag=Boosting_Flag,
scoring='', verbose=verbose)
models_list.append(model_name)
for models, each in zip(models_list, range(len(models_list))):
new_col = each_target+'_'+models+'_predictions'
if each+1 == len(models_list):
subm[new_col] = y_pred
else:
subm[new_col] = cv_ensembles[:,each]
cols.append(new_col)
if len(cols) == 5:
print(' Displaying results of weighted average ensemble of %d regressors' %len(cols))
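                        # Weighted blend: the single best model gets weight 0.5 and each of the four
                        # QuickML ensemble members gets 0.125, so the weights sum to 0.5 + 4*0.125 = 1.0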
ensem_pred = subm[cols[-1]]*0.5+0.125*(subm[cols[0]]+subm[
cols[1]]+subm[cols[2]]+subm[cols[3]])
else:
print(' Calculating regular average ensemble of %d regressors' %len(cols))
ensem_pred = (subm[cols].mean(axis=1))
print('#############################################################################')
performed_ensembling = True
#### Since we have a new ensembled y_pred, make sure it is series or array before printing it!
if isinstance(y_pred,pd.Series):
print_regression_model_stats(y_cv, ensem_pred.values,'Ensemble Model: Model Predicted vs Actual for %s' %each_target)
else:
print_regression_model_stats(y_cv, ensem_pred,'Ensemble Model: Model Predicted vs Actual for %s' %each_target)
except:
print('Could not complete Ensembling predictions on held out data due to Error')
else:
## This is for Classification Problems Only #
### Find what the order of best params are and set the same as the original model ###
## This is where we set the best parameters from training to the model ####
if not Stacking_Flag:
print('################# E N S E M B L E M O D E L ##################')
#### We do Ensembling only if the Stacking_Flag is False. Otherwise, we don't!
try:
classes = label_dict[each_target]['classes']
cols = []
subm = pd.DataFrame()
#### This is for Ensembling Only #####
if len(classes) == 2:
models_list, cv_ensembles = QuickML_Ensembling(X_train, y_train, X_cv, y_cv,
modeltype='Binary_Classification', Boosting_Flag=Boosting_Flag,
scoring='', verbose=verbose)
else:
models_list, cv_ensembles = QuickML_Ensembling(X_train, y_train, X_cv, y_cv,
modeltype='Multi_Classification', Boosting_Flag=Boosting_Flag,
scoring='', verbose=verbose)
models_list.append(model_name)
for models, each in zip(models_list, range(len(models_list))):
new_col = each_target+'_'+models+'_predictions'
if each+1 == len(models_list):
subm[new_col] = y_pred
else:
subm[new_col] = cv_ensembles[:,each]
cols.append(new_col)
if len(cols) == 5:
print(' Displaying results of weighted average ensemble of %d classifiers' %len(cols))
ensem_pred = np.round(subm[cols[-1]]*0.5+0.125*(subm[cols[0]]+subm[
cols[1]]+subm[cols[2]]+subm[cols[3]])).astype(int)
else:
print(' Calculating regular average ensemble of %d classifiers' %len(cols))
ensem_pred = (subm[cols].mean(axis=1)).astype(int)
print('#############################################################################')
performed_ensembling = True
##### This next step is very important since some models give series, others give arrays. Very painful!
if isinstance(ensem_pred,pd.Series):
ensem_pred = ensem_pred.values
except:
print('Could not complete Ensembling predictions on held out data due to Error')
else:
print('No Ensembling of models done since Stacking_Flag = True ')
if verbose >= 1:
if len(classes) == 2:
plot_classification_results(model,X_cv, y_cv, y_pred, classes, class_nums, each_target )
else:
try:
Draw_ROC_MC_ML(model, X_cv, y_cv, each_target, model_name, verbose)
Draw_MC_ML_PR_ROC_Curves(model,X_cv,y_cv)
except:
print('Could not plot PR and ROC curves. Continuing...')
#### In case there are special scoring_parameter requests, you can print it here!
if scoring_parameter == 'roc_auc' or scoring_parameter == 'auc':
if len(classes) == 2:
print(' ROC AUC Score = %0.1f%%' %(roc_auc_score(y_cv, y_proba[:,rare_class])*100))
else:
print(' No ROC AUC score for multi-class problems')
elif scoring_parameter == 'jaccard':
accu_all = jaccard_singlelabel(y_cv, y_pred)
print(' Mean Jaccard Similarity = {:,.1f}%'.format(
accu_all*100))
## This is for multi-label problems ##
if count == 0:
zipped = copy.deepcopy(y_pred)
count += 1
else:
zipped = zip(zipped,y_pred)
count += 1
elif scoring_parameter == 'basket_recall':
if count == 0:
zipped = copy.deepcopy(y_pred)
count += 1
else:
zipped = zip(zipped,y_pred)
count += 1
if not Stacking_Flag and performed_ensembling:
if modeltype == 'Regression':
rmsle_calculated_f = rmse(y_cv, y_pred)
print('After multiple models, Ensemble Model Results:')
print(' RMSE Score = %0.5f' %(rmsle_calculated_f,))
print('#############################################################################')
if rmsle_calculated_f < rmsle_calculated_m:
print('Ensembling Models is better than Single Model for this data set.')
error_rate.append(rmsle_calculated_f)
else:
print('Single Model is better than Ensembling Models for this data set.')
error_rate.append(rmsle_calculated_m)
else:
rmsle_calculated_f = balanced_accuracy_score(y_cv,y_pred)
print('After multiple models, Ensemble Model Results:')
rare_pct = y_cv[y_cv==rare_class].shape[0]/y_cv.shape[0]
print(' Balanced Accuracy Score = %0.3f%%' %(
rmsle_calculated_f*100))
print(classification_report(y_cv,y_pred))
print(confusion_matrix(y_cv,y_pred))
print('#############################################################################')
if rmsle_calculated_f > rmsle_calculated_m:
print('Ensembling Models is better than Single Model for this data set.')
error_rate.append(rmsle_calculated_f)
else:
print('Single Model is better than Ensembling Models for this data set.')
error_rate.append(rmsle_calculated_m)
if verbose >= 1:
if Boosting_Flag:
try:
if model_name.lower() == 'catboost':
plot_xgb_metrics(model,catboost_scoring,eval_set,modeltype,'%s Results' %each_target,
model_name)
else:
plot_xgb_metrics(gs.best_estimator_,eval_metric,eval_set,modeltype,'%s Results' %each_target,
model_name)
except:
print('Could not plot Model Evaluation Results Metrics')
else:
try:
plot_RS_params(gs.cv_results_, scoring_parameter, each_target)
except:
print('Could not plot Cross Validation Parameters')
print(' Time taken for this Target (in seconds) = %0.0f' %(time.time()-start_time))
        print('Training model on complete Train data and Predicting using given Test Data...')
################ I M P O R T A N T: C O M B I N I N G D A T A ######################
#### This is Second time: we combine train and CV into Train and Test Sets #################
        train = pd.concat([part_train, part_cv])
important_features = [x for x in list(train) if x not in [each_target]]
############################################################################################
###### Now that we have used partial data to make stacking predictors, we can remove them from consideration!
if Stacking_Flag:
important_features = left_subtract(important_features, addcol)
try:
train.drop(addcol,axis=1, inplace=True)
except:
pass
###### Similarly we will have to create KMeans_Clusters again using full Train data!
if KMeans_Featurizer:
important_features = left_subtract(important_features, km_label)
try:
train.drop(km_label,axis=1, inplace=True)
except:
pass
########################## BINNING SECOND TIME ###############################
new_num_vars = np.array(important_features)[(train[important_features].dtypes==float)].tolist()
## Now we re-use the saved_num_vars which contained a list of num_vars for binning now!
###### Once again we do Entropy Binning on the Full Train Data Set !!
########################## BINNING SECOND TIME ###############################
if Binning_Flag and len(saved_num_vars) > 0:
### when you bin the second time, you have to send in important_features with original
### numeric variables so that it works on binning only those. Otherwise it will fail.
### Do Entropy Binning only if there are numeric variables in the data set! #####
            #### When we Bin the second time, we set the entropy_binning flag to True so
#### that all numeric variables that are binned are removed. This way, only bins remain.
train, num_vars, important_features, test = add_entropy_binning(train, each_target,
orig_num_vars, important_features, test,
modeltype, entropy_binning=True,verbose=verbose)
#### In saved_num_vars we send in all the continuous_vars but we bin only the top few vars.
### Those that are binned are removed from saved_num_vars and the remaining become num_vars
### Our job is to find the names of those original numeric variables which were binned.
### orig_num_vars contains original num vars. num_vars contains binned versions of those vars.
### Those binned variables have now become categorical vars and must be added to imp_cats.
#### Also note that important_features does not contain orig_num_vars which have been erased.
else:
print(' Binning_Flag set to False or there are no numeric vars in data set to be binned')
### Now we add another Feature tied to KMeans clustering using Predictor and Target variables ###
####################### KMEANS SECOND TIME ############################
if KMeans_Featurizer and len(saved_num_vars) > 0:
#### Perform KMeans Featurizer only if there are numeric variables in data set! #########
print('Adding one feature named "KMeans_Clusters" using KMeans_Featurizer...')
km_label = 'KMeans_Clusters'
if modeltype != 'Regression':
#### Make the number of clusters as the same as log10 of number of rows in Train
train_cluster, test_cluster = Transform_KM_Features(train[important_features], train[each_target], test[important_features], num_clusters)
else:
train_cluster, test_cluster = Transform_KM_Features(train[important_features], train[each_target], test[important_features])
#### Now make sure that the cat features are either string or integers ######
print(' Used KMeans to naturally cluster Train predictor variables into %d clusters' %num_clusters)
train[km_label] = train_cluster
if not isinstance(test, str):
test[km_label] = test_cluster
#X_train.drop(each_target,axis=1,inplace=True)
for imp_cat in imp_cats:
train[imp_cat] = train[imp_cat].astype(int)
if not isinstance(test, str):
test[imp_cat] = test[imp_cat].astype(int)
saved_num_vars.append(km_label) ### You need to add it to this variable list for Scaling later!
important_features.append(km_label)
########################## STACKING SECOND TIME ###############################
######### This is where you do Stacking of Multi Model Results into One Column ###
if Stacking_Flag:
#### In order to join, you need X_train to be a Pandas Series here ##
print('CAUTION: Stacking can produce Highly Overfit models on Training Data...')
            ### This second time around, the stacking features are built from the full Train data
            ### (the earlier train and CV splits have already been combined into Train above).
addcol, stacks1 = QuickML_Stacking(train[important_features],train[each_target],'',
modeltype, Boosting_Flag, scoring_parameter,verbose)
##### Adding multiple columns for Stacking is best! Do not do the average of predictions!
            #### These Stacking results now come from the full Train data, unlike the earlier Partial Train stacking.
addcols = copy.deepcopy(addcol)
##### Adding multiple columns for Stacking is best! Do not do the average of predictions!
train = train.join(pd.DataFrame(stacks1,index=train.index,
columns=addcols))
##### Leaving multiple columns for Stacking is best! Do not do the average of predictions!
print(' Adding %d Stacking feature(s) to training data' %len(addcols))
if not isinstance(orig_test, str):
                ### Here the stacking models are trained on the full Train data
                ### and then used to predict the stacking features for the Test data.
_, stacks2 = QuickML_Stacking(train[important_features],train[each_target],test[important_features],
modeltype, Boosting_Flag, scoring_parameter,verbose)
##### Adding multiple columns for Stacking is best! Do not do the average of predictions!
test = test.join(pd.DataFrame(stacks2,index=test.index,
columns=addcols))
##### Adding multiple columns for Stacking is best! Do not do the average of predictions!
#test = test.join(pd.DataFrame(stacks2.mean(axis=1).round().astype(int),
# columns=[addcol],index=test.index))
###### We make sure that we remove too many features that are highly correlated ! #####
#addcol = remove_variables_using_fast_correlation(train,addcol,corr_limit,verbose)
important_features += addcols
            saved_num_vars += addcols  ### You need to add these for binning later!
############################################################################################
if len(important_features) == 0:
print('No important features found. Using all input features...')
important_features = copy.deepcopy(saved_important_features)
#important_features = copy.deepcopy(red_preds)
############################################################################################
if model_name.lower() == 'catboost':
            print('    Setting best params for CatBoost model from its initial state since you cannot change params of a fitted CatBoost model ')
model = xgbm.set_params(**best_params)
print(' Number of Categorical and Integer variables used in CatBoost training = %d' %len(imp_cats))
#### Perform Scaling of Train data a second time using FULL TRAIN data set this time !
#### important_features keeps track of all variables that we need to ensure they are scaled!
train, test = perform_scaling_numeric_vars(train, important_features, test,
model_name, SS)
################ T R A I N I N G M O D E L A S E C O N D T I M E ###################
### The next 2 lines are crucial: if X and y are dataframes, then next 2 should be df's
### They should not be df.values since they will become numpy arrays and XGB will error.
trainm = train[important_features+[each_target]]
red_preds = copy.deepcopy(important_features)
X = trainm[red_preds]
y = trainm[each_target]
eval_set = [()]
##### ############ TRAINING MODEL SECOND TIME WITH FULL_TRAIN AND PREDICTING ON TEST ############
model_start_time = time.time()
if modeltype != 'Regression':
if Imbalanced_Flag:
try:
print('################## Imbalanced Flag Set ############################')
print('Imbalanced Class Training using SMOTE Rare Class Oversampling method...')
model, X, y = training_with_SMOTE(X,y, eval_set, model,
Boosting_Flag, eval_metric,modeltype, model_name,
training=False, minority_class=rare_class,
imp_cats=imp_cats, calibrator_flag=calibrator_flag,
GPU_exists=GPU_exists, params=cpu_params,
verbose=verbose)
if isinstance(model, str):
#### If downsampling model failed, it will just be an empty string, so you can try regular model ###
model = copy.deepcopy(best_model)
print('Error in training Imbalanced model second time. Trying regular model..')
Imbalanced_Flag = False
if calibrator_flag:
model.fit(X, y)
else:
if Boosting_Flag:
#### Set the Verbose to 0 since we don't want too much output ##
if model_name == 'XGBoost':
#### Set the Verbose to 0 since we don't want too much output ##
model.fit(X, y,
eval_metric=eval_metric,verbose=0)
else:
model.fit(X, y, cat_features=imp_cats, plot=False)
else:
model.fit(X, y)
except:
print('Error in training Imbalanced model second time. Trying regular model..')
Imbalanced_Flag = False
if calibrator_flag:
model.fit(X, y)
else:
if Boosting_Flag:
if model_name == 'XGBoost':
#### Set the Verbose to 0 since we don't want too much output ##
model.fit(X, y,
eval_metric=eval_metric,verbose=0)
else:
model.fit(X, y, cat_features=imp_cats, plot=False)
else:
model.fit(X, y)
else:
try:
if calibrator_flag:
model.fit(X, y)
else:
if Boosting_Flag:
if model_name == 'XGBoost':
### Since second time we don't have X_cv, we remove it
model.fit(X, y,
eval_metric=eval_metric,verbose=0)
else:
model.fit(X, y, cat_features=imp_cats, plot=False)
else:
model.fit(X, y)
except:
                print('Error while training the regular model the second time. Check if your input is correct...')
return
else:
try:
if calibrator_flag:
model.fit(X, y)
else:
if Boosting_Flag:
if model_name == 'XGBoost':
model.fit(X, y,
eval_metric=eval_metric,verbose=0)
else:
model.fit(X, y, cat_features=imp_cats, use_best_model=False, plot=False)
else:
model.fit(X, y)
except:
                print('Error while training the model the second time. Check if your input is correct...')
return
print('Actual Training time taken in seconds = %0.0f' %(time.time()-model_start_time))
## TRAINING OF MODELS COMPLETED. NOW START PREDICTIONS ON TEST DATA ################
#### new_cols is to keep track of new prediction columns we are creating #####
new_cols = []
if not isinstance(orig_test, str):
### If there is a test data frame, then let us predict on it #######
### The next 3 lines are crucial: if X and y are dataframes, then next 2 should be df's
### They should not be df.values since they will become numpy arrays and XGB will error.
try:
#### We need the id columns to carry over into the predictions ####
testm = orig_test[id_cols].join(test[red_preds])
except:
### if for some reason id columns are not available, then do without it
testm = test[red_preds]
X_test = testm[red_preds]
else:
##### If there is no Test file, then do a final prediction on Train itself ###
orig_index = orig_train.index
trainm = train.reindex(index = orig_index)
testm = orig_train[id_cols].join(trainm[red_preds])
X_test = testm[red_preds]
if modeltype == 'Regression':
y_pred = model.predict(X_test)
##### This next step is very important since some models give series, others give arrays. Very painful!
if isinstance(y_pred,pd.Series):
y_pred = y_pred.values
######## This is for Regression Problems Only ###########
###### If Stacking_ Flag is False, then we do Ensembling #######
if not Stacking_Flag:
try:
new_cols = []
subm = pd.DataFrame()
#### This is for Ensembling Only #####
#### In Test data verbose is set to zero since no results can be obtained!
models_list, ensembles = QuickML_Ensembling(X, y, X_test, '',
modeltype=modeltype, Boosting_Flag=Boosting_Flag,
scoring='', verbose=0)
models_list.append(model_name)
for models, each in zip(models_list, range(len(models_list))):
new_col = each_target+'_'+models+'_predictions'
if each+1 == len(models_list):
subm[new_col] = y_pred
testm[new_col] = y_pred
else:
subm[new_col] = ensembles[:,each]
testm[new_col] = ensembles[:,each]
new_cols.append(new_col)
### After this, y_pred is a Series from now on. You need y_pred.values ####
if len(new_cols) == 5:
print(' Calculating weighted average ensemble of %d regressors' %len(new_cols))
ensem_pred = subm[new_cols[-1]]*0.5+0.125*(subm[new_cols[0]]+subm[
new_cols[1]]+subm[new_cols[2]]+subm[new_cols[3]])
else:
                        print('    Calculating regular average ensemble of %d regressors' %len(new_cols))
ensem_pred = (subm[new_cols].mean(axis=1))
##### This next step is very important since some models give series, others give arrays. Very painful!
                    if isinstance(ensem_pred,pd.Series):
ensem_pred = ensem_pred.values
new_col = each_target+'_Ensembled_predictions'
testm[new_col] = ensem_pred
new_cols.append(new_col)
print('Completed Ensemble predictions on held out data')
except:
print('Could not complete Ensembling predictions on held out data due to Error')
else:
stack_cols, stacksfinal = QuickML_Stacking(X, y, X_test,
modeltype, Boosting_Flag,
scoring_parameter,verbose=verbose)
new_col = each_target+'_Stacked_'+stack_cols[0].split("_")[0]+'_predictions'
if len(stack_cols) == 1:
testm[new_col] = stacksfinal
else:
#### Just average the predictions from each stacked model into a final pred
testm[new_col] = stacksfinal.mean(axis=1)
if not isinstance(sample_submission, str):
sample_submission[each_target] = y_pred
#### If there is a test file, it probably doesn't have target, so add predictions to it!
testm[each_target+'_predictions'] = y_pred
else:
proba_cols = []
######## This is for both Binary and Multi Classification Problems ###########
y_proba = model.predict_proba(X_test)
y_pred = model.predict(X_test)
predicted = copy.deepcopy(y_proba)
if len(classes) <= 2:
predicted [:,0] = (predicted [:,0] >= (1-m_thresh)).astype('int')
predicted [:,1] = (predicted [:,1] > m_thresh).astype('int')
if predicted[:,rare_class].mean()==0 or predicted[:,rare_class].mean()==1:
### If the model is predicting all 0's or all 1's, you need to use a regular threshold
m_thresh = 0.5
print(' Making test Data predictions using regular Threshold = %0.3f' %m_thresh)
else:
### If the model is good with the modified threshold, then you use the modified threshold!
print(' Making test Data predictions using modified Threshold = %0.3f' %m_thresh)
y_pred = predicted[:,rare_class]
else:
##### For multi-class, just make predictions of multiple classes here #######
y_pred = model.predict(X_test)
##### This next step is very important since some models give series, others give arrays. Very painful!
if isinstance(y_pred,pd.Series):
y_pred = y_pred.values.astype(int)
else:
### In a small number of cases, it's an array but has a shape of 1.
### This causes errors later. Hence I have to make it a singleton array.
try:
if y_pred.shape[1] == 1:
y_pred = y_pred.ravel()
except:
y_pred = y_pred.astype(int)
if len(label_dict[each_target]['transformer']) == 0:
######### NO T R A N S F O R M E R L O G I C B E G I N S H E R E ! #####################
### if there is no transformer, then leave the predicted classes as is
classes = label_dict[each_target]['classes']
##### If there is no transformer, you can just predict the classes as is and save it here ###
testm[each_target+'_predictions'] = y_pred
###### If Stacking_Flag is False, then we do Ensembling #######
if not Stacking_Flag:
### Ensembling is not done when the model name is CatBoost ####
new_cols = []
subm = pd.DataFrame()
#### This is for Ensembling Only #####
#### In Test data verbose is set to zero since no results can be obtained!
if len(classes) == 2:
models_list, ensembles = QuickML_Ensembling(X, y, X_test, '',
modeltype='Binary_Classification', Boosting_Flag=Boosting_Flag,
scoring='', verbose=0)
else:
models_list, ensembles = QuickML_Ensembling(X, y, X_test, '',
modeltype='Multi_Classification', Boosting_Flag=Boosting_Flag,
scoring='', verbose=0)
models_list.append(model_name)
for models, each in zip(models_list, range(len(models_list))):
new_col = each_target+'_'+models+'_predictions'
if each+1 == len(models_list):
subm[new_col] = y_pred
testm[new_col] = y_pred
else:
subm[new_col] = ensembles[:,each]
testm[new_col] = ensembles[:,each]
new_cols.append(new_col)
### You will need to create probabilities for each class here ####
for each_class in classes:
if isinstance(each_class, str):
proba_col = each_target+'_proba_'+each_class
else:
proba_col = each_target+'_proba_'+str(each_class)
count = int(label_dict[each_target]['dictionary'][each_class])
testm[proba_col] = y_proba[:,count]
proba_cols.append(proba_col)
if not Stacking_Flag:
new_col = each_target+'_Ensembled_predictions'
if len(new_cols) == 5:
print(' Calculating weighted average ensemble of %d classifiers' %len(new_cols))
ensem_pred = np.round(subm[new_cols[-1]]*0.5+0.125*(subm[new_cols[0]]+subm[
new_cols[1]]+subm[new_cols[2]]+subm[new_cols[3]])).astype(int)
else:
print(' Calculating average ensemble of %d classifiers' %len(new_cols))
ensem_pred = (subm[new_cols].mean(axis=1)).astype(int)
else:
stack_cols, stacksfinal = QuickML_Stacking(X, y, X_test,
modeltype, Boosting_Flag,scoring_parameter,verbose)
new_col = each_target+'_Stacked_'+stack_cols[0].split("_")[0]+'_predictions'
ensem_pred = np.argmax(stacksfinal,axis=1)
##### This next step is very important since some models give series, others give arrays. Very painful!
if isinstance(ensem_pred,pd.Series):
ensem_pred = ensem_pred.values
testm[new_col] = ensem_pred
new_cols.append(new_col)
if not isinstance(sample_submission, str):
sample_submission[each_target] = y_pred
else:
######### T R A N S F O R M E R L O G I C B E G I N S H E R E ! #####################
### if there is a transformer, then you must convert the predicted classes to orig classes
classes = label_dict[each_target]['classes']
dic = label_dict[each_target]['dictionary']
transformer = label_dict[each_target]['transformer']
class_nums = label_dict[each_target]['class_nums']
##### If there is a transformer, you must convert predictions to original classes
testm[each_target+'_predictions'] = pd.Series(y_pred).map(transformer).values
for each_class in classes:
if isinstance(each_class, str):
proba_col = each_target+'_proba_'+each_class
else:
proba_col = each_target+'_proba_'+str(each_class)
count = label_dict[each_target]['dictionary'][each_class]
testm[proba_col] = y_proba[:,count]
proba_cols.append(proba_col)
###### If Stacking_ Flag is False, then we do Ensembling #######
if not Stacking_Flag:
subm = pd.DataFrame()
#### This is for Ensembling Only #####
if len(classes) == 2:
models_list, ensembles = QuickML_Ensembling(X, y, X_test, '',
modeltype='Binary_Classification', Boosting_Flag=Boosting_Flag,
scoring='', verbose=verbose)
else:
models_list, ensembles = QuickML_Ensembling(X, y, X_test, '',
modeltype='Multi_Classification', Boosting_Flag=Boosting_Flag,
scoring='', verbose=verbose)
models_list.append(model_name)
for models, each in zip(models_list, range(len(models_list))):
new_col = each_target+'_'+models+'_predictions'
if each+1 == len(models_list):
subm[new_col] = y_pred
testm[new_col] = pd.Series(y_pred).map(transformer).values
else:
subm[new_col] = ensembles[:,each]
testm[new_col] = pd.Series(ensembles[:,each]).map(transformer).values
new_cols.append(new_col)
### After this, y_pred is a Series from now on. You need y_pred.values ####
                    if len(new_cols) == 5:
print(' Calculating weighted average ensemble of %d classifiers' %len(new_cols))
ensem_pred = np.round(subm[new_cols[-1]]*0.5+0.125*(subm[new_cols[0]]+subm[
new_cols[1]]+subm[new_cols[2]]+subm[new_cols[3]])).astype(int)
else:
print(' Calculating regular average ensemble of %d classifiers' %len(new_cols))
ensem_pred = (subm[new_cols].mean(axis=1)).astype(int)
print('########################################################')
##### This next step is very important since some models give series, others give arrays. Very painful!
if isinstance(ensem_pred,pd.Series):
ensem_pred = ensem_pred.values
print('Completed Ensemble predictions on held out data')
new_col = each_target+'_Ensembled_predictions'
else:
stack_cols, stacksfinal = QuickML_Stacking(X, y, X_test,
modeltype, Boosting_Flag,scoring_parameter,verbose)
new_col = each_target+'_Stacked_'+stack_cols[0].split("_")[0]+'_predictions'
ensem_pred = np.argmax(stacksfinal,axis=1)
print('########################################################')
print('Completed Stacked predictions on held out data')
testm[new_col] = pd.Series(ensem_pred).map(transformer).values
new_cols.append(new_col)
if not isinstance(sample_submission, str):
sample_submission[each_target] = pd.Series(y_pred).map(transformer).values
##################### P L O T F E A T U R E I M P O R T A N C E S H E R E ###################
if calibrator_flag:
plot_model = model.base_estimator
else:
plot_model = copy.deepcopy(model)
try:
if Boosting_Flag is None:
### If you don't use absolute values, you won't get the right set of features in order. Make sure!
imp_features_df = pd.DataFrame(abs(plot_model.coef_[0]),
columns=['Feature Importances'],index=important_features).sort_values(
'Feature Importances',ascending=False)
else:
if model_name.lower() == 'xgboost':
##### SHAP requires this step: XGBoost models must have been "predicted"
_ = plot_model.predict(X_test)
### It is possible that in some cases, XGBoost has fewer features than what was sent in.
### In those cases, we need to identify and know which features in XGBoost are in and which are out
#### In that case, we need to find those features and then do a feature importance
dictf = plot_model.get_booster().get_score(importance_type='gain')
if len(left_subtract(plot_model.get_booster().feature_names,important_features)) > 0:
#### If feature names from XGBoost and important_features are not same,you must transform dictf like this!
dicta = dict(zip(plot_model.get_booster().feature_names,important_features))
featdict = dict([(x,dicta[x]) for x in dictf.keys()])
featdict2 = dict([(dicta[x],dictf[x]) for x in featdict.keys()])
imp_features_df = pd.DataFrame(featdict2.values(),index=featdict2.keys(),
columns = ['Feature Importances']).sort_values('Feature Importances',
ascending=False)
else:
#### If the feature names from XGBoost and the important_features are same,
### you can plot dictf immediately!
imp_features_df = pd.DataFrame(dictf.values(),index=dictf.keys(),
columns = ['Feature Importances']).sort_values('Feature Importances',
ascending=False)
elif model_name == 'Forests':
imp_features_df = pd.DataFrame(plot_model.feature_importances_, columns=['Feature Importances'],
index=important_features).sort_values('Feature Importances',
ascending=False)
elif model_name.lower() == 'catboost':
from catboost import Pool
imp_features_df = pd.DataFrame(plot_model.get_feature_importance(
Pool(X_cv, label=y_cv,cat_features=imp_cats)),
columns=['Feature Importances'],
index=important_features).sort_values(
'Feature Importances',ascending=False)
### Now draw the feature importances using the data frame above!
height_size = 5
width_size = 10
color_string = 'byrcmgkbyrcmgkbyrcmgkbyrcmgk'
print('Plotting Feature Importances to explain the output of model')
imp_features_df[:15].plot(kind='barh',title='Feature Importances for predicting %s' %each_target,
figsize=(width_size, height_size), color=color_string);
except:
print('Could not draw feature importance plot due to an error')
########### D R A W SHAP VALUES USING TREE BASED MODELS. THE REST WILL NOT GET SHAP ############
if verbose >= 2:
print('Trying to plot SHAP values if SHAP is installed in this machine...')
try:
if model_name.lower() == 'catboost':
if verbose > 0:
import shap
from catboost import Pool
shap.initjs()
plt.figure()
shap_values = plot_model.get_feature_importance(Pool(X_cv, label=y_cv,cat_features=imp_cats),type="ShapValues")
                    shap_df = pd.DataFrame(np.c_[X_cv.values,y_cv],columns=list(X_cv)+[each_target])
if modeltype == 'Multi_Classification':
for each_i in range(len(classes)):
### This is needed for Catboost models but it is very cumbersome!
### You need to cycle through multiple values of classes from 0 to n_classes-1.
### There is no way to force it in an Ax => so you are stuck printing multiple charts
shap.summary_plot(shap_values[:,each_i,:], shap_df, plot_type="violin")
else:
shap.summary_plot(shap_values, shap_df, plot_type="violin")
else:
import shap
shap.initjs()
#### This works well for RFC and XGBoost for multiclass problems #####
#### This plots a violin plot that is different from the bar chart above!
#### This does not work for CatBoost so try something else!
if model_name.lower() == 'linear':
explainer = shap.LinearExplainer(plot_model, X_test, feature_dependence="independent")
shap_values = explainer.shap_values(X_test)
plt.figure()
shap.summary_plot(shap_values, X_test, plot_type="bar")
if modeltype != 'Regression':
plt.figure()
shap.summary_plot(shap_values, X_test)
elif model_name.lower() == 'forests':
#### This works well for RFC and XGBoost for multiclass problems #####
### It works for both binary and multi-class problems ########
### However, it does NOT work for CatBoost models!
explainer = shap.TreeExplainer(plot_model)
shap_values = explainer.shap_values(X_test)
plt.figure()
shap.summary_plot(shap_values, X_test, plot_type="bar")
### There is no single violin plot for Random Forests in SHAP
#### It actually has multiple outputs so you can loop through it for each class
if modeltype != 'Regression':
for each_i in range(len(classes)):
plt.figure()
shap.summary_plot(shap_values[each_i], X_test)
elif model_name.lower() == 'xgboost':
#### This works well for RFC and XGBoost for multiclass problems #####
### It works for both binary and multi-class problems ########
### However, it does NOT work for CatBoost models!
explainer = shap.TreeExplainer(plot_model)
shap_values = explainer.shap_values(X_test)
plt.figure()
shap.summary_plot(shap_values, X_test, plot_type="bar")
if modeltype != 'Regression':
plt.figure()
shap.summary_plot(shap_values, X_test)
except:
print('Could not plot SHAP values since SHAP is not installed or could not import SHAP in this machine')
print('############### P R E D I C T I O N O N T E S T C O M P L E T E D #################')
print(' Time taken for this Target (in seconds) = %0.0f' %(time.time()-start_time))
## Write the test and submission files to disk ###
print('Writing Output files to disk...')
#############################################################################################
if not isinstance(testm, str):
try:
write_file_to_folder(testm, each_target, each_target+'_'+modeltype+'_'+'test_modified.csv')
##### D R A W K D E P L O T S FOR PROBABILITY OF PREDICTIONS - very useful! #########
if modeltype != 'Regression':
if verbose >= 2:
testm[proba_cols].plot(kind='kde',figsize=(10,6),
title='Predictive Probability Density Chart with suggested threshold in red')
plt.axvline(x=m_thresh, color='r', linestyle='--');
except:
print(' Error: Not able to save test modified file. Skipping...')
#############################################################################################
if isinstance(sample_submission, str):
sample_submission = testm[id_cols+[each_target+'_predictions']]
try:
write_file_to_folder(sample_submission, each_target, each_target+'_'+modeltype+'_'+'submission.csv')
except:
print(' Error: Not able to save submission file. Skipping...')
#############################################################################################
try:
#### Bring trainm back to its original index ###################
orig_index = orig_train.index
trainm = train.reindex(index = orig_index)
write_file_to_folder(trainm, each_target, each_target+'_'+modeltype+'_'+'train_modified.csv')
except:
print(' Error: Not able to save train modified file. Skipping...')
### In case of multi-label models, we will reset the start train and test dataframes to contain new features created
start_train = start_train[target].join(start_train[orig_red_preds])
if not isinstance(orig_test, str):
start_test = start_test[orig_red_preds]
#### Once each target cycle is over, reset the red_preds to the orig_red_preds so we can start over
red_preds = copy.deepcopy(orig_red_preds)
#### Perform Final Multi-Label Operations here since all Labels are finished by now ###
#### Don't change the target here to each_target since this is for multi-label situations only ###
if (scoring_parameter == 'basket_recall' or scoring_parameter == 'jaccard') and modeltype != 'Regression':
y_preds = np.array(list(zipped))
_,_,_,y_actuals = train_test_split(train[red_preds], train[target].values,
test_size=test_size, random_state=seed)
print('Shape of Actuals: %s and Preds: %s' %(y_actuals.shape[0], y_preds.shape[0]))
if y_actuals.shape[0] == y_preds.shape[0]:
if scoring_parameter == 'basket_recall' and len(target) > 1:
accu_all = basket_recall(y_actuals, y_preds).mean()
print(' Mean Basket Recall = {:,.1f}%'.format(
accu_all*100))
elif scoring_parameter == 'jaccard' and len(target) > 1:
## This shows similarity in multi-label situations ####
accu_all = jaccard_multilabel(y_actuals, y_preds)
print(' Mean Jaccard Similarity = %s' %(
accu_all))
## END OF ONE LABEL IN A MULTI LABEL DATA SET ! WHEW ! ###################
print('############### C O M P L E T E D ################')
print('Time Taken in mins = %0.1f for the Entire Process' %((time.time()-start_time)/60))
#return model, imp_features_df.index.tolist(), trainm, testm
return model, important_features, trainm, testm
###############################################################################
def plot_SHAP_values(m,X,modeltype,Boosting_Flag=False,matplotlib_flag=False,verbose=0):
import shap
# load JS visualization code to notebook
if not matplotlib_flag:
shap.initjs();
# explain the model's predictions using SHAP values
explainer = shap.TreeExplainer(m)
shap_values = explainer.shap_values(X)
    if Boosting_Flag is not None:
if Boosting_Flag:
# visualize the first prediction's explanation (use matplotlib=True to avoid Javascript)
if verbose > 0 and modeltype != 'Multi_Classification':
shap.summary_plot(shap_values, X, plot_type="violin");
if verbose >= 1:
shap.summary_plot(shap_values, X, plot_type="bar");
else:
shap.summary_plot(shap_values, X, plot_type="bar");
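# Example usage (illustrative sketch; `clf` and `X_sample` are assumed to be a fitted tree-based
# model and a matching feature DataFrame - they are not defined in this module):
#   plot_SHAP_values(clf, X_sample, modeltype='Binary_Classification', Boosting_Flag=True, verbose=1)
# Depending on verbose and modeltype, this draws SHAP violin and/or bar summary plots for the model.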
################################################################################
################ Find top features using XGB ###################
################################################################################
from xgboost.sklearn import XGBClassifier
from xgboost.sklearn import XGBRegressor
from sklearn.feature_selection import SelectKBest
from sklearn.feature_selection import chi2, mutual_info_regression, mutual_info_classif
def find_top_features_xgb(train,preds,numvars,target,modeltype,corr_limit,verbose=0):
"""
    This is a fast utility that uses XGB to find the top features.
    It returns a list of important features along with the reduced numeric and categorical variable lists.
    Since it is XGB, you don't have to restrict the input to just numeric vars.
    You can send in all kinds of vars and it will take care of transforming them.
"""
import xgboost as xgb
###################### I M P O R T A N T ##############################################
###### This top_num decides how many top_n features XGB selects in each iteration.
    #### There are a total of 5 iterations. Hence 5x10 means a maximum of 50 features will be selected.
    ##### If there are more than 50 variables, then a maximum of 5*25 = 125 variables will be selected.
if len(preds) <= 50:
top_num = 10
else:
top_num = 25
###################### I M P O R T A N T ##############################################
#### If there are more than 30 categorical variables in a data set, it is worth reducing features.
    #### Otherwise, XGBoost is pretty good at finding the best features, whether categorical or numeric!
n_splits = 5
max_depth = 8
max_cats = 5
###################### I M P O R T A N T ##############################################
train = copy.deepcopy(train)
preds = copy.deepcopy(preds)
numvars = copy.deepcopy(numvars)
subsample = 0.7
col_sub_sample = 0.7
train = copy.deepcopy(train)
start_time = time.time()
test_size = 0.2
seed = 1
early_stopping = 5
####### All the default parameters are set up now #########
    kf = KFold(n_splits=n_splits, shuffle=True, random_state=33)
rem_vars = left_subtract(preds,numvars)
catvars = copy.deepcopy(rem_vars)
############ I M P O R T A N T ! I M P O R T A N T ! ######################
##### Removing the Cat Vars selection using Linear Methods since they fail so often.
##### Linear methods such as Chi2 or Mutual Information Score are not great
#### for feature selection since they can't handle large data and provide
#### misleading results for large data sets. Hence I am using XGBoost alone.
#### Also, another method of using Spearman Correlation for CatVars with 100's
    #### of variables is very slow. Also, it is not clearly effective: only 3-4 vars
    #### are removed. Hence for now, I am not going to use the Spearman method. Perhaps later.
##############################################################################
#if len(catvars) > max_cats:
# start_time = time.time()
# important_cats = remove_variables_using_fast_correlation(train,catvars,'spearman',
# corr_limit,verbose)
# if verbose >= 1:
# print('Time taken for reducing highly correlated Categorical vars was %0.0f seconds' %(time.time()-start_time))
#else:
important_cats = copy.deepcopy(catvars)
print('No categorical feature reduction done. All %d Categorical vars selected ' %(len(catvars)))
if len(numvars) > 1:
final_list = remove_variables_using_fast_correlation(train,numvars,'pearson',
corr_limit,verbose)
else:
final_list = copy.deepcopy(numvars)
print(' Adding %s categorical variables to reduced numeric variables of %d' %(
len(important_cats),len(final_list)))
if isinstance(final_list,np.ndarray):
final_list = final_list.tolist()
preds = final_list+important_cats
#######You must convert category variables into integers ###############
for important_cat in important_cats:
if str(train[important_cat].dtype) == 'category':
train[important_cat] = train[important_cat].astype(int)
######## Drop Missing value rows since XGB for some reason #########
######## can't handle missing values in early stopping rounds #######
train.dropna(axis=0,subset=preds+[target],inplace=True)
######## Dont move this train and y definition anywhere else ########
y = train[target]
print('############## F E A T U R E S E L E C T I O N ####################')
important_features = []
if modeltype == 'Regression':
objective = 'reg:squarederror'
model_xgb = XGBRegressor( n_estimators=100,subsample=subsample,objective=objective,
colsample_bytree=col_sub_sample,reg_alpha=0.5, reg_lambda=0.5,
seed=1,n_jobs=-1,random_state=1)
eval_metric = 'rmse'
else:
#### This is for Classifiers only
classes = np.unique(train[target].values)
if len(classes) == 2:
model_xgb = XGBClassifier(base_score=0.5, booster='gbtree', subsample=subsample,
colsample_bytree=col_sub_sample,gamma=1, learning_rate=0.1, max_delta_step=0,
max_depth=max_depth, min_child_weight=1, missing=-999, n_estimators=100,
n_jobs=-1, nthread=None, objective='binary:logistic',
random_state=1, reg_alpha=0.5, reg_lambda=0.5, scale_pos_weight=1,
seed=1)
eval_metric = 'logloss'
else:
model_xgb = XGBClassifier(base_score=0.5, booster='gbtree', subsample=subsample,
colsample_bytree=col_sub_sample, gamma=1, learning_rate=0.1, max_delta_step=0,
max_depth=max_depth, min_child_weight=1, missing=-999, n_estimators=100,
n_jobs=-1, nthread=None, objective='multi:softmax',
random_state=1, reg_alpha=0.5, reg_lambda=0.5, scale_pos_weight=1,
seed=1)
eval_metric = 'mlogloss'
#### This is where you start to Iterate on Finding Important Features ################
save_xgb = copy.deepcopy(model_xgb)
train_p = train[preds]
if train_p.shape[1] < 10:
iter_limit = 2
else:
iter_limit = int(train_p.shape[1]/5+0.5)
print('Current number of predictors = %d ' %(train_p.shape[1],))
print(' Finding Important Features using Boosted Trees algorithm...')
try:
for i in range(0,train_p.shape[1],iter_limit):
new_xgb = copy.deepcopy(save_xgb)
print(' using %d variables...' %(train_p.shape[1]-i))
if train_p.shape[1]-i < iter_limit:
X = train_p.iloc[:,i:]
if modeltype == 'Regression':
train_part = int((1-test_size)*X.shape[0])
X_train, X_cv, y_train, y_cv = X[:train_part],X[train_part:],y[:train_part],y[train_part:]
else:
X_train, X_cv, y_train, y_cv = train_test_split(X, y,
test_size=test_size, random_state=seed)
try:
eval_set = [(X_train,y_train),(X_cv,y_cv)]
model_xgb.fit(X_train,y_train,early_stopping_rounds=early_stopping,eval_set=eval_set,
eval_metric=eval_metric,verbose=False)
important_features += pd.Series(model_xgb.get_booster().get_score(
importance_type='gain')).sort_values(ascending=False)[:top_num].index.tolist()
except:
new_xgb.fit(X_train,y_train,early_stopping_rounds=early_stopping,eval_set=eval_set,
eval_metric=eval_metric,verbose=False)
print('XGB has a bug in version xgboost 1.02 for feature importances. Try to install version 0.90 or 1.10 - continuing...')
important_features += pd.Series(new_xgb.get_booster().get_score(
importance_type='gain')).sort_values(ascending=False)[:top_num].index.tolist()
important_features = list(OrderedDict.fromkeys(important_features))
else:
X = train_p[list(train_p.columns.values)[i:train_p.shape[1]]]
#### Split here into train and test #####
if modeltype == 'Regression':
train_part = int((1-test_size)*X.shape[0])
X_train, X_cv, y_train, y_cv = X[:train_part],X[train_part:],y[:train_part],y[train_part:]
else:
X_train, X_cv, y_train, y_cv = train_test_split(X, y,
test_size=test_size, random_state=seed)
eval_set = [(X_train,y_train),(X_cv,y_cv)]
try:
model_xgb.fit(X_train,y_train,early_stopping_rounds=early_stopping,
eval_set=eval_set,eval_metric=eval_metric,verbose=False)
important_features += pd.Series(model_xgb.get_booster().get_score(
importance_type='gain')).sort_values(ascending=False)[:top_num].index.tolist()
except:
new_xgb.fit(X_train,y_train,early_stopping_rounds=early_stopping,
eval_set=eval_set,eval_metric=eval_metric,verbose=False)
                    important_features += pd.Series(new_xgb.get_booster().get_score(
importance_type='gain')).sort_values(ascending=False)[:top_num].index.tolist()
important_features = list(OrderedDict.fromkeys(important_features))
except:
print('Finding top features using XGB is crashing. Continuing with all predictors...')
important_features = copy.deepcopy(preds)
return important_features, [], []
important_features = list(OrderedDict.fromkeys(important_features))
print('Found %d important features' %len(important_features))
#print(' Time taken (in seconds) = %0.0f' %(time.time()-start_time))
numvars = [x for x in numvars if x in important_features]
important_cats = [x for x in important_cats if x in important_features]
return important_features, numvars, important_cats
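# Example usage (illustrative sketch; `train_df`, `num_cols` and `'target'` below are assumptions,
# not names defined in this module):
#   preds = [c for c in train_df.columns if c != 'target']
#   imp_feats, imp_nums, imp_cats = find_top_features_xgb(train_df, preds, num_cols,
#                                       'target', 'Classification', corr_limit=0.7, verbose=1)
# It returns the selected features plus the numeric and categorical subsets of that selection.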
################################################################################
def basket_recall(label, pred):
"""
This tests the recall of a given basket of items in a label by the second basket, pred.
It compares the 2 baskets (arrays or lists) named as label and pred, and finds common items
between the two. Then it divides that length by the total number of items in the label basket
to come up with a basket recall score. This score may be useful in recommendation problems
    where you are interested in finding how many items in a basket (labels) your
predictions (pred) basket got correct. The order of the items in the baskets does not matter.
"""
if isinstance(label, list):
label = np.array(label)
if isinstance(pred, list):
pred = np.array(pred)
if len(label) > 1:
jacc_arr = []
for row1,row2,count in zip(label,pred, range(len(label))):
intersection = len(np.intersect1d(row1,row2))
union = len(row1)
jacc = float(intersection / union)
if count == 0:
jacc_arr = copy.deepcopy(jacc)
else:
jacc_arr = np.r_[jacc_arr,jacc]
return jacc_arr
else:
        intersection = len(list(set(label).intersection(set(pred))))
        union = (len(label) + len(pred)) - intersection
        jacc_arr = float(intersection / union)
return jacc_arr
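# Example (illustrative, hypothetical baskets):
#   label = np.array([[1, 2, 3], [4, 5, 6]])
#   pred  = np.array([[1, 2, 9], [7, 8, 9]])
#   basket_recall(label, pred)   # -> approx array([0.667, 0.0]): 2 of 3 and 0 of 3 label items recovered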
################################################################################
def jaccard_singlelabel(label, pred):
"""
This compares 2 baskets (could be lists or arrays): label and pred, and finds common items
between the two. Then it divides that number by either rows or columns to return %.
### Jaccard_Columnwise = this means you have multi-labels and you want it summed columnwise
### This tells you what is the average accuracy for each column in multi-label target
### It will return as many totals as the number of columns in your multi-label target.
### To get a percentage, you will have to divide it by the number of rows in the data set.
### This percentage gives you the % of rows in each label you got correctly=%Each_Label Accuracy
### This will give you as many percentages as there are labels in your multi-label target.
### Jaccard_Row-wise = this means you have combos but where order matters and you want it compared row-wise
    ### This tells you how many labels in each row you got right. That's accuracy by row.
### It will return as many totals as the number of rows in your data set.
### To get a percentage, you will have to divide it by the number of labels in the data set.
### This percentage gives you the % of labels in each row you got correctly=%Combined_Label_Accuracy
### This will give you a single percentage number for the whole data set
"""
if isinstance(label, list):
label = np.array(label)
if isinstance(pred, list):
pred = np.array(pred)
try:
### This is for Multi-Label Problems ##### Returns 2 results: one number and
### the second is an array with as many items as number of labels in target
jacc_each_label = np.sum(label==pred,axis=0)/label.shape[0]
return jacc_each_label
except:
return 0
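# Example (illustrative, hypothetical multi-label arrays):
#   label = np.array([[1, 0], [0, 1], [1, 1]])
#   pred  = np.array([[1, 0], [1, 1], [1, 0]])
#   jaccard_singlelabel(label, pred)   # -> approx array([0.667, 0.667]): 2 of 3 rows correct per label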
################################################################################
def jaccard_multilabel(label, pred):
"""
This compares 2 baskets (could be lists or arrays): label and pred, and finds common items
between the two. Then it divides that number by either rows or columns to return %.
### Jaccard_Columnwise = this means you have multi-labels and you want it summed columnwise
### This tells you what is the average accuracy for each column in multi-label target
### It will return as many totals as the number of columns in your multi-label target.
### To get a percentage, you will have to divide it by the number of rows in the data set.
### This percentage gives you the % of rows in each label you got correctly=%Each_Label Accuracy
### This will give you as many percentages as there are labels in your multi-label target.
### Jaccard_Row-wise = this means you have combos but where order matters and you want it compared row-wise
    ### This tells you how many labels in each row you got right. That's accuracy by row.
### It will return as many totals as the number of rows in your data set.
### To get a percentage, you will have to divide it by the number of labels in the data set.
### This percentage gives you the % of labels in each row you got correctly=%Combined_Label_Accuracy
### This will give you a single percentage number for the whole data set
"""
if isinstance(label, list):
label = np.array(label)
if isinstance(pred, list):
pred = np.array(pred)
### This is for Multi-Label Problems ##### Returns 2 results: one number and
### the second is an array with as many items as number of labels in target
try:
jacc_data_set = np.sum(label==pred,axis=1).sum()/label.shape[1]
return jacc_data_set
except:
return 0
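# Example (illustrative, hypothetical multi-label arrays):
#   label = np.array([[1, 0], [0, 1], [1, 1]])
#   pred  = np.array([[1, 0], [1, 1], [1, 0]])
#   jaccard_multilabel(label, pred)   # -> 2.0  (4 matching cells divided by 2 label columns)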
################################################################################
def plot_RS_params(cv_results, score, mname):
"""
####### This plots the GridSearchCV Results sent in ############
"""
df = pd.DataFrame(cv_results)
params = [x for x in list(df) if x.startswith('param_')]
traincols = ['mean_train_score' ]
testcols = ['mean_test_score' ]
cols = traincols+testcols
ncols = 2
noplots = len(params)
    if noplots%ncols == 0:
        rows = noplots//ncols
    else:
        rows = (noplots//ncols)+1
height_size = 5
width_size = 15
fig = plt.figure(figsize=(width_size,rows*height_size))
fig.suptitle('Training and Validation: Hyper Parameter Tuning for target=%s' %mname, fontsize=20,y=1.01)
#### If the values are negative, convert them to positive ############
if len(df.loc[df[cols[0]]<0]) > 0:
df[cols] = df[cols]*-1
for each_param, count in zip(params, range(noplots)):
plt.subplot(rows,ncols,count+1)
ax1 = plt.gca()
if df[each_param].dtype != object:
df[[each_param]+cols].groupby(each_param).mean().plot(kind='line',
title='%s for %s' %(each_param,mname),ax=ax1)
else:
try:
df[each_param] = pd.to_numeric(df[each_param])
df[[each_param]+cols].groupby(each_param).mean().plot(kind='line',
title='%s for %s' %(each_param,mname), ax=ax1)
except:
df[[each_param]+cols].groupby(each_param).mean().plot(kind='bar',stacked=False,
title='%s for %s' %(each_param,mname), ax=ax1)
#### This is to plot the test_mean_score against params to see how it increases
for each_param in params:
#### This is to find which parameters are non string and convert them to strings
if df[each_param].dtype!=object:
df[each_param] = df[each_param].astype(str)
try:
df['combined_parameters'] = df[params].apply(lambda x: '__'.join(x), axis=1 )
except:
df['combined_parameters'] = df[params].apply(lambda x: '__'.join(x.map(str)), axis=1 )
if len(params) == 1:
df['combined_parameters'] = copy.deepcopy(df[params])
else:
df[['combined_parameters']+cols].groupby('combined_parameters').mean().sort_values(
cols[1]).plot(figsize=(width_size,height_size),kind='line',subplots=False,
title='Combined Parameters: %s scores for %s' %(score,mname))
plt.xticks(rotation=45)
plt.show();
return df
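# Example usage (illustrative sketch; `gs` is assumed to be a fitted RandomizedSearchCV or
# GridSearchCV object - note that 'mean_train_score' is only present in cv_results_ when the
# search was run with return_train_score=True):
#   results_df = plot_RS_params(gs.cv_results_, 'accuracy', 'my_target')
# It plots mean train/test scores against each tuned parameter and returns cv_results_ as a DataFrame.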
################################################################################
def plot_xgb_metrics(model,eval_metric,eval_set,modeltype,model_label='',model_name=""):
height_size = 5
width_size = 10
if model_name.lower() == 'catboost':
results = model.get_evals_result()
else:
results = model.evals_result()
res_keys = list(results.keys())
eval_metric = list(results[res_keys[0]].keys())
if isinstance(eval_metric, list):
# plot log loss
eval_metric = eval_metric[0]
# plot metrics now
fig, ax = plt.subplots(figsize=(width_size, height_size))
epochs = len(results[res_keys[0]][eval_metric])
x_axis = range(0, epochs)
    ### Both CatBoost and XGBoost evaluation results share the same structure here, so one plot call suffices
    ax.plot(x_axis, results[res_keys[0]][eval_metric], label='%s' %res_keys[0])
epochs = len(results[res_keys[-1]][eval_metric])
x_axis = range(0, epochs)
ax.plot(x_axis, results[res_keys[-1]][eval_metric], label='%s' %res_keys[-1])
ax.legend()
plt.ylabel(eval_metric)
plt.title('%s Train and Validation Metrics across Epochs (Early Stopping in effect)' %model_label)
plt.show();
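# Example usage (illustrative sketch; `xgb_model` and `eval_set` are assumed to come from an
# XGBoost model fitted with an eval_set so that evals_result() has a history to plot):
#   plot_xgb_metrics(xgb_model, 'logloss', eval_set, 'Binary_Classification',
#                    model_label='target Results', model_name='XGBoost')
# It reads the stored evaluation history and plots train vs validation metrics across epochs.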
################################################################################
######### NEW And FAST WAY to CLASSIFY COLUMNS IN A DATA SET #######
################################################################################
def classify_columns(df_preds, verbose=0):
"""
Takes a dataframe containing only predictors to be classified into various types.
DO NOT SEND IN A TARGET COLUMN since it will try to include that into various columns.
Returns a data frame containing columns and the class it belongs to such as numeric,
categorical, date or id column, boolean, nlp, discrete_string and cols to delete...
####### Returns a dictionary with 10 kinds of vars like the following: # continuous_vars,int_vars
# cat_vars,factor_vars, bool_vars,discrete_string_vars,nlp_vars,date_vars,id_vars,cols_delete
"""
max_cols_to_print = 30
print('############## C L A S S I F Y I N G V A R I A B L E S ####################')
print('Classifying variables in data set...')
    #### Cat_Limit defines the max number of categories a column can have to be called a categorical column
cat_limit = 15
def add(a,b):
return a+b
train = df_preds[:]
sum_all_cols = dict()
orig_cols_total = train.shape[1]
#Types of columns
cols_delete = [col for col in list(train) if (len(train[col].value_counts()) == 1
) | (train[col].isnull().sum()/len(train) >= 0.90)]
train = train[left_subtract(list(train),cols_delete)]
var_df = pd.Series(dict(train.dtypes)).reset_index(drop=False).rename(
columns={0:'type_of_column'})
sum_all_cols['cols_delete'] = cols_delete
var_df['bool'] = var_df.apply(lambda x: 1 if x['type_of_column'] in ['bool','object']
and len(train[x['index']].value_counts()) == 2 else 0, axis=1)
string_bool_vars = list(var_df[(var_df['bool'] ==1)]['index'])
sum_all_cols['string_bool_vars'] = string_bool_vars
var_df['num_bool'] = var_df.apply(lambda x: 1 if x['type_of_column'] in [np.uint8,
np.uint16, np.uint32, np.uint64,
'int8','int16','int32','int64',
'float16','float32','float64'] and len(
train[x['index']].value_counts()) == 2 else 0, axis=1)
num_bool_vars = list(var_df[(var_df['num_bool'] ==1)]['index'])
sum_all_cols['num_bool_vars'] = num_bool_vars
###### This is where we take all Object vars and split them into diff kinds ###
discrete_or_nlp = var_df.apply(lambda x: 1 if x['type_of_column'] in ['object'] and x[
'index'] not in string_bool_vars+cols_delete else 0,axis=1)
######### This is where we figure out whether a string var is nlp or discrete_string var ###
var_df['nlp_strings'] = 0
var_df['discrete_strings'] = 0
var_df['cat'] = 0
var_df['id_col'] = 0
discrete_or_nlp_vars = var_df.loc[discrete_or_nlp==1]['index'].values.tolist()
if len(var_df.loc[discrete_or_nlp==1]) != 0:
for col in discrete_or_nlp_vars:
#### first fill empty or missing vals since it will blowup ###
train[col] = train[col].fillna(' ')
if train[col].map(lambda x: len(x) if type(x)==str else 0).mean(
) >= 50 and len(train[col].value_counts()
) <= len(train) and col not in string_bool_vars:
var_df.loc[var_df['index']==col,'nlp_strings'] = 1
elif len(train[col].value_counts()) > cat_limit and len(train[col].value_counts()
) <= len(train) and col not in string_bool_vars:
var_df.loc[var_df['index']==col,'discrete_strings'] = 1
elif len(train[col].value_counts()) > cat_limit and len(train[col].value_counts()
) == len(train) and col not in string_bool_vars:
var_df.loc[var_df['index']==col,'id_col'] = 1
else:
var_df.loc[var_df['index']==col,'cat'] = 1
nlp_vars = list(var_df[(var_df['nlp_strings'] ==1)]['index'])
sum_all_cols['nlp_vars'] = nlp_vars
discrete_string_vars = list(var_df[(var_df['discrete_strings'] ==1) ]['index'])
sum_all_cols['discrete_string_vars'] = discrete_string_vars
###### This happens only if a string column happens to be an ID column #######
#### DO NOT Add this to ID_VARS yet. It will be done later.. Dont change it easily...
#### Category DTYPE vars are very special = they can be left as is and not disturbed in Python. ###
var_df['dcat'] = var_df.apply(lambda x: 1 if str(x['type_of_column'])=='category' else 0,
axis=1)
factor_vars = list(var_df[(var_df['dcat'] ==1)]['index'])
sum_all_cols['factor_vars'] = factor_vars
########################################################################
date_or_id = var_df.apply(lambda x: 1 if x['type_of_column'] in [np.uint8,
np.uint16, np.uint32, np.uint64,
'int8','int16',
'int32','int64'] and x[
'index'] not in string_bool_vars+num_bool_vars+discrete_string_vars+nlp_vars else 0,
axis=1)
######### This is where we figure out whether a numeric col is date or id variable ###
var_df['int'] = 0
var_df['date_time'] = 0
### if a particular column is date-time type, now set it as a date time variable ##
var_df['date_time'] = var_df.apply(lambda x: 1 if x['type_of_column'] in ['<M8[ns]','datetime64[ns]'] and x[
'index'] not in string_bool_vars+num_bool_vars+discrete_string_vars+nlp_vars else 0,
axis=1)
### this is where we save them as date time variables ###
if len(var_df.loc[date_or_id==1]) != 0:
for col in var_df.loc[date_or_id==1]['index'].values.tolist():
if len(train[col].value_counts()) == len(train):
if train[col].min() < 1900 or train[col].max() > 2050:
var_df.loc[var_df['index']==col,'id_col'] = 1
else:
try:
pd.to_datetime(train[col],infer_datetime_format=True)
var_df.loc[var_df['index']==col,'date_time'] = 1
except:
var_df.loc[var_df['index']==col,'id_col'] = 1
else:
if train[col].min() < 1900 or train[col].max() > 2050:
if col not in num_bool_vars:
var_df.loc[var_df['index']==col,'int'] = 1
else:
try:
pd.to_datetime(train[col],infer_datetime_format=True)
var_df.loc[var_df['index']==col,'date_time'] = 1
except:
if col not in num_bool_vars:
var_df.loc[var_df['index']==col,'int'] = 1
else:
pass
int_vars = list(var_df[(var_df['int'] ==1)]['index'])
date_vars = list(var_df[(var_df['date_time'] == 1)]['index'])
id_vars = list(var_df[(var_df['id_col'] == 1)]['index'])
sum_all_cols['int_vars'] = int_vars
copy_date_vars = copy.deepcopy(date_vars)
for date_var in copy_date_vars:
#### This test is to make sure sure date vars are actually date vars
try:
pd.to_datetime(train[date_var],infer_datetime_format=True)
except:
##### if not a date var, then just add it to delete it from processing
cols_delete.append(date_var)
date_vars.remove(date_var)
sum_all_cols['date_vars'] = date_vars
sum_all_cols['id_vars'] = id_vars
sum_all_cols['cols_delete'] = cols_delete
## This is an EXTREMELY complicated logic for cat vars. Don't change it unless you test it many times!
var_df['numeric'] = 0
float_or_cat = var_df.apply(lambda x: 1 if x['type_of_column'] in ['float16',
'float32','float64'] else 0,
axis=1)
if len(var_df.loc[float_or_cat == 1]) > 0:
for col in var_df.loc[float_or_cat == 1]['index'].values.tolist():
if len(train[col].value_counts()) > 2 and len(train[col].value_counts()
) <= cat_limit and len(train[col].value_counts()) != len(train):
var_df.loc[var_df['index']==col,'cat'] = 1
else:
if col not in num_bool_vars:
var_df.loc[var_df['index']==col,'numeric'] = 1
cat_vars = list(var_df[(var_df['cat'] ==1)]['index'])
continuous_vars = list(var_df[(var_df['numeric'] ==1)]['index'])
######## V E R Y I M P O R T A N T ###################################################
##### There are a couple of extra tests you need to do to remove abberations in cat_vars ###
cat_vars_copy = copy.deepcopy(cat_vars)
for cat in cat_vars_copy:
if df_preds[cat].dtype==float:
continuous_vars.append(cat)
cat_vars.remove(cat)
var_df.loc[var_df['index']==cat,'cat'] = 0
var_df.loc[var_df['index']==cat,'numeric'] = 1
elif len(df_preds[cat].value_counts()) == df_preds.shape[0]:
id_vars.append(cat)
cat_vars.remove(cat)
var_df.loc[var_df['index']==cat,'cat'] = 0
var_df.loc[var_df['index']==cat,'id_col'] = 1
sum_all_cols['cat_vars'] = cat_vars
sum_all_cols['continuous_vars'] = continuous_vars
sum_all_cols['id_vars'] = id_vars
###### This is where you consoldate the numbers ###########
var_dict_sum = dict(zip(var_df.values[:,0], var_df.values[:,2:].sum(1)))
for col, sumval in var_dict_sum.items():
if sumval == 0:
print('%s of type=%s is not classified' %(col,train[col].dtype))
elif sumval > 1:
            print('%s of type=%s is classified into more than one type' %(col,train[col].dtype))
else:
pass
############### This is where you print all the types of variables ##############
####### Returns 8 vars in the following order: continuous_vars,int_vars,cat_vars,
### string_bool_vars,discrete_string_vars,nlp_vars,date_or_id_vars,cols_delete
if verbose >= 1:
print(" Number of Numeric Columns = ", len(continuous_vars))
print(" Number of Integer-Categorical Columns = ", len(int_vars))
print(" Number of String-Categorical Columns = ", len(cat_vars))
print(" Number of Factor-Categorical Columns = ", len(factor_vars))
print(" Number of String-Boolean Columns = ", len(string_bool_vars))
print(" Number of Numeric-Boolean Columns = ", len(num_bool_vars))
print(" Number of Discrete String Columns = ", len(discrete_string_vars))
print(" Number of NLP String Columns = ", len(nlp_vars))
print(" Number of Date Time Columns = ", len(date_vars))
print(" Number of ID Columns = ", len(id_vars))
print(" Number of Columns to Delete = ", len(cols_delete))
if verbose >= 2:
print('Printing first %d columns by each type of column:' %max_cols_to_print)
print(" Numeric Columns: %s" %continuous_vars[:max_cols_to_print])
print(" Integer-Categorical Columns: %s" %int_vars[:max_cols_to_print])
print(" String-Categorical Columns: %s" %cat_vars[:max_cols_to_print])
print(" Factor-Categorical Columns: %s" %factor_vars[:max_cols_to_print])
print(" String-Boolean Columns: %s" %string_bool_vars[:max_cols_to_print])
print(" Numeric-Boolean Columns: %s" %num_bool_vars[:max_cols_to_print])
print(" Discrete String Columns: %s" %discrete_string_vars[:max_cols_to_print])
print(" NLP text Columns: %s" %nlp_vars[:max_cols_to_print])
print(" Date Time Columns: %s" %date_vars[:max_cols_to_print])
print(" ID Columns: %s" %id_vars[:max_cols_to_print])
print(" Columns that will not be considered in modeling: %s" %cols_delete[:max_cols_to_print])
##### now collect all the column types and column names into a single dictionary to return!
len_sum_all_cols = reduce(add,[len(v) for v in sum_all_cols.values()])
if len_sum_all_cols == orig_cols_total:
print(' %d Predictors classified...' %orig_cols_total)
print(' This does not include the Target column(s)')
else:
print('No of columns classified %d does not match %d total cols. Continuing...' %(
len_sum_all_cols, orig_cols_total))
ls = sum_all_cols.values()
flat_list = [item for sublist in ls for item in sublist]
        print('        Missing columns = %s' % (set(list(train)) - set(flat_list)))
return sum_all_cols
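# --------------------------------------------------------------------------
# Minimal usage sketch for classify_columns (synthetic, illustrative data).
# Send in predictors only -- drop the target column first.  The class each
# column lands in depends on the cardinality heuristics above.
# --------------------------------------------------------------------------
def _demo_classify_columns():
    demo = pd.DataFrame({
        'price': [10.5, 22.1, 9.9, 14.3, 18.0],
        'color': ['red', 'blue', 'red', 'green', 'blue'],
        'is_active': [True, False, True, True, False],
        'comment': ['great product', 'ok', 'would buy again', 'meh', 'fine'],
    })
    col_types = classify_columns(demo, verbose=1)
    return col_types['continuous_vars'], col_types['cat_vars'], col_types['string_bool_vars']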
#################################################################################
def left_subtract(l1,l2):
lst = []
for i in l1:
if i not in l2:
lst.append(i)
return lst
################################################################################
from sklearn.feature_selection import chi2, mutual_info_regression, mutual_info_classif
from sklearn.feature_selection import SelectKBest
################################################################################
from collections import defaultdict
from collections import OrderedDict
import time
def return_dictionary_list(lst_of_tuples):
""" Returns a dictionary of lists if you send in a list of Tuples"""
orDict = defaultdict(list)
# iterating over list of tuples
for key, val in lst_of_tuples:
orDict[key].append(val)
return orDict
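# Quick illustration (hypothetical values):
#   return_dictionary_list([('a', 1), ('b', 2), ('a', 3)])
#   -> defaultdict(list, {'a': [1, 3], 'b': [2]})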
##################################################################################
def count_freq_in_list(lst):
"""
This counts the frequency of items in a list but MAINTAINS the order of appearance of items.
This order is very important when you are doing certain functions. Hence this function!
"""
temp=np.unique(lst)
result = []
for i in temp:
result.append((i,lst.count(i)))
return result
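# Example (hypothetical input): count_freq_in_list(['b', 'a', 'b', 'c'])
# returns [('a', 1), ('b', 2), ('c', 1)]. Note that np.unique sorts its output,
# so the tuples come back in sorted order of the unique items.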
##################################################################################
def find_corr_vars(correlation_dataframe,corr_limit = 0.70):
"""
This returns a dictionary of counts of each variable and how many vars it is correlated to in the dataframe
"""
flatten = lambda l: [item for sublist in l for item in sublist]
flatten_items = lambda dic: [x for x in dic.items()]
a = correlation_dataframe.values
col_index = correlation_dataframe.columns.tolist()
index_triupper = list(zip(np.triu_indices_from(a,k=1)[0],np.triu_indices_from(a,k=1)[1]))
high_corr_index_list = [x for x in np.argwhere(abs(a[np.triu_indices(len(a), k = 1)])>=corr_limit)]
low_corr_index_list = [x for x in np.argwhere(abs(a[np.triu_indices(len(a), k = 1)])<corr_limit)]
tuple_list = [y for y in [index_triupper[x[0]] for x in high_corr_index_list]]
correlated_pair = [(col_index[tuple[0]],col_index[tuple[1]]) for tuple in tuple_list]
correlated_pair_dict = dict(correlated_pair)
flat_corr_pair_list = [item for sublist in correlated_pair for item in sublist]
#### You can make it a dictionary or a tuple of lists. We have chosen the latter here to keep order intact.
#corr_pair_count_dict = Counter(flat_corr_pair_list)
corr_pair_count_dict = count_freq_in_list(flat_corr_pair_list)
corr_list = list(set(flatten(flatten_items(correlated_pair_dict))))
rem_col_list = left_subtract(list(correlation_dataframe),list(OrderedDict.fromkeys(flat_corr_pair_list)))
return corr_pair_count_dict, rem_col_list, corr_list, correlated_pair_dict
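# Sketch of find_corr_vars on a toy correlation matrix (hypothetical numbers):
#   corr = pd.DataFrame([[1.0, 0.9, 0.1],
#                        [0.9, 1.0, 0.2],
#                        [0.1, 0.2, 1.0]], columns=list('abc'), index=list('abc'))
#   counts, remainder, correlated, pairs = find_corr_vars(corr, corr_limit=0.70)
#   -> 'a' and 'b' form the only highly correlated pair; 'c' ends up in the
#      uncorrelated remainder list.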
################################################################################
from collections import OrderedDict, Counter
def remove_variables_using_fast_correlation(df,numvars,corr_type='pearson',corr_limit = 0.70,verbose=0):
"""
Removes variables that are highly correlated using a pair-wise
high-correlation knockout method. It is highly efficient and hence can work on thousands
of variables in less than a minute, even on a laptop. Only send in a list of numeric
variables, otherwise, it will blow-up!
Correlation = 0.70 This is the highest correlation that any two variables can have.
Above this, and one of them gets knocked out: this is decided in the shootout stage
after the initial round of cutoffs for pair-wise correlations...It returns a list of
    clean variables that are uncorrelated (at least in a pair-wise sense).
"""
flatten = lambda l: [item for sublist in l for item in sublist]
flatten_items = lambda dic: [x for x in dic.items()]
flatten_keys = lambda dic: [x for x in dic.keys()]
flatten_values = lambda dic: [x for x in dic.values()]
start_time = time.time()
print('############## F E A T U R E S E L E C T I O N ####################')
print('Removing highly correlated features among %d variables using %s correlation...' %(len(numvars),corr_type))
    corr_pair_count_dict, rem_col_list, temp_corr_list, correlated_pair_dict = find_corr_vars(df[numvars].corr(corr_type), corr_limit)
temp_dict = Counter(flatten(flatten_items(correlated_pair_dict)))
temp_corr_list = []
for name, count in temp_dict.items():
if count >= 2:
temp_corr_list.append(name)
temp_uncorr_list = []
for name, count in temp_dict.items():
if count == 1:
temp_uncorr_list.append(name)
### Do another correlation test to remove those that are correlated to each other ####
corr_pair_count_dict2, rem_col_list2 , temp_corr_list2, correlated_pair_dict2 = find_corr_vars(
df[rem_col_list+temp_uncorr_list].corr(corr_type),corr_limit)
final_dict = Counter(flatten(flatten_items(correlated_pair_dict2)))
#### Make sure that these lists are sorted and compared. Otherwise, you will get False compares.
if temp_corr_list2.sort() == temp_uncorr_list.sort():
### if what you sent in, you got back the same, then you now need to pick just one:
### either keys or values of this correlated_pair_dictionary. Which one to pick?
### Here we select the one which has the least overall correlation to rem_col_list
#### The reason we choose overall mean rather than absolute mean is the same reason in finance
#### A portfolio that has lower overall mean is better than a portfolio with higher correlation
corr_keys_mean = df[rem_col_list+flatten_keys(correlated_pair_dict2)].corr(corr_type).mean().mean()
corr_values_mean = df[rem_col_list+flatten_values(correlated_pair_dict2)].corr(corr_type).mean().mean()
if corr_keys_mean <= corr_values_mean:
final_uncorr_list = flatten_keys(correlated_pair_dict2)
else:
final_uncorr_list = flatten_values(correlated_pair_dict2)
else:
final_corr_list = []
for name, count in final_dict.items():
if count >= 2:
final_corr_list.append(name)
final_uncorr_list = []
for name, count in final_dict.items():
if count == 1:
final_uncorr_list.append(name)
#### Once we have chosen a few from the highest corr list, we add them to the highest uncorr list#####
selected = copy.deepcopy(final_uncorr_list)
##### Now we have reduced the list of vars and these are ready to be used ####
final_list = list(OrderedDict.fromkeys(selected + rem_col_list))
if int(len(numvars)-len(final_list)) == 0:
print(' No variables were removed since no highly correlated variables found in data')
else:
print(' Number of variables removed due to high correlation = %d ' %(len(numvars)-len(final_list)))
if verbose == 2:
if len(left_subtract(numvars, final_list)) > 0:
print(' List of variables removed: %s' %(left_subtract(numvars, final_list)))
return final_list
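# Hypothetical call (df and numeric_cols are illustrative names, not defined here):
#   numeric_cols = ['sqft', 'rooms', 'age', 'price_per_sqft']
#   keep_vars = remove_variables_using_fast_correlation(df, numeric_cols,
#                                                       corr_type='pearson',
#                                                       corr_limit=0.70, verbose=2)
# Only pass numeric columns -- string or categorical columns will break df.corr().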
################################################################################
from sklearn.linear_model import LogisticRegression, LogisticRegressionCV
from sklearn.preprocessing import StandardScaler, MinMaxScaler
from sklearn.linear_model import Lasso, Ridge
from sklearn.preprocessing import PolynomialFeatures
from sklearn.model_selection import GridSearchCV
import re
def add_poly_vars_select(data,numvars,targetvar,modeltype,poly_degree=2,Add_Poly=2,md='',corr_limit=0.70,
scaling=True, fit_flag=False, verbose=0):
"""
#### This adds Polynomial and Interaction Variables of any Size to a data set and returns the best vars
among those poly and interaction variables. Notice you will get a list of variables as well as the modified
data set with the old and new (added) variables. Very Convenient when you want to do quick testing.
There are 3 settings for Add_Poly flag: 0 => No poly or intxn variables added. 1=> Only intxn vars
added. 2=> only polynomial degree (squared) vars added. 3=> both squared and intxn vars added.
If Fit_Flag=True, then it is assumed that it is Training data and hence variables are selected
If Fit_Flag=False, then it is assumed it is Test data and no variables are chosen but we keep training ones.
"""
tolerance = 0.01
orig_data_index = data.index
if modeltype == 'Regression':
lm = Lasso(alpha=0.001, max_iter=2000,
fit_intercept=True, normalize=False)
else:
lm = LogisticRegression(C=0.01,fit_intercept=True,tol=tolerance,
max_iter=2000,solver='liblinear',n_jobs=-1,
penalty='l2',dual=False, random_state=0)
predictors = copy.deepcopy(numvars)
#### number of original features in data set ####
n_orig_features = len(predictors)
selected = []
X_data = data[predictors]
####### Initial Model with all input variables ######
if fit_flag:
Y = data[targetvar]
        print('Building Initial Model with given variables...')
print_model_metrics(modeltype,lm,X_data,Y,False)
if scaling == 'Standard':
XS = StandardScaler().fit_transform(X_data)
elif scaling == 'MinMax':
XS = MinMaxScaler().fit_transform(X_data)
### or you can use Centering which simply subtracts the Mean:
elif scaling == 'Centering':
XS = (X_data-X_data.mean())
else:
XS = copy.deepcopy(X_data)
#XS.columns=predictors
X = copy.deepcopy(XS)
######## Here is where the Interaction variable selection begins ############
#print('Adding Polynomial %s-degree and Interaction variables...' %poly_degree)
if Add_Poly == 1:
### If it is 1, add only Interaction variables.
poly = PolynomialFeatures(degree=poly_degree, include_bias = False, interaction_only=True)
elif Add_Poly == 2:
#### If it is 2 or 3 add both Squared and Interaction variables. We will remove interaction
### variables later in this program. For now include both!
poly = PolynomialFeatures(degree=poly_degree, include_bias = False, interaction_only=False)
elif Add_Poly == 3:
#### If it is 2 or 3 add both Squared and Interaction variables. We will remove interaction
### variables later in this program. For now include both!
poly = PolynomialFeatures(degree=poly_degree, include_bias = False, interaction_only=False)
if fit_flag:
md = poly.fit(X) #### This defines the Polynomial feature extraction
try:
XP = md.transform(X) #### This transforms X into a Polynomial Order
except MemoryError:
return predictors, '', X, md, [], dict()
#################################################################################
##### CONVERT X-VARIABLES FROM POLY AND INTERACTION INTO ORIGINAL VARIABLES ###
#################################################################################
xnames = md.get_feature_names() ### xnames contains all x-only, Poly and Intxn variables in x-format
if len(xnames) > 300:
max_iter = 5000
else:
max_iter = 2000
###### BUILDING COMPARISON MODEL WITH INTERACTION VARIABLES ########################
start_time = time.time()
if modeltype == 'Regression':
lm = Lasso(alpha=0.001, max_iter=max_iter,
fit_intercept=True, normalize=False)
else:
lm = LogisticRegression(C=0.01,fit_intercept=True, tol=tolerance,
max_iter=max_iter,solver='liblinear',n_jobs=-1,
penalty='l2',dual=False, random_state=0)
########### Here starts the conversion of X variables into Text feature variable names #####################
XP1 = pd.DataFrame(XP,index=orig_data_index, columns=xnames) ## XP1 has all the Xvars:incl orig+poly+intxn vars
x_vars = xnames[:n_orig_features] ### x_vars contain x_variables such as 'x1'
#### Feature_xvar_dict will map the X_vars, Squared_vars and Intxn_vars back to Text vars in one variable
feature_xvar_dict = dict(zip(x_vars,predictors)) ###
if fit_flag:
#### If there is fitting to be done, then you must do this ###
if Add_Poly == 1: #### This adds only Interaction variables => no Polynomials!
sq_vars = [] ### sq_vars contain only x-squared variables such as 'x^2'
intxn_vars = left_subtract(xnames, sq_vars+x_vars) ### intxn_vars contain interaction vars such as 'x1 x2'
elif Add_Poly == 2: #### This adds only Polynomial variables => no Interactions!
sq_vars = [x for x in xnames if '^2' in x] ### sq_vars contain only x-squared variables such as 'x^2'
intxn_vars = [] ### intxn_vars contain interaction vars such as 'x1 x2'
elif Add_Poly == 3: #### This adds Both Interaction and Polynomial variables => Best of Both worlds!
sq_vars = [x for x in xnames if '^2' in x] ### sq_vars contain only x-squared variables such as 'x^2'
intxn_vars = left_subtract(xnames, sq_vars+x_vars) ### intxn_vars contain interaction vars such as 'x1 x2'
#### It is now time to cut down the original x_variables to just squared variables and originals here ####
dict_vars = dict(zip(predictors,x_vars)) ### this is a dictionary mapping original variables and their x-variables
reverse_dict_vars = dict([(y,x) for (x,y) in dict_vars.items()]) ### this maps the x-vars to original vars
##### Now let's convert Interaction x_variables into their corresponding text variables
intxn_text_vars = []
for each_item in intxn_vars:
if len(each_item.split(" ")) == 1:
intxn_text_vars.append(reverse_dict_vars[each_item])
feature_xvar_dict[each_item] = reverse_dict_vars[each_item]
elif len(each_item.split(" ")) == 2:
two_items_list = each_item.split(" ")
full_intxn_name = reverse_dict_vars[two_items_list[0]] +" "+ reverse_dict_vars[two_items_list[1]]
intxn_text_vars.append(full_intxn_name)
feature_xvar_dict[each_item] = full_intxn_name
else:
pass
##### Now let's convert Squared x_variables into their corresponding text variables
sq_text_vars = []
for each_sq_item in sq_vars:
if len(each_sq_item.split("^")) == 2:
two_item_list = each_sq_item.split("^")
full_sq_name = reverse_dict_vars[two_item_list[0]] +"^2"
sq_text_vars.append(full_sq_name)
feature_xvar_dict[each_sq_item] = full_sq_name
else:
pass
#### Now we need to combine the x_vars, Squared_vars and the Intxn_vars together as Text vars in one variable
full_x_vars = x_vars + sq_vars + intxn_vars
text_vars = predictors + sq_text_vars + intxn_text_vars #### text_vars now contains all the text version of x-variables
if len(text_vars) == len(full_x_vars):
print('Successfully transformed x-variables into text-variables after Polynomial transformations')
else:
print('Error: Not able to transform x-variables into text-variables. Continuing without Poly vars...')
return predictors, lm, XP1, md, x_vars,feature_xvar_dict
feature_textvar_dict = dict([(y,x) for (x,y) in feature_xvar_dict.items()])
#### Now Build a Data Frame containing containing additional Poly and Intxn variables in x-format here ####
new_addx_vars = sq_vars+intxn_vars
if len(new_addx_vars) == 0:
print('Error: There are no squared or interaction vars to add. Continuing without Poly vars...')
return predictors, lm, XP1, md, x_vars,feature_xvar_dict
#### We define 2 data frames: one for removing highly correlated vars and other for Lasso selection
XP2 = XP1[new_addx_vars].join(Y)
XP1X = XP1[new_addx_vars]
new_addtext_vars = [feature_xvar_dict[x] for x in new_addx_vars]
XP2.columns = new_addtext_vars+[targetvar]
XP1X.columns = new_addtext_vars
###################################################################################
#### FAST FEATURE REDUCTION USING L1 REGULARIZATION FOR LARGE DATA SETS ######
#### Use LassoCV or LogisticRegressionCV to Reduce Variables using Regularization
###################################################################################
print('Building Comparison Model with only Poly and Interaction variables...')
lm_p, _ = print_model_metrics(modeltype,lm,XP1X,Y,True)
print(' Time Taken: %0.0f (in seconds)' %(time.time()-start_time))
#### We need to build a dataframe to hold coefficients from the model for each variable ###
dfx = | pd.DataFrame([new_addx_vars, new_addtext_vars]) | pandas.DataFrame |
#!/usr/bin/env python
# coding: utf-8
# # Python script for automatic quality-control procedures (CEMADEN data)
# # Created on Aug.12.2020
# ### By:
# <NAME>
# <NAME>
# <NAME>
# Importing libraries used in this code
# In[ ]:
import numpy as np
import pandas as pd
from datetime import datetime
import glob
import warnings
from datetime import datetime
import sys
import esda
import libpysal as lps
# Assigning values to main variables and other parameters
# In[ ]:
#Data storage path
path = 'D:/CEMADEN/'
years = [2014 , 2015, 2016, 2017, 2018, 2019,2020] #years used in the analysis
states = ['PE','PB','RN','BA','SE','PI','CE','MA','AL','AC','AM','RO','RR','AP','TO','PA','MT',
'MS','DF','GO','RS','SC','PR','ES','MG','RJ','SP'] #states used in the analysis
#Filters variables
threshold_missing_data_days=60 #days in a year without data
threshold_days_without_rain=200 #days in a row without rain
threshold_constant_days=35 #days in a row with rain = 0,2mm
threshold_max_peak=40 #record of xmm in 10min
#Moran's I variables
properties=['rainfall_events','mean_rainfall_depth','yearly_rainfall'] #properties calculated based on the events durations defined
mits_integer = [60, 360,1439] #mit lenghts used
n_neighbors = 5
p_value = 0.05
# Functions
# ------
# In[ ]:
#This function inserts a beginning and ending date for each code, each year (1st january till 31st december) to compute 365 days
def insert_begin_end(df, year, code):
datatemp=str(year)+'-01-01 00:00:10' #temporary date - beginning
data_b = {'gauge_code': [code],
'city': ['x'],
'state': ['x'],
'datetime': [datatemp], #assigning beginning date to code
'rain_mm': [-1],
}
datatemp=str(year)+'-12-31 23:59:10' #temporary date - end
data_e = {'gauge_code': [code],
'city': ['x'],
'state': ['x'],
'datetime': [datatemp],
'rain_mm': [0],
}
df_b = pd.DataFrame(data_b)
df_e = pd.DataFrame(data_e)
df_b['datetime']=pd.to_datetime(df_b['datetime'])
df_e['datetime']=pd.to_datetime(df_e['datetime'])
df=pd.concat([df, df_e], ignore_index=True)
df=pd.concat([df_b, df], ignore_index=True)
return df
# In[ ]:
#This function goes through all the CEMADEN files from all states, each year, to assemble a dataframe with their gauge codes
def get_df_codes (year, state):
filename = str(state) +'_'+ str(year) + '.h5'
df_cemaden_info = pd.read_hdf(path+'/data/raw data/'+ filename,'table_info')
df_codes = df_cemaden_info['gauge_code']
return df_codes
# In[ ]:
#This function writes the status (HQ or PQ) on each gauge according to its classification
#from the filters and moran
def write_status (code, year, state, status,filter_flag, df_filtered_gauges):
df_filtered_gauges.at[(code, year),'state']=state
df_filtered_gauges.at[(code, year),'status']=status
df_filtered_gauges.at[(code, year),'filter']=filter_flag
return df_filtered_gauges
# Single gauge tests - Filters
#
# In[ ]:
# 0. Filter All: This function goes through all filters in a specific order and writes the gauge's final status for this step.
# The function raises a "flag" if the gauge doesn't fulfill the pre-established condition; otherwise, it moves on to the next filter.
def all_filters (code,df_cemaden_data,year,state,df_filtered_gauges):
write=True
flag=False
flag = filter_missing_data_days (code,df_cemaden_data,year,flag,state) #Filter 1 - missing days
if not flag:
flag = filter_consecutive_constant_values (code,df_cemaden_data,year,flag,state) #Filter 2 - consecutive 0.2mm rain days
elif flag and write:
status, filter_flag ='POOR QUALITY','missing_data_days'
df_filtered_gauges= write_status (code,year,state,status,filter_flag,df_filtered_gauges)
write=False
if not flag:
flag = filter_max_peak (code,df_cemaden_data,year,flag,state) #Filter 3 - max peak in 10min
elif flag and write:
status, filter_flag ='POOR QUALITY','consecutive_constant_values'
df_filtered_gauges= write_status (code,year,state,status,filter_flag,df_filtered_gauges)
write=False
if not flag:
flag= filter_consecutive_period_without_rain (code,df_cemaden_data,year,flag,state) #Filter 4- consecutive w/o rain days
elif flag and write:
status, filter_flag ='POOR QUALITY','max_peak'
df_filtered_gauges= write_status (code, year,state,status,filter_flag,df_filtered_gauges)
write=False
if not flag:
status, filter_flag ='HIGH QUALITY','unflagged'
df_filtered_gauges= write_status (code, year,state,status,filter_flag,df_filtered_gauges)
write=False
elif flag and write:
status, filter_flag ='POOR QUALITY','consecutive_period_without_rain'
df_filtered_gauges= write_status (code, year,state,status,filter_flag,df_filtered_gauges)
write=False
return df_filtered_gauges
# In[ ]:
# 1. Filter: Flags all gauges missing xx or more days of data
def filter_missing_data_days (code,df_cemaden_data,year,flag,state):
df_gauge=df_cemaden_data[(df_cemaden_data['gauge_code'] == code )]
df_gauge['rain_mm']=-1 #overwriting all rain values since missing values are substituted by "0"
df_gauge=df_gauge.set_index('datetime')
df_gauge_resample=df_gauge['rain_mm'].resample('D').sum() #resampling to obtain the information in "days"
number_days_year=get_days (year)
days_without_data= number_days_year - df_gauge_resample[(df_gauge_resample <0)].count()
if days_without_data >= threshold_missing_data_days:
flag=True #raising the flag if the gauge has more missing days in the records than the highest threshold defined
return flag
#This function is to identify whether the analyzed year is a leap year
def get_days (year):
if (year%4==0 and year%100!=0) or (year%400==0):
nday_year=366
else:
nday_year=365
return nday_year
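# Worked examples of the leap-year rule above:
#   get_days(2016) -> 366  (divisible by 4, not by 100)
#   get_days(1900) -> 365  (divisible by 100 but not by 400)
#   get_days(2000) -> 366  (divisible by 400)
#   get_days(2019) -> 365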
# In[ ]:
# 2. Filter: Exclusion of gauges with consecutive constant values (0.2 mm) per some xx days
def filter_consecutive_constant_values (code,df_cemaden_data, year,flag, state):
df_gauge=df_cemaden_data[(df_cemaden_data['gauge_code'] == code ) & (df_cemaden_data['rain_mm']>0)]
df_gauge=df_gauge.set_index('datetime')
t=str(threshold_constant_days)+'D'
df_rolling_mean=df_gauge['rain_mm'].rolling(t).mean()
df_rolling_count=df_gauge['rain_mm'].rolling(t).count()
df_rolling_std=df_gauge['rain_mm'].rolling(t).std()
df_rolling_mean=pd.DataFrame(df_rolling_mean)
df_rolling_mean=df_rolling_mean.rename(columns={'rain_mm':'mean'})
df_rolling_std=pd.DataFrame(df_rolling_std)
df_rolling_std=df_rolling_std.rename(columns={'rain_mm':'std'})
df_rolling_count=pd.DataFrame(df_rolling_count)
df_rolling_count=df_rolling_count.rename(columns={'rain_mm':'count'})
df_rolling_all=pd.concat([df_rolling_mean, df_rolling_std], axis=1)
df_rolling_all=pd.concat([df_rolling_all, df_rolling_count], axis=1)
    df_temp=df_rolling_all[(df_rolling_all['mean']< 0.201) & (df_rolling_all['mean']> 0.199) & (df_rolling_all['std']< 0.0001) & (df_rolling_all['count']> 50)] # df['count'] counts how many 0.2 mm pulses fall inside the rolling window
constant_period=df_temp['count'].count()
if constant_period > 0:
flag=True
return flag
# In[ ]:
# 3. Filter: Flags all gauges with maximum peaks of xx mm or more in 10 minutes
def filter_max_peak (code,df_cemaden_data,year,flag,state):
df_temp=df_cemaden_data[(df_cemaden_data['gauge_code'] == code ) & (df_cemaden_data['rain_mm'] > threshold_max_peak)]
if df_temp['rain_mm'].count()>0:
flag=True #raising the flag if the gauge has a higher max peak in the records than the highest threshold defined
return flag
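# Minimal sanity check for filter_max_peak (synthetic data, illustrative only):
def _demo_filter_max_peak():
    demo = pd.DataFrame({'gauge_code': [111, 111, 222],
                         'datetime': pd.to_datetime(['2019-01-01 10:00',
                                                     '2019-01-01 10:10',
                                                     '2019-01-01 10:00']),
                         'rain_mm': [2.0, 45.0, 1.2]})
    # gauge 111 recorded 45 mm in 10 min (> threshold_max_peak of 40 mm), so it is flagged
    return filter_max_peak(111, demo, 2019, False, 'XX')  # -> True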
# In[ ]:
#4. Filter: Flags all gauges with more than xxx consecutive days of null rain records
def filter_consecutive_period_without_rain (code,df_cemaden_data, year,flag, state):
df_gauge=df_cemaden_data[(df_cemaden_data['gauge_code'] == code ) & (df_cemaden_data['rain_mm']>0)]
df_gauge['rain_mm']=-1 #overwriting all rain values since missing values are substituted by "0"
df_gauge=insert_begin_end(df_gauge, year, code)
df_gauge=df_gauge.set_index('datetime')
df_gauge_resample=df_gauge['rain_mm'].resample('10Min').sum()
df_gauge_resample=pd.DataFrame(df_gauge_resample)
t=str(threshold_days_without_rain)+'D'
df_rolling=df_gauge_resample['rain_mm'].rolling(t).sum() #rolling window to count records at the threshold number of days scale
consecutive_period_without_rain= df_rolling[(df_rolling >= 0)].count()
if consecutive_period_without_rain > 0:
flag=True
return flag
# Spatial Analysis - Moran Index
# In[ ]:
def get_spots(df_filter_mit, prop, mit, year):
np.random.seed(999)
y=df_filter_mit[prop]
points = np.array(df_filter_mit[['longitute','latitude']])
wq = lps.weights.KNN(points, n_neighbors)
wq.transform = 'r'
lag_y = lps.weights.lag_spatial(wq, df_filter_mit[prop])
li = esda.moran.Moran_Local(y, wq)
sig = 1 * (li.p_sim < p_value)
hotspot = 1 * (sig * li.q==1)
coldspot = 3 * (sig * li.q==3)
doughnut = 2 * (sig * li.q==2)
diamond = 4 * (sig * li.q==4)
spots = hotspot + coldspot + doughnut + diamond
df_spots=pd.DataFrame(spots)
namecol= prop + '_'+ str(year) +'_'+ str(mit)
df_spots=df_spots.rename(columns={0:namecol})
return df_spots
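# The integer codes returned by get_spots follow the Local Moran quadrants:
#   0 = not significant, 1 = hot spot (high-high), 2 = doughnut (low-high),
#   3 = cold spot (low-low), 4 = diamond (high-low).
# Hypothetical call (df_filter_mit needs 'longitute'/'latitude' columns plus the property):
#   spots = get_spots(df_filter_mit, 'yearly_rainfall', 60, 2019)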
# # Main Scripts
# Single gauge tests
# In[ ]:
#creating dataframe to be filled with the stations' information and status after single-gauge analysis
df_analyzed_gauges = pd.DataFrame(columns=['code','state','year','status','filter'])
df_analyzed_gauges=df_analyzed_gauges.set_index(['code','year'])
#iterating to analyze all of the years
for year in years:
for state in states:
print(state,year)
n_file= str(state) +'_'+ str(year) + '.h5' #name of hdf file to be opened
df_codes=get_df_codes (year, state) #using function to get the codes for given state and year
df_cemaden_data = | pd.read_hdf(path+'data/raw data/'+ n_file,'table_data') | pandas.read_hdf |
# coding: utf-8
# In[2]:
# Load the data
import Loading_data
from matplotlib import pyplot as plt
import warnings
warnings.filterwarnings('ignore')
df = Loading_data.Get_Nacion()
df.head()
# In[185]:
import pandas as pd
momo = | pd.read_csv('https://momo.isciii.es/public/momo/data') | pandas.read_csv |
import numpy as np
import pandas as pd
try:
from auto_ml.utils_scoring import advanced_scoring_regressors, advanced_scoring_classifiers
except ImportError:
from ..auto_ml.utils_scoring import advanced_scoring_regressors, advanced_scoring_classifiers
import pathos
from sklearn.base import BaseEstimator, TransformerMixin
class Ensemble(object):
def __init__(self, ensemble_predictors, type_of_estimator, method='average'):
self.ensemble_predictors = ensemble_predictors
self.type_of_estimator = type_of_estimator
self.method = method
# ################################
# Get a dataframe that is all the predictions from all the sub-models
# ################################
# Note that we will get these predictions in parallel (relatively quick)
def get_all_predictions(self, df):
def get_predictions_for_one_estimator(estimator, df):
estimator_name = estimator.named_steps['final_model'].name
if self.type_of_estimator == 'regressor':
predictions = estimator.predict(df)
else:
# For classifiers
predictions = list(estimator.predict_proba(df))
return_obj = {estimator_name: predictions}
return return_obj
# Don't bother parallelizing if this is a single dictionary
if isinstance(df, dict):
predictions_from_all_estimators = map(lambda predictor: get_predictions_for_one_estimator(predictor, df), self.ensemble_predictors)
predictions_from_all_estimators = list(predictions_from_all_estimators)
else:
# Open a new multiprocessing pool
pool = pathos.multiprocessing.ProcessPool()
# Since we may have already closed the pool, try to restart it
try:
pool.restart()
except AssertionError as e:
pass
# Pathos doesn't like datasets beyond a certain size. So fall back on single, non-parallel predictions instead.
try:
predictions_from_all_estimators = pool.map(lambda predictor: get_predictions_for_one_estimator(predictor, df), self.ensemble_predictors, chunksize=100)
predictions_from_all_estimators = list(predictions_from_all_estimators)
except:
predictions_from_all_estimators = map(lambda predictor: get_predictions_for_one_estimator(predictor, df), self.ensemble_predictors)
predictions_from_all_estimators = list(predictions_from_all_estimators)
# Once we have gotten all we need from the pool, close it so it's not taking up unnecessary memory
pool.close()
try:
pool.join()
except AssertionError:
pass
results = {}
for result_dict in predictions_from_all_estimators:
results.update(result_dict)
# if this is a single row we are getting predictions from, just return a dictionary with single values for all the predictions
if isinstance(df, dict):
return results
else:
predictions_df = | pd.DataFrame.from_dict(results, orient='columns') | pandas.DataFrame.from_dict |
"""
画K线文件,反应策略买入卖出节点。
"""
import os
import sys
import time
import threading
from multiprocessing import Pool, RLock, freeze_support
import numpy as np
import pandas as pd
from tqdm import tqdm
from rich import print as print
import CeLue  # personal strategy module, not shared publicly
import func_TDX
import user_config as ucfg
from pyecharts.charts import Kline, Bar, Grid
from pyecharts.globals import ThemeType
from pyecharts import options as opts
from pyecharts.commons.utils import JsCode
def markareadata(df_stock):
    # build the coordinate pairs that mark the buy/sell areas on the chart
    df_celue = df_stock.loc[df_stock['celue_buy'] | df_stock['celue_sell']]  # keep only the rows flagged as buy or sell points
yAxis_max = df_stock['high'].max()
markareadata = []
temp = []
    # k is the positional index (which point on the chart), v is the row content (dict-like)
for k, v in df_celue.iterrows():
temp.append(
{
"xAxis": k,
# "yAxis": yAxis_max if v['celue_sell'] else 0, # buy点是0,sell点是最大值 填了y坐标会导致图形放大后区域消失
}
)
        # once temp holds 2 entries, the start and end x/y coordinates are complete; append them to markareadata and reset temp
        if len(temp) == 2:
            # add an 'itemStyle': {'color': ...} entry to the second coordinate dict.
            # df_celue.at[temp[1]['xAxis'], 'close'] reads the close price at that bar.
            # if the second close is above the first, color the area red (profitable trade), otherwise green (losing trade)
temp[1]["itemStyle"] = {'color': "#ef232a" if df_celue.at[temp[1]['xAxis'], 'close'] > df_celue.at[
temp[0]['xAxis'], 'close'] else "#14b143"}
markareadata.append(temp)
# rprint(markareadata)
temp = []
return markareadata
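# markareadata ends up shaped like (hypothetical values):
#   [[{'xAxis': 12}, {'xAxis': 30, 'itemStyle': {'color': '#ef232a'}}],
#    [{'xAxis': 55}, {'xAxis': 61, 'itemStyle': {'color': '#14b143'}}]]
# i.e. one [buy, sell] pair per round trip, red (#ef232a) when the trade closed higher
# and green (#14b143) when it closed lower, ready for a pyecharts mark-area option.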
def marklinedata(df_stock):
# 生成趋势线数据
import math
from func_TDX import SMA, BARSLASTCOUNT
"""
    Reproduces exactly the behaviour of the following TongDaXin (TDX) formula:
现价:CONST(C),COLORLIGRAY,DOTLINE;
MAA10:=MA(CLOSE,55);
高突:=BARSLASTCOUNT(L>MAA10)=9;
低突:=BARSLASTCOUNT(H<MAA10)=9;
高突破:=高突 ;
低突破:=低突 ;
距上次高位置:=BARSLAST(高突破),NODRAW;
距上次低位置:=BARSLAST(低突破),NODRAW;
高过滤:=(高突破 AND REF(距上次高位置,1)>REF(距上次低位置,1));
低过滤:=(低突破 AND REF(距上次低位置,1)>REF(距上次高位置,1));
高0:=BACKSET(高过滤,10);
低0:=BACKSET(低过滤,10);
高1:=CROSS(高0,0.5);
低1:=CROSS(低0,0.5);
距上高位:=BARSLAST(高1),NODRAW;
距上低位:=BARSLAST(低1),NODRAW;
低点:=IF(距上高位 > 距上低位, LLV(L,距上低位+1)=L,0);
低:=FILTERX(低点 AND 距上高位>距上低位,距上低位+1);
高点:=IF(距上高位 < 距上低位, HHV(H,距上高位+1)=H,0);
高:=FILTERX(高点 AND 距上低位>距上高位 ,距上高位+1);
NOTEXT上涨线:DRAWLINE(低 AND BARSLAST(高)>20,L,高 AND BARSLAST(低)>20,H,0),COLORRED,LINETHICK2;
NOTEXT下跌线:DRAWLINE(高 AND BARSLAST(低)>20,H,低 AND BARSLAST(高)>20,L,0),COLORGREEN,LINETHICK2;
"""
    df_stock['date'] = pd.to_datetime(df_stock['date'], format='%Y-%m-%d')  # convert the date column to datetime
    df_stock.set_index('date', drop=False, inplace=True)  # index by date so it aligns and merges easily with the separately adjusted price DataFrame
H = df_stock['high']
L = df_stock['low']
C = df_stock['close']
TJ04_均线 = SMA(C, 55)
TJ04_高突破 = BARSLASTCOUNT(L > TJ04_均线) == 9
TJ04_低突破 = BARSLASTCOUNT(H < TJ04_均线) == 9
TJ04_高突破 = pd.DataFrame(TJ04_高突破.loc[TJ04_高突破 == True], columns=["高突破"])
TJ04_低突破 = pd.DataFrame(TJ04_低突破.loc[TJ04_低突破 == True], columns=["低突破"])
TJ04_过滤 = pd.concat([TJ04_高突破, TJ04_低突破]).fillna(value=False).sort_index()
del TJ04_均线, TJ04_高突破, TJ04_低突破
高, 低 = 0, 0
    # signal-filtering loop: walk dates from oldest to newest, alternating high/low breakout signals and keeping only the nearest one of each kind
for index, row in TJ04_过滤[:].iterrows():
if row['高突破'] and 高 == 1:
TJ04_过滤.drop(index=index, inplace=True)
elif row['低突破'] and 低 == 1:
TJ04_过滤.drop(index=index, inplace=True)
elif row['高突破'] and 高 == 0:
高 = 1
低 = 0
elif row['低突破'] and 低 == 0:
高 = 0
低 = 1
    # locate the swing highs and lows of each stage
TJ04_过滤.reset_index(drop=False, inplace=True)
TJ04_高低点 = pd.DataFrame()
last_day = None
for index, row in TJ04_过滤.iterrows():
if index == 0:
last_day = row['date']
continue
elif row['高突破']:
            s_date = last_day  # start of the date interval
            e_date = row['date']  # end of the date interval
            low_date = L.loc[s_date:e_date].idxmin()  # date of the swing low
            low_value = L.loc[s_date:e_date].min()  # value of the swing low
last_day = low_date
df_temp = pd.Series(data={'低点价格': low_value,
'低点日期': low_date,
},
name=index,
)
elif row['低突破']:
            s_date = last_day  # start of the date interval
            e_date = row['date']  # end of the date interval
            high_date = H.loc[s_date:e_date].idxmax()  # date of the swing high
            high_value = H.loc[s_date:e_date].max()  # value of the swing high
last_day = high_date
df_temp = pd.Series(data={'高点价格': high_value,
'高点日期': high_date,
},
name=index,
)
TJ04_高低点 = TJ04_高低点.append(df_temp)
TJ04_高低点.reset_index(drop=True, inplace=True)
    # convert to the data format pyecharts expects
marklinedata = []
temp = []
"""
x坐标是日期对应的整数序号,y坐标是价格
所需数据格式: [[{'xAxis': 起点x坐标, 'yAxis': 起点y坐标, 'value': 线长}, {'xAxis': 终点x坐标, 'yAxis': 终点y坐标}],
[{'xAxis': 起点x坐标, 'yAxis': 起点y坐标, 'value': 线长}, {'xAxis': 终点x坐标, 'yAxis': 终点y坐标}],
]
"""
last_day, last_value = 0, 0
for index, row in TJ04_高低点.iterrows():
if index == 0:
            if pd.isna(row['低点价格']):  # True = the high is the valid value, False = the low is the valid value
last_day = row['高点日期']
last_value = row['高点价格']
else:
last_day = row['低点日期']
last_value = row['低点价格']
continue
elif | pd.isna(row['低点价格']) | pandas.isna |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
description: provide 24hr feedback to clinicians
version: 0.0.1
created: 2018-08-01
author: <NAME>
dependencies:
* requires tidepool-analytics-env (see readme for instructions)
* requires a clinician or study username (email) and password
* requires tidals (tidepool data analytics tools)
license: BSD-2-Clause
"""
# %% REQUIRED LIBRARIES
import pandas as pd
import numpy as np
import os
import sys
import requests
import json
import argparse
import getpass
from pytz import timezone
from datetime import timedelta
import datetime as dt
import subprocess as sub
tidalsPath = os.path.abspath(os.path.join(__file__, "..", "..", "..", "tidals"))
if tidalsPath not in sys.path:
sys.path.insert(0, tidalsPath)
import tidals as td
envPath = os.path.abspath(os.path.join(__file__, "..", "..", "..",
"get-qualify-export-donor-data"))
if envPath not in sys.path:
sys.path.insert(0, envPath)
import environmentalVariables
# %% USER INPUTS
codeDescription = "Provide feedback of last 24 hours (6am to 6am) to clinicians"
parser = argparse.ArgumentParser(description=codeDescription)
parser.add_argument("-d",
"--date-stamp",
dest="dateStamp",
default=dt.datetime.now().strftime("%Y-%m-%d"),
help="date of the daily report, defaults to current date")
parser.add_argument("-a",
"--accountAlias",
dest="accountAlias",
default=np.nan,
help="enter an account alias so the master clinician or study account" +
"can be looked up in your environmental variables, OR leave this blank" +
"and you will be prompted to enter in account credentials")
parser.add_argument("-o",
"--output-data-path",
dest="outputPath",
default=os.path.abspath(os.path.join(".", "data")),
help="the output path where the data is stored")
parser.add_argument("-v",
"--verbose",
dest="verboseOutput",
default=True,
help="True if you want script progress to print to the console")
args = parser.parse_args()
# %% CHECK/DECLARE INPUTS AND OUTPUT VARIABLES
if pd.isnull(args.accountAlias):
os.environ["TEMP_EMAIL"] = getpass.getpass(prompt="email: ")
os.environ["TEMP_PASSWORD"] = getpass.getpass(prompt="password: ")
if (pd.isnull(os.environ["TEMP_EMAIL"]) | pd.isnull(os.environ["TEMP_PASSWORD"])):
sys.exit("error in entering user email and password")
else:
os.environ["TEMP_EMAIL"] = os.environ[args.accountAlias + "_EMAIL"]
os.environ["TEMP_PASSWORD"] = os.environ[args.accountAlias + "_PASSWORD"]
# create output folder if it doesn't exist
if not os.path.isdir(args.outputPath):
os.makedirs(args.outputPath)
# create a report output folder if it doesn't exist
reportDate = args.dateStamp
reportPath = os.path.join(args.outputPath, "reports")
reportOutputPath = os.path.join(reportPath, reportDate)
if not os.path.isdir(reportPath):
os.makedirs(reportPath)
os.makedirs(reportOutputPath)
indvidualDataFolder = os.path.join(reportOutputPath, "individual-data-files")
if not os.path.isdir(indvidualDataFolder):
os.makedirs(indvidualDataFolder)
# create a metadata output folder if it doesn't exist
metadataPath = os.path.join(args.outputPath, "metadata", reportDate)
jsonDataPath = os.path.join(metadataPath, "jsonData")
if not os.path.isdir(metadataPath):
os.makedirs(metadataPath)
os.makedirs(jsonDataPath)
allStats = pd.DataFrame()
metaData = pd.DataFrame(columns=["userID",
"studyID",
"getData.response1",
"getData.response2",
"nDuplicatesRemoved"])
# %% FUNCTIONS
def get_stats(df):
statDF = pd.DataFrame(index=[0])
statDF["totalNumberCBGValues"] = df.mg_dL.count()
statDF["mean_mgdL"] = df.mg_dL.mean()
statDF["std_mgdL"] = df.mg_dL.std()
statDF["cov_mgdL"] = statDF["std_mgdL"] / statDF["mean_mgdL"]
statDF["totalBelow54"] = sum(df.mg_dL < 54)
statDF["totalBelow70"] = sum(df.mg_dL < 70)
statDF["total54to70"] = sum((df.mg_dL >= 54) & (df.mg_dL < 70))
statDF["total70to140"] = sum((df.mg_dL >= 70) & (df.mg_dL <= 140))
statDF["total70to180"] = sum((df.mg_dL >= 70) & (df.mg_dL <= 180))
statDF["total180to250"] = sum((df.mg_dL > 180) & (df.mg_dL <= 250))
statDF["totalAbove180"] = sum(df.mg_dL > 180)
statDF["totalAbove250"] = sum(df.mg_dL > 250)
statDF["percentBelow54"] = statDF["totalBelow54"] / statDF["totalNumberCBGValues"]
statDF["percentBelow70"] = statDF["totalBelow70"] / statDF["totalNumberCBGValues"]
statDF["percent70to140"] = statDF["total70to140"] / statDF["totalNumberCBGValues"]
statDF["percent70to180"] = statDF["total70to180"] / statDF["totalNumberCBGValues"]
statDF["percentAbove180"] = statDF["totalAbove180"] / statDF["totalNumberCBGValues"]
statDF["percentAbove250"] = statDF["totalAbove250"] / statDF["totalNumberCBGValues"]
statDF["min_mgdL"] = df.mg_dL.min()
statDF["median_mgdL"] = df.mg_dL.describe()["50%"]
statDF["max_mgdL"] = df.mg_dL.max()
# calculate the start and end time of the cbg data
startTime = df["localTime"].min()
statDF["startTime"] = startTime
endTime = df["localTime"].max()
statDF["endTime"] = endTime
statDF["totalNumberPossibleCBGvalues"] = len(pd.date_range(startTime, endTime, freq="5min"))
# feedback criteria
# A. incomplete dataset
statDF["percentOfExpectedData"] = \
(((endTime - startTime).days * 86400) +
((endTime - startTime).seconds)) / (86400 - (5*60))
if statDF.loc[0, "percentOfExpectedData"] < 0.834: # greater than 4 hours of expected data
statDF["GTE4hoursNoCgmSignal"] = "NA"
statDF["incompleteDataset"] = "FLAG (" + \
str(round(statDF.loc[0, "percentOfExpectedData"] * 100, 1)) + "%)"
else:
statDF["incompleteDataset"] = np.nan
# 1. >=4 hours without CGM signal
missingCgm = statDF["totalNumberPossibleCBGvalues"] - statDF["totalNumberCBGValues"]
if missingCgm[0] > (4 * 60 / 5):
statDF["GTE4hoursNoCgmSignal"] = "FLAG"
else:
statDF["GTE4hoursNoCgmSignal"] = np.nan
# 2. >= 2 hours 54 <= BG < 70 mg/dl
if statDF.loc[0, "total54to70"] > (2 * 60 / 5):
statDF["GTE2hoursBetween54to70"] = \
"FLAG (" + str(round(statDF.loc[0, "total54to70"] * 5)) + "min)"
else:
statDF["GTE2hoursBetween54to70"] = np.nan
# 3. >= 15 minutes < 54 mg/dl"
if statDF.loc[0, "totalBelow54"] > (15 / 5):
statDF["GTE15minBelow54"] = "FLAG (" + str(round(statDF.loc[0, "totalBelow54"] * 5)) + "min)"
else:
statDF["GTE15minBelow54"] = np.nan
return statDF
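# Minimal sketch of calling get_stats on synthetic CGM data (illustrative only):
def _demo_get_stats():
    demo = pd.DataFrame({
        "localTime": pd.date_range("2018-08-01 06:00", periods=288, freq="5min"),
    })
    demo["mg_dL"] = np.random.normal(140, 35, len(demo)).round()
    return get_stats(demo)  # one-row DataFrame of stats and feedback flags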
def sort_and_pretty_stat_output(df):
for col in list(df):
if (("percent" in col) | ("cov" in col)):
df[col] = round(df[col] * 100, 1)
for col in ["mean_mgdL", "std_mgdL"]:
df[col] = round(df[col], 1)
df = df[["studyID",
"incompleteDataset",
"GTE4hoursNoCgmSignal",
"GTE2hoursBetween54to70",
"GTE15minBelow54",
"totalNumberCBGValues",
"totalNumberPossibleCBGvalues",
"startTime",
"endTime",
"percentOfExpectedData",
"mean_mgdL",
"std_mgdL",
"cov_mgdL",
"min_mgdL",
"median_mgdL",
"max_mgdL",
"percentBelow54",
"percentBelow70",
"percent70to140",
"percent70to180",
"percentAbove180",
"percentAbove250",
"totalBelow54",
"totalBelow70",
"total54to70",
"total70to140",
"total70to180",
"total180to250",
"totalAbove180",
"totalAbove250"]]
return df
def get_timeZoneOffset(currentDate, userTz):
tz = timezone(userTz)
tzoNum = int(tz.localize(pd.to_datetime(currentDate) + timedelta(days=1)).strftime("%z"))
tzoHours = np.floor(tzoNum / 100)
tzoMinutes = round((tzoNum / 100 - tzoHours) * 100, 0)
tzoSign = np.sign(tzoHours)
tzo = int((tzoHours * 60) + (tzoMinutes * tzoSign))
return tzo
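# Example (hypothetical): the offset comes back in minutes, e.g.
#   get_timeZoneOffset("2018-08-01", "US/Pacific") -> -420  (UTC-07:00 during PDT)
#   get_timeZoneOffset("2018-01-15", "US/Pacific") -> -480  (UTC-08:00 during PST)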
def get_donor_lists(email, password, outputDonorList):
p = sub.Popen(["getusers", email,
"-p", password, "-o",
outputDonorList, "-v"], stdout=sub.PIPE, stderr=sub.PIPE)
output, errors = p.communicate()
output = output.decode("utf-8")
errors = errors.decode("utf-8")
if output.startswith("Successful login.\nSuccessful") is False:
sys.exit("ERROR with" + email +
" ouput: " + output +
" errorMessage: " + errors)
return
def load_donors(outputDonorList):
donorList = []
if os.stat(outputDonorList).st_size > 0:
donorList = pd.read_csv(outputDonorList,
header=None,
usecols=[0, 1],
names=["userID", "name"],
low_memory=False)
return donorList
def get_json_data(email, password, userid, outputFilePathName, startDate, endDate):
url1 = "https://api.tidepool.org/auth/login"
myResponse = requests.post(url1, auth=(email, password))
if(myResponse.ok):
xtoken = myResponse.headers["x-tidepool-session-token"]
url2 = "https://api.tidepool.org/data/" + userid + \
"?endDate=" + endDate.strftime("%Y-%m-%d") + \
"T23:59:59.000Z&startDate=" + \
startDate.strftime("%Y-%m-%d") + "T00:00:00.000Z"
headers = {
"x-tidepool-session-token": xtoken,
"Content-Type": "application/json"
}
myResponse2 = requests.get(url2, headers=headers)
if(myResponse2.ok):
usersData = json.loads(myResponse2.content.decode())
with open(outputFilePathName, "w") as outfile:
json.dump(usersData, outfile)
if args.verboseOutput == True:
print("successfully downloaded to " + outputFilePathName)
else:
print("ERROR", myResponse2.status_code)
else:
print("ERROR", myResponse.status_code)
myResponse2 = np.nan
return myResponse, myResponse2
# %% START OF CODE
# get the list of donors if it doesn't already exist
outputDonorList = os.path.abspath(os.path.join(args.outputPath, "PHI-study-participants.csv"))
if not os.path.exists(outputDonorList):
get_donor_lists(os.environ["TEMP_EMAIL"], os.environ["TEMP_PASSWORD"], outputDonorList)
# load in the donor list
studyPartipants = load_donors(outputDonorList)
# deal with a specific use case called telet1d
if args.accountAlias in ["TELET1D"]:
studyPartipants = studyPartipants[studyPartipants["name"] !=
"<NAME>"].sort_values("name").reset_index(drop=True)
studyPartipants.to_csv(outputDonorList, index_label="dIndex")
else:
studyPartipants = pd.read_csv(outputDonorList, index_col="dIndex", low_memory=False)
for dIndex in studyPartipants.index:
userID = studyPartipants.userID[dIndex]
studyID = studyPartipants["name"][dIndex]
metaData.loc[dIndex, ["userID", "studyID"]] = userID, studyID
outputFileLocation = os.path.join(jsonDataPath, "PHI-" + userID + ".json")
startDate = pd.to_datetime(reportDate) - pd.Timedelta(2, unit="D")
endDate = pd.to_datetime(reportDate) + pd.Timedelta(1, unit="D")
reponse1, reponse2 = get_json_data(os.environ["TEMP_EMAIL"], os.environ["TEMP_PASSWORD"],
userID, outputFileLocation, startDate, endDate)
metaData.loc[dIndex, ["getData.response1", "getData.response2"]] = \
reponse1.status_code, reponse2.status_code
# load json data
data = pd.read_json(outputFileLocation)
if "type" in list(data):
if "cbg" in data.type.unique():
# calculate stats
cgmData = data[data.type == "cbg"].copy()
cgmData["utcTime"] = pd.to_datetime(cgmData.time, utc=True)
# get data from 6am to 6am
if (("timezone" in list(data)) | ("timezoneOffset" in list(data))):
if "timezone" in list(data):
userTz = data.timezone.describe()["top"]
tzo = get_timeZoneOffset(reportDate, userTz)
tz = timezone(userTz)
start6amDate = tz.localize( | pd.to_datetime(reportDate) | pandas.to_datetime |
from typing import Any, Callable, Dict, Sequence, TypeVar, cast
try:
import pandas # type: ignore
except ImportError:
pandas = None
from .activity import Activity, Trade
from .position import Position
_ConvertibleModel = TypeVar("_ConvertibleModel", Position, Activity)
def dataframeForModelObjects(items: Sequence[_ConvertibleModel]) -> pandas.DataFrame:
assert pandas, "Pandas needs to be installed to use this function"
if len(items) > 0:
rows = [[fn(i) for fn in _dataframeColumnFunctions(i).values()] for i in items]
columns = _dataframeColumnFunctions(items[0]).keys()
return | pandas.DataFrame(rows, columns=columns) | pandas.DataFrame |
"""Testing basic pipeline stages."""
import pandas as pd
from pdpipe.cq import StartWith
from pdpipe.basic_stages import ValDrop
def test_valdrop_with_columns():
"""Testing the ColDrop pipeline stage."""
df = pd.DataFrame([[1, 4], [4, 5], [18, 11]], [1, 2, 3], ['a', 'b'])
res_df = ValDrop([4], 'a').apply(df)
assert 1 in res_df.index
assert 2 not in res_df.index
assert 3 in res_df.index
def test_valdrop_with_columns_verbose():
"""Testing the ColDrop pipeline stage."""
df = pd.DataFrame([[1, 4], [4, 5], [18, 11]], [1, 2, 3], ['a', 'b'])
res_df = ValDrop([4], 'a').apply(df, verbose=True)
assert 1 in res_df.index
assert 2 not in res_df.index
assert 3 in res_df.index
def test_valdrop_without_columns():
"""Testing the ColDrop pipeline stage."""
df = pd.DataFrame([[1, 4], [4, 5], [18, 11]], [1, 2, 3], ['a', 'b'])
res_df = ValDrop([4]).apply(df)
assert 1 not in res_df.index
assert 2 not in res_df.index
assert 3 in res_df.index
def test_valdrop_w_fittable_cq():
df = pd.DataFrame([[1, 4], [4, 5]], [1, 2], ['aa', 'ba'])
vdrop = ValDrop([4], columns=StartWith('a'))
res_df = vdrop(df)
assert 1 in res_df.index
assert 2 not in res_df.index
    # now after the column qualifier is fitted, 'ag' should not be transformed
df = | pd.DataFrame([[1, 4], [4, 5]], [1, 2], ['aa', 'ag']) | pandas.DataFrame |
"""
Additional tests for PandasArray that aren't covered by
the interface tests.
"""
import numpy as np
import pytest
import pandas as pd
import pandas._testing as tm
from pandas.arrays import PandasArray
from pandas.core.arrays.numpy_ import PandasDtype
@pytest.fixture(
params=[
np.array(["a", "b"], dtype=object),
np.array([0, 1], dtype=float),
np.array([0, 1], dtype=int),
np.array([0, 1 + 2j], dtype=complex),
np.array([True, False], dtype=bool),
np.array([0, 1], dtype="datetime64[ns]"),
np.array([0, 1], dtype="timedelta64[ns]"),
]
)
def any_numpy_array(request):
"""
Parametrized fixture for NumPy arrays with different dtypes.
This excludes string and bytes.
"""
return request.param
# ----------------------------------------------------------------------------
# PandasDtype
@pytest.mark.parametrize(
"dtype, expected",
[
("bool", True),
("int", True),
("uint", True),
("float", True),
("complex", True),
("str", False),
("bytes", False),
("datetime64[ns]", False),
("object", False),
("void", False),
],
)
def test_is_numeric(dtype, expected):
dtype = PandasDtype(dtype)
assert dtype._is_numeric is expected
@pytest.mark.parametrize(
"dtype, expected",
[
("bool", True),
("int", False),
("uint", False),
("float", False),
("complex", False),
("str", False),
("bytes", False),
("datetime64[ns]", False),
("object", False),
("void", False),
],
)
def test_is_boolean(dtype, expected):
dtype = PandasDtype(dtype)
assert dtype._is_boolean is expected
def test_repr():
dtype = PandasDtype(np.dtype("int64"))
assert repr(dtype) == "PandasDtype('int64')"
def test_constructor_from_string():
result = | PandasDtype.construct_from_string("int64") | pandas.core.arrays.numpy_.PandasDtype.construct_from_string |
import pyaniasetools as aat
import pyanitools as ant
import hdnntools as hdt
import pandas as pd
import sys
import numpy as np
import re
import os
import matplotlib.pyplot as plt
import matplotlib as mpl
from matplotlib.colors import LogNorm
import matplotlib.cm as cm
from mpl_toolkits.axes_grid1.inset_locator import zoomed_inset_axes
from mpl_toolkits.axes_grid1.inset_locator import mark_inset
from matplotlib.backends.backend_pdf import PdfPages
#import seaborn as sns
pd.options.display.float_format = '{:.2f}'.format
# ----------------------------------
# Plot force histogram
# ----------------------------------
def plot_corr_dist_axes(ax, Xp, Xa, cmap, labelx, labely, plabel, vmin=0, vmax=0, inset=True):
Fmx = Xa.max()
Fmn = Xa.min()
# Plot ground truth line
ax.plot([Fmn, Fmx], [Fmn, Fmx], '--', c='red', linewidth=3)
# Set labels
ax.set_xlabel(labelx, fontsize=26)
ax.set_ylabel(labely, fontsize=26)
# Plot 2d Histogram
if vmin == 0 and vmax ==0:
bins = ax.hist2d(Xp, Xa, bins=200, norm=LogNorm(), range=[[Fmn, Fmx], [Fmn, Fmx]], cmap=cmap)
else:
bins = ax.hist2d(Xp, Xa, bins=200, norm=LogNorm(), range=[[Fmn, Fmx], [Fmn, Fmx]], cmap=cmap, vmin=vmin, vmax=vmax)
# Build color bar
#cbaxes = fig.add_axes([0.91, 0.1, 0.03, 0.8])
# Annotate with label
ax.text(0.25*((Fmx-Fmn))+Fmn, 0.06*((Fmx-Fmn))+Fmn, plabel, fontsize=26)
# Annotate with errors
PMAE = hdt.calculatemeanabserror(Xa, Xp)
PRMS = hdt.calculaterootmeansqrerror(Xa, Xp)
ax.text(0.6*((Fmx-Fmn))+Fmn, 0.2*((Fmx-Fmn))+Fmn, 'MAE='+"{:.3f}".format(PMAE)+'\nRMSE='+"{:.3f}".format(PRMS), fontsize=30,
bbox={'facecolor': 'white', 'alpha': 0.5, 'pad': 5})
if inset:
axins = zoomed_inset_axes(ax, 2., loc=2) # zoom = 6
sz = 0.1*(Fmx-Fmn)
axins.hist2d(Xp, Xa, bins=50, range=[[Xa.mean() - sz, Xa.mean() + sz], [Xp.mean() - sz, Xp.mean() + sz]], norm=LogNorm(), cmap=cmap)
axins.plot([Xp.mean() - sz, Xp.mean() + sz], [Xp.mean() - sz, Xp.mean() + sz], '--', c='r', linewidth=3)
# sub region of the original image
x1, x2, y1, y2 = Xa.mean() - sz, Xa.mean() + sz, Xp.mean() - sz, Xp.mean() + sz
axins.set_xlim(x1, x2)
axins.set_ylim(y1, y2)
axins.yaxis.tick_right()
        mark_inset(ax, axins, loc1=1, loc2=3, fc="none", ec="0.5")
plt.xticks(visible=True)
plt.yticks(visible=True)
return bins
def add_inset_histogram(Xa, Xp, pos, ylim, xlim):
Ferr = Xa - Xp
std = np.std(Ferr)
men = np.mean(Ferr)
axh = plt.axes(pos)
axh.hist(Ferr, bins=75, range=[men - 4 * std, men + 4 * std], normed=False)
axh.set_ylim(ylim)
axh.set_xlim(xlim)
#axh.set_title('Difference distribution')
# ----------------------------------
# Plot force histogram
# ----------------------------------
def plot_corr_dist(Xa, Xp, inset=True, xlabel='$F_{dft}$' + r' $(kcal \times mol^{-1} \times \AA^{-1})$', ylabel='$F_{dft}$' + r' $(kcal \times mol^{-1} \times \AA^{-1})$', figsize=[13,10], cmap=mpl.cm.viridis):
Fmx = Xa.max()
Fmn = Xa.min()
label_size = 14
mpl.rcParams['xtick.labelsize'] = label_size
mpl.rcParams['ytick.labelsize'] = label_size
fig, ax = plt.subplots(figsize=figsize)
# Plot ground truth line
ax.plot([Fmn, Fmx], [Fmn, Fmx], '--', c='r', linewidth=3)
# Set labels
ax.set_xlabel(xlabel, fontsize=22)
ax.set_ylabel(ylabel, fontsize=22)
#cmap = mpl.cm.viridis
#cmap = mpl.cm.brg
# Plot 2d Histogram
bins = ax.hist2d(Xa, Xp, bins=200, norm=LogNorm(), range= [[Xa.min(), Xa.max()], [Xp.min(), Xp.max()]], cmap=cmap)
# Build color bar
#cbaxes = fig.add_axes([0.91, 0.1, 0.03, 0.8])
cb1 = fig.colorbar(bins[-1], cmap=cmap)
cb1.set_label('Count', fontsize=16)
# Annotate with errors
PMAE = hdt.calculatemeanabserror(Xa, Xp)
PRMS = hdt.calculaterootmeansqrerror(Xa, Xp)
ax.text(0.75*((Fmx-Fmn))+Fmn, 0.43*((Fmx-Fmn))+Fmn, 'MAE='+"{:.3f}".format(PMAE)+'\nRMSE='+"{:.3f}".format(PRMS), fontsize=20,
bbox={'facecolor': 'white', 'alpha': 0.5, 'pad': 5})
if inset:
axins = zoomed_inset_axes(ax, 2.2, loc=2) # zoom = 6
sz = 6
axins.hist2d(Xa, Xp,bins=50, range=[[Fmn/sz, Fmx/sz], [Fmn/sz, Fmx/sz]], norm=LogNorm(), cmap=cmap)
axins.plot([Xa.min(), Xa.max()], [Xa.min(), Xa.max()], '--', c='r', linewidth=3)
# sub region of the original image
x1, x2, y1, y2 = Fmn/sz, Fmx/sz, Fmn/sz, Fmx/sz
axins.set_xlim(x1, x2)
axins.set_ylim(y1, y2)
axins.yaxis.tick_right()
plt.xticks(visible=True)
plt.yticks(visible=True)
mark_inset(ax, axins, loc1=1, loc2=3, fc="none", ec="0.5")
Ferr = Xa - Xp
std = np.std(Ferr)
men = np.mean(Ferr)
axh = plt.axes([.49, .16, .235, .235])
axh.hist(Ferr, bins=75, range=[men-4*std, men+4*std], normed=True)
axh.set_title('Difference distribution')
#plt.draw()
plt.show()
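# Usage sketch (added for illustration; the arrays below are stand-ins, not data
# from this project): given matched reference and predicted force arrays in
# kcal/mol/A,
#   Fdft = np.random.normal(0.0, 10.0, 3000)
#   Fani = Fdft + np.random.normal(0.0, 1.0, 3000)
#   plot_corr_dist(Fdft, Fani, inset=True)
# draws the 2D correlation histogram with MAE/RMSE annotations plus the zoomed
# inset and difference-distribution panel defined above.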
class generate_ensemble_data(aat.anicrossvalidationconformer):
'''Constructor'''
def __init__(self, networks, tsfiles, gpu=0):
super().__init__(networks['cns'], networks['sae'], networks['nnf'], networks['nts'], gpu )
self.tsfiles = tsfiles
self.Nn = networks['nts']
'''Stat generator'''
def generate_stats(self, maxe=sys.float_info.max, forces=True, grad=False):
self.tdata = dict()
for key in self.tsfiles.keys():
print(' -Working on',key,'...')
cdata = dict({'Eani': [],
'Edft': [],
'Erel': [],
'Fani': [],
'Fdft': [],
'dEani': [],
'dEdft': [],
'Na': [],
'Na2': [],})
for file in self.tsfiles[key]:
print(key,file)
adl = ant.anidataloader(file)
for i, data in enumerate(adl):
#if i > 5:
# break
if data['coordinates'].shape[0] != 0:
Eani, Fani, sig = self.compute_energyandforce_conformations(np.array(data['coordinates'],dtype=np.float64), data['species'], ensemble=False)
midx = np.where( data['energies'] - data['energies'].min() < maxe/hdt.hatokcal )[0]
Eani = Eani[:,midx]
Edft = data['energies'][midx]
Erel = (data['energies'] - data['energies'].min())[midx]
Fani = Fani[:,midx,:,:]
if forces:
if grad:
Fdft = -data['forces'][midx]
else:
Fdft = data['forces'][midx]
else:
Fdft = 0.0*data['coordinates'][midx]
#Eestd = np.std(Eani, axis=0)/np.sqrt(len(data['species']))
Eeani = np.mean(Eani, axis=0).reshape(1,-1)
Feani = np.mean(Fani, axis=0).flatten().reshape(1,-1)
Fani = Fani.reshape(Fani.shape[0],-1)
Eani = np.vstack([Eani, Eeani])
Fani = np.vstack([Fani, Feani])
Edft = hdt.hatokcal * Edft
Fdft = hdt.hatokcal * Fdft.flatten()
cdata['Na'].append(np.full(Edft.size, len(data['species']), dtype=np.int32))
cdata['Eani'].append(Eani)
cdata['Edft'].append(Edft)
cdata['Erel'].append(Erel)
cdata['Fani'].append(Fani)
cdata['Fdft'].append(Fdft)
#cdata['Frmse'].append(np.sqrt(np.mean((Fani-Fdft).reshape(Fdft.shape[0], -1)**2, axis=1)))
#cdata['Frmae'].append(np.sqrt(np.mean(np.abs((Fani - Fdft).reshape(Fdft.shape[0], -1)), axis=1)))
cdata['dEani'].append(hdt.calculateKdmat(self.Nn+1, Eani))
cdata['dEdft'].append(hdt.calculatedmat(Edft))
cdata['Na2'].append(np.full(cdata['dEdft'][-1].size, len(data['species']), dtype=np.int32))
#cdata['Erani'].append(Eani-Eani.min())
#cdata['Erdft'].append(Edft-Edft.min())
for k in ['Na', 'Na2', 'Edft', 'Fdft', 'dEdft', 'Erel']:
cdata[k] = np.concatenate(cdata[k])
for k in ['Eani', 'Fani', 'dEani']:
cdata[k] = np.hstack(cdata[k])
self.tdata.update({key: cdata})
''' Generate total errors '''
def store_data(self, filename):
if os.path.exists(filename):
os.remove(filename)
dpack = ant.datapacker(filename)
for k in self.tdata.keys():
dpack.store_data(k,**(self.tdata[k]))
dpack.cleanup()
names = ['E$_\mathrm{MAE}$$\mu$',
'E$_\mathrm{MAE}$$\sigma$',
'E$_\mathrm{RMS}$$\mu$',
'E$_\mathrm{RMS}$$\sigma$',
'$\Delta$E$_\mathrm{MAE}$$\mu$',
'$\Delta$E$_\mathrm{MAE}$$\sigma$',
'$\Delta$E$_\mathrm{RMS}$$\mu$',
'$\Delta$E$_\mathrm{RMS}$$\sigma$',
'F$_\mathrm{MAE}$$\mu$',
'F$_\mathrm{MAE}$$\sigma$',
'F$_\mathrm{RMS}$$\mu$',
'F$_\mathrm{RMS}$$\sigma$',
]
class evaluate_ensemble_data(aat.anicrossvalidationconformer):
'''Constructor'''
def __init__(self, datafile):
self.fdata = dict()
for df in datafile:
adl = ant.anidataloader(df)
tdata = dict()
for data in adl:
tdata.update({data['path'].split('/')[-1] : data})
adl.cleanup()
self.fdata[df.split('tsdata_')[-1].split('.h5')[0]] = tdata
''' Generate total errors '''
def generate_fullset_errors(self, ntkey, tslist):
#idx = np.nonzero(self.fdata[ntkey][tskey]['Erdft'])
#tskeys = self.fdata[ntkey].keys()
if not tslist:
tskeys = self.fdata[ntkey].keys()
else:
tskeys = tslist
Nn = self.fdata[ntkey][list(tskeys)[0]]['Eani'].shape[0]-1
#print(self.fdata[ntkey][tskey]['Fdft'].shape)
return {names[0]: hdt.calculatemeanabserror(
np.concatenate([self.fdata[ntkey][tskey]['Eani'][Nn,:] for tskey in tskeys]),
np.concatenate([self.fdata[ntkey][tskey]['Edft'] for tskey in tskeys])),
names[1]: np.std(hdt.calculatemeanabserror(
np.hstack([self.fdata[ntkey][tskey]['Eani'][0:Nn,:] for tskey in tskeys]),
np.hstack([self.fdata[ntkey][tskey]['Edft'] for tskey in tskeys]), axis=1)),
names[2]: hdt.calculaterootmeansqrerror(
np.concatenate([self.fdata[ntkey][tskey]['Eani'][Nn, :] for tskey in tskeys]),
np.concatenate([self.fdata[ntkey][tskey]['Edft'] for tskey in tskeys])),
names[3]: np.std(hdt.calculaterootmeansqrerror(
np.hstack([self.fdata[ntkey][tskey]['Eani'][0:Nn, :] for tskey in tskeys]),
np.hstack([self.fdata[ntkey][tskey]['Edft'] for tskey in tskeys]), axis=1)),
names[4]: hdt.calculatemeanabserror(
np.concatenate([self.fdata[ntkey][tskey]['dEani'][Nn, :] for tskey in tskeys]),
np.concatenate([self.fdata[ntkey][tskey]['dEdft'] for tskey in tskeys])),
names[5]: np.std(hdt.calculatemeanabserror(
np.hstack([self.fdata[ntkey][tskey]['dEani'][0:Nn, :] for tskey in tskeys]),
np.hstack([self.fdata[ntkey][tskey]['dEdft'] for tskey in tskeys]), axis=1)),
names[6]: hdt.calculaterootmeansqrerror(
np.concatenate([self.fdata[ntkey][tskey]['dEani'][Nn, :] for tskey in tskeys]),
np.concatenate([self.fdata[ntkey][tskey]['dEdft'] for tskey in tskeys])),
names[7]: np.std(hdt.calculaterootmeansqrerror(
np.hstack([self.fdata[ntkey][tskey]['dEani'][0:Nn, :] for tskey in tskeys]),
np.hstack([self.fdata[ntkey][tskey]['dEdft'] for tskey in tskeys]), axis=1)),
names[8]: hdt.calculatemeanabserror(
np.concatenate([self.fdata[ntkey][tskey]['Fani'][Nn, :] for tskey in tskeys]),
np.concatenate([self.fdata[ntkey][tskey]['Fdft'] for tskey in tskeys])),
names[9]: np.std(hdt.calculatemeanabserror(
np.hstack([self.fdata[ntkey][tskey]['Fani'][0:Nn, :] for tskey in tskeys]),
np.hstack([self.fdata[ntkey][tskey]['Fdft'] for tskey in tskeys]), axis=1)),
names[10]: hdt.calculaterootmeansqrerror(
np.concatenate([self.fdata[ntkey][tskey]['Fani'][Nn, :] for tskey in tskeys]),
np.concatenate([self.fdata[ntkey][tskey]['Fdft'] for tskey in tskeys])),
names[11]: np.std(hdt.calculaterootmeansqrerror(
np.hstack([self.fdata[ntkey][tskey]['Fani'][0:Nn, :] for tskey in tskeys]),
np.hstack([self.fdata[ntkey][tskey]['Fdft'] for tskey in tskeys]), axis=1)),
#'FMAEm': hdt.calculatemeanabserror(self.fdata[ntkey][tskey]['Fani'][Nn,:], self.fdata[ntkey][tskey]['Fdft']),
#'FMAEs': np.std(hdt.calculatemeanabserror(self.fdata[ntkey][tskey]['Fani'][0:Nn,:], self.fdata[ntkey][tskey]['Fdft'], axis=1)),
#'FRMSm': hdt.calculaterootmeansqrerror(self.fdata[ntkey][tskey]['Fani'][Nn,:], self.fdata[ntkey][tskey]['Fdft']),
#'FRMSs': np.std(hdt.calculaterootmeansqrerror(self.fdata[ntkey][tskey]['Fani'][0:Nn, :],self.fdata[ntkey][tskey]['Fdft'], axis=1)),
#'dEMAE': hdt.calculatemeanabserror(self.fdata[ntkey][tskey]['dEani'], self.fdata[ntkey][tskey]['dEdft']),
#'dERMS': hdt.calculaterootmeansqrerror(self.fdata[ntkey][tskey]['dEani'], self.fdata[ntkey][tskey]['dEdft']),
#'ERMAE': hdt.calculatemeanabserror(self.fdata[ntkey][tskey]['Erani'][idx], self.fdata[ntkey][tskey]['Erdft'][idx]),
#'ERRMS': hdt.calculaterootmeansqrerror(self.fdata[ntkey][tskey]['Erani'][idx], self.fdata[ntkey][tskey]['rdft'][idx]),
}
''' Generate total errors '''
def get_range_stats(self, tslist, dkey):
#idx = np.nonzero(self.fdata[ntkey][tskey]['Erdft'])
ntkey = list(self.fdata.keys())[0]
if not tslist:
tskeys = self.fdata[ntkey].keys()
else:
tskeys = tslist
Nn = self.fdata[ntkey][list(tskeys)[0]][dkey].shape[0]-1
return np.concatenate([self.fdata[ntkey][tskey][dkey] for tskey in tskeys])
''' Generate total errors '''
def generate_fullset_peratom_errors(self, ntkey, tslist):
#idx = np.nonzero(self.fdata[ntkey][tskey]['Erdft'])
if not tslist:
tskeys = self.fdata[ntkey].keys()
else:
tskeys = tslist
Nn = self.fdata[ntkey][list(tskeys)[0]]['Eani'].shape[0]-1
#print(self.fdata[ntkey]['GDB07to09']['Eani'][Nn,:])
#print(self.fdata[ntkey]['GDB07to09']['Na'])
#print(self.fdata[ntkey]['GDB07to09']['Eani'][Nn,:]/self.fdata[ntkey]['GDB07to09']['Na'])
return {names[0]: 1000*hdt.calculatemeanabserror(
np.concatenate([self.fdata[ntkey][tskey]['Eani'][Nn,:]/self.fdata[ntkey][tskey]['Na'] for tskey in tskeys]),
np.concatenate([self.fdata[ntkey][tskey]['Edft']/self.fdata[ntkey][tskey]['Na'] for tskey in tskeys])),
names[2]: 1000*hdt.calculaterootmeansqrerror(
np.concatenate([self.fdata[ntkey][tskey]['Eani'][Nn, :]/self.fdata[ntkey][tskey]['Na'] for tskey in tskeys]),
np.concatenate([self.fdata[ntkey][tskey]['Edft']/self.fdata[ntkey][tskey]['Na'] for tskey in tskeys])),
names[4]: 1000*hdt.calculatemeanabserror(
np.concatenate([self.fdata[ntkey][tskey]['dEani'][Nn, :] / self.fdata[ntkey][tskey]['Na2'] for tskey in tskeys]),
np.concatenate([self.fdata[ntkey][tskey]['dEdft'] / self.fdata[ntkey][tskey]['Na2'] for tskey in tskeys])),
names[6]: 1000*hdt.calculaterootmeansqrerror(
np.concatenate([self.fdata[ntkey][tskey]['dEani'][Nn, :] / self.fdata[ntkey][tskey]['Na2'] for tskey in tskeys]),
np.concatenate([self.fdata[ntkey][tskey]['dEdft'] / self.fdata[ntkey][tskey]['Na2'] for tskey in tskeys])),
}
''' Generate total errors '''
def generate_fullset_mean_errors(self, ntkey):
#idx = np.nonzero(self.fdata[ntkey][tskey]['Erdft'])
tskeys = self.fdata[ntkey].keys()
Nn = self.fdata[ntkey][list(tskeys)[0]]['Eani'].shape[0]-1
return {names[2]+'E': hdt.calculaterootmeansqrerror(
np.concatenate([self.fdata[ntkey][tskey]['Eani'][Nn,:] for tskey in tskeys]),
np.concatenate([self.fdata[ntkey][tskey]['Edft'] for tskey in tskeys])),
names[2]+'M': np.mean(hdt.calculaterootmeansqrerror(
np.hstack([self.fdata[ntkey][tskey]['Eani'][0:Nn, :] for tskey in tskeys]),
np.hstack([self.fdata[ntkey][tskey]['Edft'] for tskey in tskeys]),axis=1)),
names[6]+'E': hdt.calculaterootmeansqrerror(
np.concatenate([self.fdata[ntkey][tskey]['dEani'][Nn, :] for tskey in tskeys]),
np.concatenate([self.fdata[ntkey][tskey]['dEdft'] for tskey in tskeys])),
names[6]+'M': np.mean(hdt.calculaterootmeansqrerror(
np.hstack([self.fdata[ntkey][tskey]['dEani'][0:Nn, :] for tskey in tskeys]),
np.hstack([self.fdata[ntkey][tskey]['dEdft'] for tskey in tskeys]),axis=1)),
names[10]+'E': hdt.calculaterootmeansqrerror(
np.concatenate([self.fdata[ntkey][tskey]['Fani'][Nn, :] for tskey in tskeys]),
np.concatenate([self.fdata[ntkey][tskey]['Fdft'] for tskey in tskeys])),
names[10]+'M': np.mean(hdt.calculaterootmeansqrerror(
np.hstack([self.fdata[ntkey][tskey]['Fani'][0:Nn, :] for tskey in tskeys]),
np.hstack([self.fdata[ntkey][tskey]['Fdft'] for tskey in tskeys]),axis=1)),
}
''' Generate total errors '''
def generate_total_errors(self, ntkey, tskey):
#idx = np.nonzero(self.fdata[ntkey][tskey]['Erdft'])
Nn = self.fdata[ntkey][tskey]['Eani'].shape[0]-1
return {names[0]: hdt.calculatemeanabserror(self.fdata[ntkey][tskey]['Eani'][Nn,:], self.fdata[ntkey][tskey]['Edft']),
names[1]: np.std(hdt.calculatemeanabserror(self.fdata[ntkey][tskey]['Eani'][0:Nn,:], self.fdata[ntkey][tskey]['Edft'], axis=1)),
names[2]: hdt.calculaterootmeansqrerror(self.fdata[ntkey][tskey]['Eani'][Nn,:], self.fdata[ntkey][tskey]['Edft']),
names[3]: np.std(hdt.calculaterootmeansqrerror(self.fdata[ntkey][tskey]['Eani'][0:Nn,:], self.fdata[ntkey][tskey]['Edft'], axis=1)),
names[4]: hdt.calculatemeanabserror(self.fdata[ntkey][tskey]['dEani'][Nn,:], self.fdata[ntkey][tskey]['dEdft']),
names[5]: np.std(hdt.calculatemeanabserror(self.fdata[ntkey][tskey]['dEani'][0:Nn,:], self.fdata[ntkey][tskey]['dEdft'], axis=1)),
names[6]: hdt.calculaterootmeansqrerror(self.fdata[ntkey][tskey]['dEani'][Nn,:], self.fdata[ntkey][tskey]['dEdft']),
names[7]: np.std(hdt.calculaterootmeansqrerror(self.fdata[ntkey][tskey]['dEani'][0:Nn,:], self.fdata[ntkey][tskey]['dEdft'], axis=1)),
names[8]: hdt.calculatemeanabserror(self.fdata[ntkey][tskey]['Fani'][Nn,:], self.fdata[ntkey][tskey]['Fdft']),
names[9]: np.std(hdt.calculatemeanabserror(self.fdata[ntkey][tskey]['Fani'][0:Nn,:], self.fdata[ntkey][tskey]['Fdft'], axis=1)),
names[10]: hdt.calculaterootmeansqrerror(self.fdata[ntkey][tskey]['Fani'][Nn,:], self.fdata[ntkey][tskey]['Fdft']),
names[11]: np.std(hdt.calculaterootmeansqrerror(self.fdata[ntkey][tskey]['Fani'][0:Nn, :],self.fdata[ntkey][tskey]['Fdft'], axis=1)),
#'dEMAE': hdt.calculatemeanabserror(self.fdata[ntkey][tskey]['dEani'], self.fdata[ntkey][tskey]['dEdft']),
#'dERMS': hdt.calculaterootmeansqrerror(self.fdata[ntkey][tskey]['dEani'], self.fdata[ntkey][tskey]['dEdft']),
#'ERMAE': hdt.calculatemeanabserror(self.fdata[ntkey][tskey]['Erani'][idx], self.fdata[ntkey][tskey]['Erdft'][idx]),
#'ERRMS': hdt.calculaterootmeansqrerror(self.fdata[ntkey][tskey]['Erani'][idx], self.fdata[ntkey][tskey]['rdft'][idx]),
}
def determine_min_error_by_sigma(self, ntkey, minerror, percent, tskeys = ['GDB07to09'], figsize=(15.0, 12.0), labelx='', labely='', xyrange=(0.0,10.0,0.0,10.0), storepath='', cmap=mpl.cm.viridis):
#tskeys = self.fdata[ntkey].keys()
mpl.rcParams['xtick.labelsize'] = 18
mpl.rcParams['ytick.labelsize'] = 18
Nn = self.fdata[ntkey][list(tskeys)[0]]['Eani'].shape[0]-1
Eani = np.hstack([self.fdata[ntkey][tskey]['Eani'][0:Nn, :] for tskey in tskeys])
Eanimu = np.hstack([self.fdata[ntkey][tskey]['Eani'][Nn, :] for tskey in tskeys])
#Eani = np.hstack([self.fdata[ntkey][tskey]['Eani'][Nn, :] for tskey in tskeys])
Edft = np.concatenate([self.fdata[ntkey][tskey]['Edft'] for tskey in tskeys])
#print(Eani.shape, Edft.shape, )
#print(np.max(Eerr.shape, axis=0))
Sani = np.concatenate([np.std(self.fdata[ntkey][tskey]['Eani'][0:Nn, :], axis=0) for tskey in tskeys])
Na = np.concatenate([self.fdata[ntkey][tskey]['Na'] for tskey in tskeys])
#print(Sani.shape, Na.shape)
Sani = Sani / np.sqrt(Na)
Eerr = np.max(np.abs(Eani - Edft),axis=0) / np.sqrt(Na)
#Eerr = np.abs(np.mean(Eani,axis=0) - Edft) / np.sqrt(Na)
#Eerr = np.abs(Eani - Edft) / np.sqrt(Na)
#print(Eerr)
#print(Sani)
Nmax = np.where(Eerr > minerror)[0].size
perc = 0
dS = Sani.max()
step = 0
while perc < percent:
S = dS - step*0.001
Sidx = np.where(Sani > S)
step += 1
perc = 100.0*np.where(Eerr[Sidx] > minerror)[0].size/(Nmax+1.0E-7)
#print(step,perc,S,Sidx)
#print('Step:',step, 'S:',S,' -Perc over:',perc,'Total',100.0*Sidx[0].size/Edft.size)
#dE = np.max(Eerr, axis=0) / np.sqrt(Na)
#print(Eerr.shape,Eerr)
So = np.where(Sani > S)
Su = np.where(Sani <= S)
print('RMSE Over: ', hdt.calculaterootmeansqrerror(Eanimu[So],Edft[So]))
print('RMSE Under: ', hdt.calculaterootmeansqrerror(Eanimu[Su],Edft[Su]))
fig, ax = plt.subplots(figsize=figsize)
poa = np.where(Eerr[So] > minerror)[0].size / So[0].size
pob = np.where(Eerr > minerror)[0].size / Eerr.size
ax.text(0.57*(xyrange[1]), 0.04*(xyrange[3]), 'Total Captured: ' + str(int(100.0 * Sidx[0].size / Edft.size)) + '%' +
'\n' + r'($\mathrm{\mathcal{E}>}$'+ "{:.1f}".format(minerror) + r'$\mathrm{) \forall \rho}$: ' + str(int(100*pob)) + '%' +
'\n' + r'($\mathrm{\mathcal{E}>}$'+ "{:.1f}".format(minerror) + r'$\mathrm{) \forall \rho >}$' + "{:.2f}".format(S) + ': ' + str(int(100*poa)) + '%' +
'\n' + r'$\mathrm{E}$ RMSE ($\mathrm{\rho>}$'+ "{:.2f}".format(S) + r'$\mathrm{)}$: ' + "{:.1f}".format(hdt.calculaterootmeansqrerror(Eanimu[So],Edft[So])) +
'\n' + r'$\mathrm{E}$ RMSE ($\mathrm{\rho\leq}$' + "{:.2f}".format(S) + r'$\mathrm{)}$: ' + "{:.1f}".format(hdt.calculaterootmeansqrerror(Eanimu[Su], Edft[Su])),
bbox={'facecolor':'grey', 'alpha':0.5, 'pad':10}, fontsize=18)
plt.axvline(x=S,linestyle='--',color='r',linewidth=5, label=r"$\mathrm{\rho=}$"+"{:.2f}".format(S) + ' is the value that captures\n'+ str(int(percent)) + '% of errors over ' + r"$\mathrm{\mathcal{E}=}$" + "{:.1f}".format(minerror))
#)
# Set labels
ax.set_xlabel(labelx, fontsize=24)
ax.set_ylabel(labely, fontsize=24)
# Plot 2d Histogram
bins = ax.hist2d(Sani, Eerr, bins=400, norm=LogNorm(), range=[[xyrange[0], xyrange[1]], [xyrange[2], xyrange[3]]], cmap=cmap)
# Build color bar
# cbaxes = fig.add_axes([0.91, 0.1, 0.03, 0.8])
cb1 = fig.colorbar(bins[-1], cmap=cmap)
cb1.set_label('Count', fontsize=20)
cb1.ax.tick_params(labelsize=18)
plt.legend(loc='upper center',fontsize=18)
if storepath:
pp = PdfPages(storepath)
pp.savefig(fig)
pp.close()
else:
plt.show()
def get_net_keys(self):
return self.fdata.keys()
def get_totalerror_table(self, tslist = []):
errors = dict()
for k in self.fdata.keys():
errors[k] = pd.Series(self.generate_fullset_errors(k, tslist))
        pd.set_option('expand_frame_repr', False)
def chi2_test(df_test, df_theory=None):
    """Chi-square test: goodness of fit when df_theory is given, otherwise a test of independence."""
    if df_theory is not None:  # goodness-of-fit test
chi2 = ((df_test - df_theory)**2 / df_theory).sum().sum()
df = len(df_test.columns) - 1
p = st.chi2.sf(chi2, df)
return chi2, p
    else:  # test of independence
        # compute the column sums, the row (index) sums, and the grand total
        sum_columns, sum_index = list(df_test.sum()), list(df_test.sum(axis=1))
        sum_all = sum(sum_columns)
        # compute the expected frequencies
expected_frequency = []
for index in sum_index:
index_values = []
for column in sum_columns:
index_values.append(index*column / sum_all)
expected_frequency.append(index_values)
        # compute the test statistics and p-values
df_expected = pd.DataFrame(expected_frequency, columns=df_test.columns, index=df_test.index)
chi2, p = [], []
for yates in [0, 0.5]:
df_chi2 = (abs(df_test - df_expected) - yates)**2 / df_expected
chi2.append(df_chi2.sum().sum())
df = (len(df_test.columns)-1) * (len(df_test.index)-1)
            p.append(st.chi2.sf(chi2, df))  # one-sided test (upper-tail probability)
        # output the results
        display(df_expected)
        # warn when 20% or more of the expected frequencies are below 5
check = [i for expected in expected_frequency for i in expected if i < 5]
if (len(check) > len(expected_frequency)//5):
print("注意:期待度数の内、5未満の値が全体の20%以上存在します!\nフィッシャーの正確確率検定の使用をオススメします!")
return chi2[0], p[0][0], chi2[1], p[1][1]
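# Reference note (added for clarity; not in the original script): the statistic
# computed in the independence branch is the usual Pearson chi-square,
#   chi2 = sum_ij (O_ij - E_ij)**2 / E_ij,  with E_ij = row_i * col_j / N,
# and the Yates-corrected variant subtracts 0.5 from |O_ij - E_ij| before
# squaring. Both p-values come from the upper tail of the chi-square
# distribution with (rows - 1) * (cols - 1) degrees of freedom.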
# Test 1
import numpy as np
import pandas as pd
import scipy.stats as st
df_test = pd.DataFrame([[13, 7], [5, 15]], columns=['Cured', 'Not cured'], index=['Treatment group', 'Control group'])
print('Input:')
display(df_test)
print('Output:')
answers = chi2_test(df_test)
print(answers)
# Test 2
import numpy as np
import pandas as pd
import scipy.stats as st
df_test = pd.DataFrame([[35, 71], [52, 61]], columns=['Herbivore type', 'Carnivore type'], index=['Female', 'Male'])
print('Input:')
display(df_test)
print('Output:')
answers = chi2_test(df_test)
print(answers)
# Test 3
import numpy as np
import pandas as pd
import scipy.stats as st
df_test = pd.DataFrame([[24, 3, 999], [85, 1, 999]], columns=['Underweight', 'Normal', 'Obese'], index=['Female', 'Male'])
print('Input:')
display(df_test)
print('Output:')
answers = chi2_test(df_test)
print(answers)
# Test 4
import numpy as np
import pandas as pd
import scipy.stats as st
df_test = pd.DataFrame([55, 22, 16, 7], index=['Type A', 'Type B', 'Type C', 'Stag beetle'], columns=['Humans']).T.rename_axis('test')
df_theory = pd.DataFrame([40, 30, 20, 10], index=['Type A', 'Type B', 'Type C', 'Stag beetle'], columns=['Humans']).T.rename_axis('theory')
print('Input:')
display(df_test)
display(df_theory)
print('Output:')
answers = chi2_test(df_test, df_theory)
print(answers)
# Test 5
import numpy as np
import pandas as pd
import scipy.stats as st
df_test = pd.DataFrame([[12, 34, 56, 78], [87, 65, 43, 21]], columns=['Type A', 'Type B', 'Type C', 'Niigata'], index=['Male', 'Female'])
import os
import numpy as np
import random
import pandas as pd
import time as tm
from operator import itemgetter
from sklearn.model_selection import train_test_split
import pickle as pkl
import scipy.sparse
from metrics import *
from gutils import *
from graph import *
#' data preparation
def input_data(DataDir):
Link_Graph(outputdir='Infor_Data')
DataPath1 = '{}/Pseudo_ST1.csv'.format(DataDir)
DataPath2 = '{}/Real_ST2.csv'.format(DataDir)
LabelsPath1 = '{}/Pseudo_Label1.csv'.format(DataDir)
LabelsPath2 = '{}/Real_Label2.csv'.format(DataDir)
#' read the data
data1 = pd.read_csv(DataPath1, index_col=0, sep=',')
data2 = pd.read_csv(DataPath2, index_col=0, sep=',')
    lab_label1 = pd.read_csv(LabelsPath1, header=0, index_col=False, sep=',')
# Build the dataset for the boosting prediction
import gc
gc.collect()
import pandas as pd
import seaborn as sns
import numpy as np
#%% March data
marzo = pd.read_csv(r'C:\Users\argomezja\Desktop\Data Science\MELI challenge\Project MELI\Dataset_limpios\marzo_limpio.csv.gz')
marzo = marzo.loc[marzo['day']>=4].reset_index(drop=True)
marzo['day']=marzo['day']-3
# Min-max scale the price within each currency (assumes the price column is named 'current_price')
marzo = marzo.assign(current_price=marzo.groupby('currency')['current_price'].transform(lambda x: (x - x.min()) / (x.max() - x.min())))
subtest1 = marzo[['sku', 'day', 'sold_quantity']]
subtest1= subtest1.pivot_table(index = 'sku', columns= 'day', values = 'sold_quantity').add_prefix('sales')
subtest2 = marzo[['sku', 'day', 'current_price']]
subtest2= subtest2.pivot_table(index = 'sku', columns= 'day', values = 'current_price').add_prefix('price')
subtest3 = marzo[['sku', 'day', 'minutes_active']]
subtest3= subtest3.pivot_table(index = 'sku', columns= 'day', values = 'minutes_active').add_prefix('active_time')
subtest4 = marzo[['sku', 'day', 'listing_type']]
subtest4= subtest4.pivot_table(index = 'sku', columns= 'day', values = 'listing_type').add_prefix('listing_type')
subtest6 = marzo[['sku', 'day', 'shipping_logistic_type']]
subtest6= subtest6.pivot_table(index = 'sku', columns= 'day', values = 'shipping_logistic_type').add_prefix('shipping_logistic_type')
subtest7 = marzo[['sku', 'day', 'shipping_payment']]
subtest7= subtest7.pivot_table(index = 'sku', columns= 'day', values = 'shipping_payment').add_prefix('shipping_payment')
final = pd.merge(subtest1, subtest2, left_index=True, right_index=True )
final = pd.merge(final, subtest3, left_index=True, right_index=True)
final = pd.merge(final, subtest4, left_index=True, right_index=True)
final = pd.merge(final, subtest6, left_index=True, right_index=True)
final = pd.merge(final, subtest7, left_index=True, right_index=True)
del subtest1,subtest2,subtest3,subtest4,subtest6, subtest7
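# Illustrative note (added; exact column counts depend on the input data): after
# these merges `final` has one row per sku and one column per day for every
# feature, e.g. sales1..sales28, price1..price28, active_time1..active_time28,
# which is the wide layout fed to the boosting model.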
#%% Rolling averages over 3, 7, 15 and 20 days
marzo_test = marzo.sort_values(['sku','day']).reset_index(drop=True).copy()
# compute the rolling means on the sorted frame so the values align with marzo_test after reset_index
marzo_test['promedio_3'] = marzo_test.groupby(['sku'])['sold_quantity'].rolling(3, min_periods=3).mean().reset_index(drop=True)
marzo_test['promedio_7'] = marzo_test.groupby(['sku'])['sold_quantity'].rolling(7, min_periods=7).mean().reset_index(drop=True)
marzo_test['promedio_15'] = marzo_test.groupby(['sku'])['sold_quantity'].rolling(15, min_periods=15).mean().reset_index(drop=True)
marzo_test['promedio_20'] = marzo_test.groupby(['sku'])['sold_quantity'].rolling(20, min_periods=20).mean().reset_index(drop=True)
# Pivot and merge
subtest3 = marzo_test[['sku', 'day', 'promedio_3']]
subtest3= subtest3.pivot_table(index = 'sku', columns= 'day', values = 'promedio_3', dropna=False).add_prefix('promedio_3')
subtest4 = marzo_test[['sku', 'day', 'promedio_7']]
subtest4= subtest4.pivot_table(index = 'sku', columns= 'day', values = 'promedio_7', dropna=False).add_prefix('promedio_7')
subtest6 = marzo_test[['sku', 'day', 'promedio_15']]
subtest6= subtest6.pivot_table(index = 'sku', columns= 'day', values = 'promedio_15', dropna=False).add_prefix('promedio_15')
subtest7 = marzo_test[['sku', 'day', 'promedio_20']]
subtest7= subtest7.pivot_table(index = 'sku', columns= 'day', values = 'promedio_20', dropna=False).add_prefix('promedio_20')
final = pd.merge(final, subtest3, left_index=True, right_index=True)
final = pd.merge(final, subtest4, left_index=True, right_index=True)
# -*- coding: utf-8 -*-
from pandas import DataFrame, Series
from talib import abstract
from xing.xaquery import Query
from xing import xacom
# https://cryptotrader.org/talib
# https://github.com/mrjbq7/ta-lib
class Chartdata:
"""차트 데이터를 추출 및 관리하고, 이를 통해 보조지표를 생성하는 클래스
    :param shcode: stock (instrument) code
:type shcode: str
::
chart = Chartdata("012510")
"""
DAY = 99997
"""Chartdata '일'에 대한 상수
"""
WEEK = 99998
"""Chartdata '주'에 대한 상수
"""
MONTH = 99999
"""Chartdata '월'에 대한 상수
"""
def __init__(self, shcode):
self._shcode = shcode
self._data = {}
'''
Chartdata.DAY : [ "20100101", "20101231"],
Chartdata.MONTH : ("20100101", "20101231"),
Chartdata.WEEK : [ "20100101" ],
5 : ["20100101"],
12 : ("20100101",),
15 : "20100101",
30 : ""
'''
def _parseParam(self, param):
p = {}
today = xacom.today()
for k,v in param.items():
if isinstance(v, (list, tuple)):
if len(v) < 2:
p[k] = [v[0], today]
else:
p[k] = v[:2]
else:
p[k] = [v, today]
return p
def load(self, param):
"""차트 데이터를 조회하여 누적한다.
:param param: 조회할 차트 종류(분,일,월,주)와 조회할 기간
:type param: object { 조회할차트정보 : [시작일(yyyymmdd), 종료일(yyyymmdd)]}
:return: self
.. note:: 한번 load한 데이터는 load는 clean 하지 않는 이상, 기존 데이터를 갱신하지 않고, 존재하지 않는 기간만 추가한다.
::
chart = Chartdata("012510")
chart.load({
Chartdata.DAY : [ startdate , enddate ]
Chartdata.WEEK : [ startdate , enddate ]
Chartdata.MONTH : [ startdate ]
1 : startdate
})
"""
p = self._parseParam(param)
for k,v in p.items():
if k in self._data:
latestDate = self._data[k]["date"].max()
isMinType = self._getChartType(k) == 0
if latestDate > p[k][1]:
pass
elif not isMinType and latestDate == p[k][1]:
                    # not a minute-type chart and the last stored date equals the query end date, so skip
pass
else:
                    # for minute-type charts, drop the rows for the last stored date; day/week/month types are not dropped
df = self._data[k][self._data[k].date != latestDate] if isMinType else self._data[k]
newDf = self._query(k, latestDate, p[k][1]).append(df)
self._data[k] = newDf.sort_values(by=["date"], ascending=[True])
else:
self._data[k] = self._query(k, p[k][0], p[k][1]).sort_values(by=["date"], ascending=[True])
return self
    # Query chart data.
def _query(self, type, startdate = "", enddate = ""):
enddate = enddate if enddate else xacom.today()
chartType = self._getChartType(type)
        if chartType == 0:  # minute
df = (Query("t8412", True).request({
"InBlock" : {
"shcode" : self._shcode,
"qrycnt" : 2000,
"comp_yn" : "Y",
"sdate" : startdate,
"edate" : enddate,
"ncnt" : type
}
},{
"OutBlock" : ("cts_date", "cts_time"),
"OutBlock1" : | DataFrame(columns=("date", "time", "open", "high", "low", "close", "jdiff_vol","sign")) | pandas.DataFrame |
""" `snps`
tools for reading, writing, merging, and remapping SNPs
"""
"""
BSD 3-Clause License
Copyright (c) 2019, <NAME>
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
from itertools import groupby, count
import os
import re
import numpy as np
import pandas as pd
from pandas.api.types import CategoricalDtype
from snps.ensembl import EnsemblRestClient
from snps.resources import Resources
from snps.io import Reader, Writer
from snps.utils import save_df_as_csv, Parallelizer, clean_str
# set version string with Versioneer
from snps._version import get_versions
import logging
logger = logging.getLogger(__name__)
__version__ = get_versions()["version"]
del get_versions
class SNPs:
def __init__(
self,
file="",
only_detect_source=False,
assign_par_snps=True,
output_dir="output",
resources_dir="resources",
deduplicate=True,
deduplicate_XY_chrom=True,
parallelize=False,
processes=os.cpu_count(),
rsids=(),
):
""" Object used to read and parse genotype / raw data files.
Parameters
----------
file : str or bytes
path to file to load or bytes to load
only_detect_source : bool
only detect the source of the data
assign_par_snps : bool
assign PAR SNPs to the X and Y chromosomes
output_dir : str
path to output directory
resources_dir : str
name / path of resources directory
deduplicate : bool
deduplicate RSIDs and make SNPs available as `duplicate_snps`
deduplicate_XY_chrom : bool
deduplicate alleles in the non-PAR regions of X and Y for males; see `discrepant_XY_snps`
parallelize : bool
utilize multiprocessing to speedup calculations
processes : int
processes to launch if multiprocessing
rsids : tuple, optional
rsids to extract if loading a VCF file
"""
self._file = file
self._only_detect_source = only_detect_source
self._snps = pd.DataFrame()
self._duplicate_snps = pd.DataFrame()
self._discrepant_XY_snps = pd.DataFrame()
self._source = ""
self._phased = False
self._build = 0
self._build_detected = False
self._output_dir = output_dir
self._resources = Resources(resources_dir=resources_dir)
self._parallelizer = Parallelizer(parallelize=parallelize, processes=processes)
if file:
d = self._read_raw_data(file, only_detect_source, rsids)
self._snps = d["snps"]
self._source = d["source"]
self._phased = d["phased"]
if not self._snps.empty:
self.sort_snps()
if deduplicate:
self._deduplicate_rsids()
self._build = self.detect_build()
if not self._build:
self._build = 37 # assume Build 37 / GRCh37 if not detected
else:
self._build_detected = True
if deduplicate_XY_chrom:
if self.determine_sex() == "Male":
self._deduplicate_XY_chrom()
if assign_par_snps:
self._assign_par_snps()
def __repr__(self):
return "SNPs({!r})".format(self._file[0:50])
@property
def source(self):
""" Summary of the SNP data source for ``SNPs``.
Returns
-------
str
"""
return self._source
@property
def snps(self):
""" Get a copy of SNPs.
Returns
-------
pandas.DataFrame
"""
return self._snps
@property
def duplicate_snps(self):
""" Get any duplicate SNPs.
A duplicate SNP has the same RSID as another SNP. The first occurrence
of the RSID is not considered a duplicate SNP.
Returns
-------
pandas.DataFrame
"""
return self._duplicate_snps
@property
def discrepant_XY_snps(self):
""" Get any discrepant XY SNPs.
A discrepant XY SNP is a heterozygous SNP in the non-PAR region of the X
or Y chromosome found during deduplication for a detected male genotype.
Returns
-------
pandas.DataFrame
"""
return self._discrepant_XY_snps
@property
def build(self):
""" Get the build of ``SNPs``.
Returns
-------
int
"""
return self._build
@property
def build_detected(self):
""" Get status indicating if build of ``SNPs`` was detected.
Returns
-------
bool
"""
return self._build_detected
@property
def assembly(self):
""" Get the assembly of ``SNPs``.
Returns
-------
str
"""
return self.get_assembly()
@property
def snp_count(self):
""" Count of SNPs.
Returns
-------
int
"""
return self.get_snp_count()
@property
def chromosomes(self):
""" Chromosomes of ``SNPs``.
Returns
-------
list
list of str chromosomes (e.g., ['1', '2', '3', 'MT'], empty list if no chromosomes
"""
return self.get_chromosomes()
@property
def chromosomes_summary(self):
""" Summary of the chromosomes of ``SNPs``.
Returns
-------
str
human-readable listing of chromosomes (e.g., '1-3, MT'), empty str if no chromosomes
"""
return self.get_chromosomes_summary()
@property
def sex(self):
""" Sex derived from ``SNPs``.
Returns
-------
str
'Male' or 'Female' if detected, else empty str
"""
sex = self.determine_sex(chrom="X")
if not sex:
sex = self.determine_sex(chrom="Y")
return sex
@property
def unannotated_vcf(self):
""" Indicates if VCF file is unannotated.
Returns
-------
bool
"""
if self.snp_count == 0 and self.source == "vcf":
return True
return False
@property
def phased(self):
""" Indicates if genotype is phased.
Returns
-------
bool
"""
return self._phased
def heterozygous_snps(self, chrom=""):
""" Get heterozygous SNPs.
Parameters
----------
chrom : str, optional
chromosome (e.g., "1", "X", "MT")
Returns
-------
pandas.DataFrame
"""
if chrom:
return self._snps.loc[
(self._snps.chrom == chrom)
& (self._snps.genotype.notnull())
& (self._snps.genotype.str.len() == 2)
& (self._snps.genotype.str[0] != self._snps.genotype.str[1])
]
else:
return self._snps.loc[
(self._snps.genotype.notnull())
& (self._snps.genotype.str.len() == 2)
& (self._snps.genotype.str[0] != self._snps.genotype.str[1])
]
def not_null_snps(self, chrom=""):
""" Get not null SNPs.
Parameters
----------
chrom : str, optional
chromosome (e.g., "1", "X", "MT")
Returns
-------
pandas.DataFrame
"""
if chrom:
return self._snps.loc[
(self._snps.chrom == chrom) & (self._snps.genotype.notnull())
]
else:
return self._snps.loc[self._snps.genotype.notnull()]
def get_summary(self):
""" Get summary of ``SNPs``.
Returns
-------
dict
summary info if ``SNPs`` is valid, else {}
"""
if not self.is_valid():
return {}
else:
return {
"source": self.source,
"assembly": self.assembly,
"build": self.build,
"build_detected": self.build_detected,
"snp_count": self.snp_count,
"chromosomes": self.chromosomes_summary,
"sex": self.sex,
}
def is_valid(self):
""" Determine if ``SNPs`` is valid.
``SNPs`` is valid when the input file has been successfully parsed.
Returns
-------
bool
True if ``SNPs`` is valid
"""
if self._snps.empty:
return False
else:
return True
def save_snps(self, filename="", vcf=False, atomic=True, **kwargs):
""" Save SNPs to file.
Parameters
----------
filename : str or buffer
filename for file to save or buffer to write to
vcf : bool
flag to save file as VCF
atomic : bool
atomically write output to a file on local filesystem
**kwargs
additional parameters to `pandas.DataFrame.to_csv`
Returns
-------
str
path to file in output directory if SNPs were saved, else empty str
"""
return Writer.write_file(
snps=self, filename=filename, vcf=vcf, atomic=atomic, **kwargs
)
def _read_raw_data(self, file, only_detect_source, rsids):
return Reader.read_file(file, only_detect_source, self._resources, rsids)
def _assign_par_snps(self):
""" Assign PAR SNPs to the X or Y chromosome using SNP position.
References
-----
1. National Center for Biotechnology Information, Variation Services, RefSNP,
https://api.ncbi.nlm.nih.gov/variation/v0/
2. Yates et. al. (doi:10.1093/bioinformatics/btu613),
`<http://europepmc.org/search/?query=DOI:10.1093/bioinformatics/btu613>`_
3. Zerbino et. al. (doi.org/10.1093/nar/gkx1098), https://doi.org/10.1093/nar/gkx1098
4. <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>.
dbSNP: the NCBI database of genetic variation. Nucleic Acids Res. 2001 Jan 1;
29(1):308-11.
5. Database of Single Nucleotide Polymorphisms (dbSNP). Bethesda (MD): National Center
for Biotechnology Information, National Library of Medicine. dbSNP accession:
rs28736870, rs113313554, and rs758419898 (dbSNP Build ID: 151). Available from:
http://www.ncbi.nlm.nih.gov/SNP/
"""
rest_client = EnsemblRestClient(
server="https://api.ncbi.nlm.nih.gov", reqs_per_sec=1
)
for rsid in self._snps.loc[self._snps["chrom"] == "PAR"].index.values:
if "rs" in rsid:
id = rsid.split("rs")[1]
response = rest_client.perform_rest_action("/variation/v0/refsnp/" + id)
if response is not None:
for item in response["primary_snapshot_data"][
"placements_with_allele"
]:
if "NC_000023" in item["seq_id"]:
assigned = self._assign_snp(rsid, item["alleles"], "X")
elif "NC_000024" in item["seq_id"]:
assigned = self._assign_snp(rsid, item["alleles"], "Y")
else:
assigned = False
if assigned:
if not self._build_detected:
self._build = self._extract_build(item)
self._build_detected = True
break
def _assign_snp(self, rsid, alleles, chrom):
# only assign SNP if positions match (i.e., same build)
for allele in alleles:
allele_pos = allele["allele"]["spdi"]["position"]
# ref SNP positions seem to be 0-based...
if allele_pos == self._snps.loc[rsid].pos - 1:
self._snps.loc[rsid, "chrom"] = chrom
return True
return False
def _extract_build(self, item):
assembly_name = item["placement_annot"]["seq_id_traits_by_assembly"][0][
"assembly_name"
]
assembly_name = assembly_name.split(".")[0]
return int(assembly_name[-2:])
def detect_build(self):
""" Detect build of SNPs.
Use the coordinates of common SNPs to identify the build / assembly of a genotype file
that is being loaded.
Notes
-----
rs3094315 : plus strand in 36, 37, and 38
rs11928389 : plus strand in 36, minus strand in 37 and 38
rs2500347 : plus strand in 36 and 37, minus strand in 38
rs964481 : plus strand in 36, 37, and 38
rs2341354 : plus strand in 36, 37, and 38
Returns
-------
int
detected build of SNPs, else 0
References
----------
1. Yates et. al. (doi:10.1093/bioinformatics/btu613),
`<http://europepmc.org/search/?query=DOI:10.1093/bioinformatics/btu613>`_
2. Zerbino et. al. (doi.org/10.1093/nar/gkx1098), https://doi.org/10.1093/nar/gkx1098
3. <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>.
dbSNP: the NCBI database of genetic variation. Nucleic Acids Res. 2001
Jan 1;29(1):308-11.
4. Database of Single Nucleotide Polymorphisms (dbSNP). Bethesda (MD): National Center
for Biotechnology Information, National Library of Medicine. dbSNP accession: rs3094315,
rs11928389, rs2500347, rs964481, and rs2341354 (dbSNP Build ID: 151). Available from:
http://www.ncbi.nlm.nih.gov/SNP/
"""
def lookup_build_with_snp_pos(pos, s):
try:
return s.loc[s == pos].index[0]
except:
return 0
build = 0
rsids = ["rs3094315", "rs11928389", "rs2500347", "rs964481", "rs2341354"]
df = pd.DataFrame(
{
36: [742429, 50908372, 143649677, 27566744, 908436],
37: [752566, 50927009, 144938320, 27656823, 918573],
38: [817186, 50889578, 148946169, 27638706, 983193],
},
index=rsids,
)
for rsid in rsids:
if rsid in self._snps.index:
build = lookup_build_with_snp_pos(
self._snps.loc[rsid].pos, df.loc[rsid]
)
if build:
break
return build
def get_assembly(self):
""" Get the assembly of a build.
Returns
-------
str
"""
if self._build == 37:
return "GRCh37"
elif self._build == 36:
return "NCBI36"
elif self._build == 38:
return "GRCh38"
else:
return ""
def get_snp_count(self, chrom=""):
""" Count of SNPs.
Parameters
----------
chrom : str, optional
chromosome (e.g., "1", "X", "MT")
Returns
-------
int
"""
if chrom:
return len(self._snps.loc[(self._snps.chrom == chrom)])
else:
return len(self._snps)
def get_chromosomes(self):
""" Get the chromosomes of SNPs.
Returns
-------
list
            list of str chromosomes (e.g., ['1', '2', '3', 'MT']), empty list if no chromosomes
"""
if not self._snps.empty:
return list(pd.unique(self._snps["chrom"]))
else:
return []
def get_chromosomes_summary(self):
""" Summary of the chromosomes of SNPs.
Returns
-------
str
human-readable listing of chromosomes (e.g., '1-3, MT'), empty str if no chromosomes
"""
if not self._snps.empty:
chroms = list(pd.unique(self._snps["chrom"]))
int_chroms = [int(chrom) for chrom in chroms if chrom.isdigit()]
str_chroms = [chrom for chrom in chroms if not chrom.isdigit()]
# https://codereview.stackexchange.com/a/5202
def as_range(iterable):
l = list(iterable)
if len(l) > 1:
return "{0}-{1}".format(l[0], l[-1])
else:
return "{0}".format(l[0])
# create str representations
int_chroms = ", ".join(
as_range(g)
for _, g in groupby(int_chroms, key=lambda n, c=count(): n - next(c))
)
str_chroms = ", ".join(str_chroms)
if int_chroms != "" and str_chroms != "":
int_chroms += ", "
return int_chroms + str_chroms
else:
return ""
def determine_sex(
self,
heterozygous_x_snps_threshold=0.03,
y_snps_not_null_threshold=0.3,
chrom="X",
):
""" Determine sex from SNPs using thresholds.
Parameters
----------
heterozygous_x_snps_threshold : float
percentage heterozygous X SNPs; above this threshold, Female is determined
y_snps_not_null_threshold : float
percentage Y SNPs that are not null; above this threshold, Male is determined
chrom : {"X", "Y"}
use X or Y chromosome SNPs to determine sex
Returns
-------
str
'Male' or 'Female' if detected, else empty str
"""
if not self._snps.empty:
if chrom == "X":
return self._determine_sex_X(heterozygous_x_snps_threshold)
elif chrom == "Y":
return self._determine_sex_Y(y_snps_not_null_threshold)
return ""
def _determine_sex_X(self, threshold):
x_snps = self.get_snp_count("X")
if x_snps > 0:
if len(self.heterozygous_snps("X")) / x_snps > threshold:
return "Female"
else:
return "Male"
else:
return ""
def _determine_sex_Y(self, threshold):
y_snps = self.get_snp_count("Y")
if y_snps > 0:
if len(self.not_null_snps("Y")) / y_snps > threshold:
return "Male"
else:
return "Female"
else:
return ""
def _get_non_par_start_stop(self, chrom):
# get non-PAR start / stop positions for chrom
pr = self.get_par_regions(self.build)
np_start = pr.loc[(pr.chrom == chrom) & (pr.region == "PAR1")].stop.values[0]
np_stop = pr.loc[(pr.chrom == chrom) & (pr.region == "PAR2")].start.values[0]
return np_start, np_stop
def _get_non_par_snps(self, chrom, heterozygous=True):
np_start, np_stop = self._get_non_par_start_stop(chrom)
if heterozygous:
# get heterozygous SNPs in the non-PAR region (i.e., discrepant XY SNPs)
return self._snps.loc[
(self._snps.chrom == chrom)
& (self._snps.genotype.notnull())
& (self._snps.genotype.str.len() == 2)
& (self._snps.genotype.str[0] != self._snps.genotype.str[1])
& (self._snps.pos > np_start)
& (self._snps.pos < np_stop)
].index
else:
# get homozygous SNPs in the non-PAR region
return self._snps.loc[
(self._snps.chrom == chrom)
& (self._snps.genotype.notnull())
& (self._snps.genotype.str.len() == 2)
& (self._snps.genotype.str[0] == self._snps.genotype.str[1])
& (self._snps.pos > np_start)
& (self._snps.pos < np_stop)
].index
def _deduplicate_rsids(self):
# Keep first duplicate rsid.
duplicate_rsids = self._snps.index.duplicated(keep="first")
# save duplicate SNPs
self._duplicate_snps = self._duplicate_snps.append(
self._snps.loc[duplicate_rsids]
)
# deduplicate
self._snps = self._snps.loc[~duplicate_rsids]
def _deduplicate_chrom(self, chrom):
""" Deduplicate a chromosome in the non-PAR region. """
discrepant_XY_snps = self._get_non_par_snps(chrom)
# save discrepant XY SNPs
self._discrepant_XY_snps = self._discrepant_XY_snps.append(
self._snps.loc[discrepant_XY_snps]
)
# drop discrepant XY SNPs since it's ambiguous for which allele to deduplicate
self._snps.drop(discrepant_XY_snps, inplace=True)
# get remaining non-PAR SNPs with two alleles
non_par_snps = self._get_non_par_snps(chrom, heterozygous=False)
# remove duplicate allele
self._snps.loc[non_par_snps, "genotype"] = self._snps.loc[
non_par_snps, "genotype"
].apply(lambda x: x[0])
def _deduplicate_XY_chrom(self):
""" Fix chromosome issue where some data providers duplicate male X and Y chromosomes"""
self._deduplicate_chrom("X")
self._deduplicate_chrom("Y")
@staticmethod
def get_par_regions(build):
""" Get PAR regions for the X and Y chromosomes.
Parameters
----------
build : int
build of SNPs
Returns
-------
pandas.DataFrame
PAR regions for the given build
References
----------
1. Genome Reference Consortium, https://www.ncbi.nlm.nih.gov/grc/human
2. Yates et. al. (doi:10.1093/bioinformatics/btu613),
`<http://europepmc.org/search/?query=DOI:10.1093/bioinformatics/btu613>`_
3. Zerbino et. al. (doi.org/10.1093/nar/gkx1098), https://doi.org/10.1093/nar/gkx1098
"""
if build == 37:
return pd.DataFrame(
{
"region": ["PAR1", "PAR2", "PAR1", "PAR2"],
"chrom": ["X", "X", "Y", "Y"],
"start": [60001, 154931044, 10001, 59034050],
"stop": [2699520, 155260560, 2649520, 59363566],
},
columns=["region", "chrom", "start", "stop"],
)
elif build == 38:
return pd.DataFrame(
{
"region": ["PAR1", "PAR2", "PAR1", "PAR2"],
"chrom": ["X", "X", "Y", "Y"],
"start": [10001, 155701383, 10001, 56887903],
"stop": [2781479, 156030895, 2781479, 57217415],
},
columns=["region", "chrom", "start", "stop"],
)
elif build == 36:
return pd.DataFrame(
{
"region": ["PAR1", "PAR2", "PAR1", "PAR2"],
"chrom": ["X", "X", "Y", "Y"],
"start": [1, 154584238, 1, 57443438],
"stop": [2709520, 154913754, 2709520, 57772954],
},
columns=["region", "chrom", "start", "stop"],
)
else:
return pd.DataFrame()
def sort_snps(self):
""" Sort SNPs based on ordered chromosome list and position. """
sorted_list = sorted(self._snps["chrom"].unique(), key=self._natural_sort_key)
# move PAR and MT to the end of the dataframe
if "PAR" in sorted_list:
sorted_list.remove("PAR")
sorted_list.append("PAR")
if "MT" in sorted_list:
sorted_list.remove("MT")
sorted_list.append("MT")
# convert chrom column to category for sorting
# https://stackoverflow.com/a/26707444
self._snps["chrom"] = self._snps["chrom"].astype(
CategoricalDtype(categories=sorted_list, ordered=True)
)
# sort based on ordered chromosome list and position
snps = self._snps.sort_values(["chrom", "pos"])
# convert chromosome back to object
snps["chrom"] = snps["chrom"].astype(object)
self._snps = snps
def remap_snps(self, target_assembly, complement_bases=True):
""" Remap SNP coordinates from one assembly to another.
This method uses the assembly map endpoint of the Ensembl REST API service (via
``Resources``'s ``EnsemblRestClient``) to convert SNP coordinates / positions from one
assembly to another. After remapping, the coordinates / positions for the
SNPs will be that of the target assembly.
If the SNPs are already mapped relative to the target assembly, remapping will not be
performed.
Parameters
----------
target_assembly : {'NCBI36', 'GRCh37', 'GRCh38', 36, 37, 38}
assembly to remap to
complement_bases : bool
complement bases when remapping SNPs to the minus strand
Returns
-------
chromosomes_remapped : list of str
chromosomes remapped
chromosomes_not_remapped : list of str
chromosomes not remapped
Notes
-----
An assembly is also know as a "build." For example:
Assembly NCBI36 = Build 36
Assembly GRCh37 = Build 37
Assembly GRCh38 = Build 38
See https://www.ncbi.nlm.nih.gov/assembly for more information about assemblies and
remapping.
References
----------
1. Ensembl, Assembly Map Endpoint,
http://rest.ensembl.org/documentation/info/assembly_map
"""
chromosomes_remapped = []
chromosomes_not_remapped = []
snps = self.snps
if snps.empty:
logger.warning("No SNPs to remap")
return chromosomes_remapped, chromosomes_not_remapped
else:
chromosomes = snps["chrom"].unique()
chromosomes_not_remapped = list(chromosomes)
valid_assemblies = ["NCBI36", "GRCh37", "GRCh38", 36, 37, 38]
if target_assembly not in valid_assemblies:
logger.warning("Invalid target assembly")
return chromosomes_remapped, chromosomes_not_remapped
if isinstance(target_assembly, int):
if target_assembly == 36:
target_assembly = "NCBI36"
else:
target_assembly = "GRCh" + str(target_assembly)
if self.build == 36:
source_assembly = "NCBI36"
else:
source_assembly = "GRCh" + str(self.build)
if source_assembly == target_assembly:
return chromosomes_remapped, chromosomes_not_remapped
assembly_mapping_data = self._resources.get_assembly_mapping_data(
source_assembly, target_assembly
)
if not assembly_mapping_data:
return chromosomes_remapped, chromosomes_not_remapped
tasks = []
for chrom in chromosomes:
if chrom in assembly_mapping_data:
chromosomes_remapped.append(chrom)
chromosomes_not_remapped.remove(chrom)
mappings = assembly_mapping_data[chrom]
tasks.append(
{
"snps": snps.loc[snps["chrom"] == chrom],
"mappings": mappings,
"complement_bases": complement_bases,
}
)
else:
logger.warning(
"Chromosome {} not remapped; "
"removing chromosome from SNPs for consistency".format(chrom)
)
snps = snps.drop(snps.loc[snps["chrom"] == chrom].index)
# remap SNPs
remapped_snps = self._parallelizer(self._remapper, tasks)
remapped_snps = pd.concat(remapped_snps)
# update SNP positions and genotypes
snps.loc[remapped_snps.index, "pos"] = remapped_snps["pos"]
snps.loc[remapped_snps.index, "genotype"] = remapped_snps["genotype"]
self._snps = snps
self.sort_snps()
self._build = int(target_assembly[-2:])
return chromosomes_remapped, chromosomes_not_remapped
def _remapper(self, task):
""" Remap SNPs for a chromosome.
Parameters
----------
task : dict
dict with `snps` to remap per `mappings`, optionally `complement_bases`
Returns
-------
pandas.DataFrame
remapped SNPs
"""
temp = task["snps"].copy()
mappings = task["mappings"]
complement_bases = task["complement_bases"]
temp["remapped"] = False
pos_start = int(temp["pos"].describe()["min"])
pos_end = int(temp["pos"].describe()["max"])
for mapping in mappings["mappings"]:
# skip if mapping is outside of range of SNP positions
if (
mapping["original"]["end"] <= pos_start
or mapping["original"]["start"] >= pos_end
):
continue
orig_range_len = mapping["original"]["end"] - mapping["original"]["start"]
mapped_range_len = mapping["mapped"]["end"] - mapping["mapped"]["start"]
orig_region = mapping["original"]["seq_region_name"]
mapped_region = mapping["mapped"]["seq_region_name"]
if orig_region != mapped_region:
logger.warning("discrepant chroms")
continue
if orig_range_len != mapped_range_len:
logger.warning(
"discrepant coords"
) # observed when mapping NCBI36 -> GRCh38
continue
# find the SNPs that are being remapped for this mapping
snp_indices = temp.loc[
~temp["remapped"]
& (temp["pos"] >= mapping["original"]["start"])
& (temp["pos"] <= mapping["original"]["end"])
].index
if len(snp_indices) > 0:
# remap the SNPs
if mapping["mapped"]["strand"] == -1:
# flip and (optionally) complement since we're mapping to minus strand
diff_from_start = (
temp.loc[snp_indices, "pos"] - mapping["original"]["start"]
)
temp.loc[snp_indices, "pos"] = (
mapping["mapped"]["end"] - diff_from_start
)
if complement_bases:
temp.loc[snp_indices, "genotype"] = temp.loc[
snp_indices, "genotype"
].apply(self._complement_bases)
else:
# mapping is on same (plus) strand, so just remap based on offset
offset = mapping["mapped"]["start"] - mapping["original"]["start"]
temp.loc[snp_indices, "pos"] = temp["pos"] + offset
# mark these SNPs as remapped
temp.loc[snp_indices, "remapped"] = True
return temp
def _complement_bases(self, genotype):
if pd.isnull(genotype):
return np.nan
complement = ""
for base in list(genotype):
if base == "A":
complement += "T"
elif base == "G":
complement += "C"
elif base == "C":
complement += "G"
elif base == "T":
complement += "A"
else:
complement += base
return complement
# https://stackoverflow.com/a/16090640
@staticmethod
def _natural_sort_key(s, natural_sort_re=re.compile("([0-9]+)")):
return [
int(text) if text.isdigit() else text.lower()
for text in re.split(natural_sort_re, s)
]
class SNPsCollection(SNPs):
def __init__(self, raw_data=None, output_dir="output", name="", **kwargs):
"""
Parameters
----------
raw_data : list or str
path(s) to file(s) with raw genotype data
output_dir : str
path to output directory
name : str
name for this ``SNPsCollection``
"""
super().__init__(file="", output_dir=output_dir, **kwargs)
self._source = []
self._discrepant_positions_file_count = 0
self._discrepant_genotypes_file_count = 0
self._discrepant_positions = pd.DataFrame()
self._discrepant_genotypes = pd.DataFrame()
self._name = name
if raw_data is not None:
self.load_snps(raw_data)
def __repr__(self):
return "SNPsCollection(name={!r})".format(self._name)
@property
def source(self):
""" Summary of the SNP data source for ``SNPs``.
Returns
-------
str
"""
return ", ".join(self._source)
@property
def discrepant_positions(self):
""" SNPs with discrepant positions discovered while loading SNPs.
Returns
-------
pandas.DataFrame
"""
return self._discrepant_positions
@property
def discrepant_genotypes(self):
""" SNPs with discrepant genotypes discovered while loading SNPs.
Returns
-------
pandas.DataFrame
"""
return self._discrepant_genotypes
@property
def discrepant_snps(self):
""" SNPs with discrepant positions and / or genotypes discovered while loading SNPs.
Returns
-------
pandas.DataFrame
"""
df = self._discrepant_positions.append(self._discrepant_genotypes)
if len(df) > 1:
df = df.drop_duplicates()
return df
def load_snps(
self,
raw_data,
discrepant_snp_positions_threshold=100,
discrepant_genotypes_threshold=500,
save_output=False,
):
""" Load raw genotype data.
Parameters
----------
raw_data : list or str
path(s) to file(s) with raw genotype data
discrepant_snp_positions_threshold : int
threshold for discrepant SNP positions between existing data and data to be loaded,
a large value could indicate mismatched genome assemblies
discrepant_genotypes_threshold : int
threshold for discrepant genotype data between existing data and data to be loaded,
a large value could indicated mismatched individuals
save_output : bool
specifies whether to save discrepant SNP output to CSV files in the output directory
"""
if type(raw_data) is list:
for file in raw_data:
self._load_snps_helper(
file,
discrepant_snp_positions_threshold,
discrepant_genotypes_threshold,
save_output,
)
elif type(raw_data) is str:
self._load_snps_helper(
raw_data,
discrepant_snp_positions_threshold,
discrepant_genotypes_threshold,
save_output,
)
else:
raise TypeError("invalid filetype")
def _load_snps_helper(
self,
file,
discrepant_snp_positions_threshold,
discrepant_genotypes_threshold,
save_output,
):
logger.info("Loading {}".format(os.path.relpath(file)))
discrepant_positions, discrepant_genotypes = self._add_snps(
SNPs(file),
discrepant_snp_positions_threshold,
discrepant_genotypes_threshold,
save_output,
)
self._discrepant_positions = self._discrepant_positions.append(
discrepant_positions, sort=True
)
self._discrepant_genotypes = self._discrepant_genotypes.append(
discrepant_genotypes, sort=True
)
def save_snps(self, filename="", vcf=False, atomic=True, **kwargs):
""" Save SNPs to file.
Parameters
----------
filename : str
filename for file to save
vcf : bool
flag to save file as VCF
atomic : bool
atomically write output to a file on local filesystem
**kwargs
additional parameters to `pandas.DataFrame.to_csv`
Returns
-------
str
path to file in output directory if SNPs were saved, else empty str
"""
if not self._name:
prefix = ""
else:
prefix = "{}_".format(clean_str(self._name))
if not filename:
if vcf:
ext = ".vcf"
else:
ext = ".csv"
filename = "{}{}{}".format(prefix, self.assembly, ext)
return super().save_snps(filename=filename, vcf=vcf, atomic=atomic, **kwargs)
def save_discrepant_positions(self, filename=""):
""" Save SNPs with discrepant positions to file.
Parameters
----------
filename : str
filename for file to save
Returns
-------
str
path to file in output directory if SNPs were saved, else empty str
"""
return self._save_discrepant_snps_file(
self.discrepant_positions, "discrepant_positions", filename
)
def save_discrepant_genotypes(self, filename=""):
""" Save SNPs with discrepant genotypes to file.
Parameters
----------
filename : str
filename for file to save
Returns
-------
str
path to file in output directory if SNPs were saved, else empty str
"""
return self._save_discrepant_snps_file(
self.discrepant_genotypes, "discrepant_genotypes", filename
)
def save_discrepant_snps(self, filename=""):
""" Save SNPs with discrepant positions and / or genotypes to file.
Parameters
----------
filename : str
filename for file to save
Returns
-------
str
path to file in output directory if SNPs were saved, else empty str
"""
return self._save_discrepant_snps_file(
self.discrepant_snps, "discrepant_snps", filename
)
def _save_discrepant_snps_file(self, df, discrepant_snps_type, filename):
if not filename:
if not self._name:
filename = "{}.csv".format(discrepant_snps_type)
else:
filename = "{}_{}.csv".format(
clean_str(self._name), discrepant_snps_type
)
return save_df_as_csv(
df,
self._output_dir,
filename,
comment="# Source(s): {}\n".format(self.source),
)
def _add_snps(
self,
snps,
discrepant_snp_positions_threshold,
discrepant_genotypes_threshold,
save_output,
):
""" Add SNPs to this ``SNPsCollection``.
Parameters
----------
snps : SNPs
SNPs to add
discrepant_snp_positions_threshold : int
see above
discrepant_genotypes_threshold : int
see above
save_output
see above
Returns
-------
discrepant_positions : pandas.DataFrame
discrepant_genotypes : pandas.DataFrame
"""
discrepant_positions = pd.DataFrame()
"""Data transformation
.. currentmodule:: pygeochemtools.transform
.. moduleauthor:: <NAME> <<EMAIL>>
"""
import numpy as np
import pandas as pd
def long_to_wide(
df: pd.DataFrame,
sample_id: str,
element_id: str,
value: str,
units: str,
include_units: bool = False,
) -> pd.DataFrame:
"""Convert geochemical data tables from long to wide form.
This function takes a dataframe of long form geochemical data, i.e. data with one
row per element, and runs a pivot to convert it to standard wide form data with
one row per sample and each element in a separate column.
It handles duplicate values based on sample_id and element_id by taking the first
duplicate value initially, then catching the second duplicate, performing a second
pivot, and appending the duplicates to the final table. It does not handle duplicates
of duplicates (values appearing more than twice); in that case it will return only the first value.
Args:
df (pd.DataFrame): Dataframe containing long form data.
sample_id (str): Name of column containing sample ID's.
element_id (str): Name of column containing geochemical element names.
value (str): Name of column containing geochemical data values.
units (str): Name of column containing geochemical data units.
include_units (bool, optional): Whether to include units in the output. Defaults
to False.
Returns:
pd.DataFrame: Dataframe converted to wide table format with one sample per row
and columns for each element. Contains only sample_id and element/unit values.
"""
df = df
# grab duplicate values
duplicate_df = df[df.duplicated(subset=[sample_id, element_id], keep="last")]
df = df.drop_duplicates(subset=[sample_id, element_id])
if include_units:
data = df.pivot(
index=[sample_id], columns=element_id, values=[value]
).droplevel(0, axis=1)
unit = (
df.pivot(index=[sample_id], columns=element_id, values=[units])
.add_suffix("_UNIT")
.droplevel(0, axis=1)
)
assert (
data.columns.size == unit.columns.size
), "pivoted column lengths aren't equal"
c = np.empty((data.columns.size + unit.columns.size,), dtype=object,)
c[0::2], c[1::2] = (
data.columns,
unit.columns,
)
df_wide = pd.concat([data, unit], axis=1)
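# NOTE: this extract is truncated here; the branch above only covers include_units=True.
# A minimal sketch of the assumed remainder (not from the original source):
#   else:
#       df_wide = df.pivot(index=[sample_id], columns=element_id, values=[value]).droplevel(0, axis=1)
#   return df_wide
# Hypothetical usage:
#   wide = long_to_wide(df, sample_id="SAMPLE_ID", element_id="ELEMENT",
#                       value="VALUE", units="UNIT", include_units=True)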
#
# Copyright 2015 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Tests for the zipline.assets package
"""
from contextlib import contextmanager
from datetime import timedelta
from functools import partial
import pickle
import sys
from types import GetSetDescriptorType
from unittest import TestCase
import uuid
import warnings
from nose_parameterized import parameterized
from numpy import full, int32, int64
import pandas as pd
from pandas.util.testing import assert_frame_equal
from six import PY2, viewkeys
import sqlalchemy as sa
from zipline.assets import (
Asset,
Equity,
Future,
AssetDBWriter,
AssetFinder,
)
from zipline.assets.synthetic import (
make_commodity_future_info,
make_rotating_equity_info,
make_simple_equity_info,
)
from six import itervalues, integer_types
from toolz import valmap
from zipline.assets.asset_writer import (
check_version_info,
write_version_info,
_futures_defaults,
SQLITE_MAX_VARIABLE_NUMBER,
)
from zipline.assets.asset_db_schema import ASSET_DB_VERSION
from zipline.assets.asset_db_migrations import (
downgrade
)
from zipline.errors import (
EquitiesNotFound,
FutureContractsNotFound,
MultipleSymbolsFound,
MultipleValuesFoundForField,
MultipleValuesFoundForSid,
NoValueForSid,
AssetDBVersionError,
SidsNotFound,
SymbolNotFound,
AssetDBImpossibleDowngrade,
ValueNotFoundForField,
)
from zipline.testing import (
all_subindices,
empty_assets_db,
parameter_space,
tmp_assets_db,
)
from zipline.testing.predicates import assert_equal
from zipline.testing.fixtures import (
WithAssetFinder,
ZiplineTestCase,
WithTradingCalendars,
)
from zipline.utils.range import range
@contextmanager
def build_lookup_generic_cases(asset_finder_type):
"""
Generate test cases for the type of asset finder specified by
asset_finder_type for test_lookup_generic.
"""
unique_start = pd.Timestamp('2013-01-01', tz='UTC')
unique_end = pd.Timestamp('2014-01-01', tz='UTC')
dupe_0_start = pd.Timestamp('2013-01-01', tz='UTC')
# -*- coding: utf-8 -*-
"""
# ===========================================================================
# ===========================================================================
# !== <NAME> ==
# !== <NAME> ==
# !== <NAME> ==
# !== February 2021 ==
# ===========================================================================
"""
import numpy as np
import pandas as pd
for j in range(1,21):
particle = j
srt_particle = str(particle).zfill(2)
filename = 'el_osc_' + srt_particle
df = pd.read_csv(str(filename),
skiprows=0,
header=None,
delim_whitespace=True,
index_col=None,
names=['Time', 'a', 'e', 'i', 'Comega', 'omega', 'M'],
low_memory=False,
dtype={'Time': np.float64,
'a': np.float64,
'e': np.float64,
'i': np.float64,
'Comega': np.float64,
'omega': np.float64,
'M': np.float64}
)
aa = pd.Series(df.a)
import json
import numpy as np
import pandas as pd
def query_diff(google_res, sengine_res):
diff = []
for idx,query in enumerate(sengine_res):
query = query.lower()
query = query.replace("https","")
query = query.replace("http","")
query = query.replace("www.","")
if(query[-1] =='/'):
query = query[:-1]
sengine_res[idx] = query
for idx,query in enumerate(google_res):
query = query.lower()
query = query.replace("https","")
query = query.replace("http","")
query = query.replace("www.","")
if(query[-1] =='/'):
query = query[:-1]
google_res[idx] = query
for idx_google, result in enumerate(google_res):
if result in sengine_res:
sengine_idx = sengine_res.index(result)
diff.append(idx_google - sengine_idx)
n = len(diff)
difference = np.array(diff)
return difference, n
def spearman_val(diff, n):
if n == 0:
return 0
elif n == 1:
if sum(diff) == 0:
return 1
else:
return 0
spearman_coef = 1 - ((6 * np.sum(diff ** 2)) / (n * (n ** 2 - 1)))
return spearman_coef
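# Illustrative call pattern (not from the original source; variable names are hypothetical):
#   diff, n = query_diff(google_results, sengine_results)  # positional differences of overlapping URLs
#   rho = spearman_val(diff, n)                            # 0 if no overlap, otherwise the Spearman coefficient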
def generate_rank(google_pairs, sengine_pairs, output_file='hw1.csv'):
rank = pd.DataFrame(columns=["Queries", "Number of Overlapping Results", "Percent Overlap", "Spearman Coefficient"])
import inspect
import os
import re
import numpy as np
import pandas as pd
import pytest
import woodwork as ww
from woodwork.logical_types import (
Boolean,
Categorical,
CountryCode,
Datetime,
Double,
Integer,
Ordinal,
SubRegionCode,
ZIPCode
)
from woodwork.tests.testing_utils import to_pandas
from woodwork.type_sys.utils import (
_get_specified_ltype_params,
_is_numeric_series,
list_logical_types,
list_semantic_tags
)
from woodwork.utils import (
_convert_input_to_set,
_get_mode,
_is_null_latlong,
_is_s3,
_is_url,
_new_dt_including,
_reformat_to_latlong,
_to_latlong_float,
camel_to_snake,
get_valid_mi_types,
import_or_none,
import_or_raise
)
dd = import_or_none('dask.dataframe')
ks = import_or_none('databricks.koalas')
def test_camel_to_snake():
test_items = {
'ZIPCode': 'zip_code',
'SubRegionCode': 'sub_region_code',
'NaturalLanguage': 'natural_language',
'Categorical': 'categorical',
}
for key, value in test_items.items():
assert camel_to_snake(key) == value
def test_convert_input_to_set():
error_message = "semantic_tags must be a string, set or list"
with pytest.raises(TypeError, match=error_message):
_convert_input_to_set(int)
error_message = "test_text must be a string, set or list"
with pytest.raises(TypeError, match=error_message):
_convert_input_to_set({'index': {}, 'time_index': {}}, 'test_text')
error_message = "include parameter must contain only strings"
with pytest.raises(TypeError, match=error_message):
_convert_input_to_set(['index', 1], 'include parameter')
semantic_tags_from_single = _convert_input_to_set('index', 'include parameter')
assert semantic_tags_from_single == {'index'}
semantic_tags_from_list = _convert_input_to_set(['index', 'numeric', 'category'])
assert semantic_tags_from_list == {'index', 'numeric', 'category'}
semantic_tags_from_set = _convert_input_to_set({'index', 'numeric', 'category'}, 'include parameter')
assert semantic_tags_from_set == {'index', 'numeric', 'category'}
def test_list_logical_types_default():
all_ltypes = ww.logical_types.LogicalType.__subclasses__()
df = list_logical_types()
assert set(df.columns) == {'name', 'type_string', 'description', 'physical_type',
'standard_tags', 'is_default_type', 'is_registered', 'parent_type'}
assert len(all_ltypes) == len(df)
for name in df['name']:
assert ww.type_system.str_to_logical_type(name) in all_ltypes
assert all(df['is_default_type'])
assert all(df['is_registered'])
def test_list_logical_types_customized_type_system():
ww.type_system.remove_type('URL')
class CustomRegistered(ww.logical_types.LogicalType):
pandas_dtype = 'int64'
class CustomNotRegistered(ww.logical_types.LogicalType):
pandas_dtype = 'int64'
ww.type_system.add_type(CustomRegistered)
all_ltypes = ww.logical_types.LogicalType.__subclasses__()
df = list_logical_types()
assert len(all_ltypes) == len(df)
# Check that URL is unregistered
assert df.loc[18, 'is_default_type']
assert not df.loc[18, 'is_registered']
# Check that new registered type is present and shows as registered
assert 'CustomRegistered' in df['name'].values
assert not df.loc[4, 'is_default_type']
assert df.loc[4, 'is_registered']
# Check that new unregistered type is present and shows as not registered
assert 'CustomNotRegistered' in df['name'].values
assert not df.loc[3, 'is_default_type']
assert not df.loc[3, 'is_registered']
ww.type_system.reset_defaults()
def test_list_semantic_tags():
df = list_semantic_tags()
assert set(df.columns) == {'name', 'is_standard_tag', 'valid_logical_types'}
for name, log_type_list in df[['name', 'valid_logical_types']].values:
if name not in ['index', 'time_index', 'date_of_birth']:
for log_type in log_type_list:
assert name in log_type.standard_tags
def test_get_mode():
series_list = [
pd.Series([1, 2, 3, 4, 2, 2, 3]),
pd.Series(['a', 'b', 'b', 'c', 'b']),
pd.Series([3, 2, 3, 2])
# -*- coding: utf-8 -*-
"""
Created on Wed Nov 11 16:47:03 2015
@brief: Create sample sheet from TCGA metadata.json for Genomon.
@author: okada
$Id: create_samplesheet.py 127 2016-01-22 02:17:18Z aokada $
$Rev: 127 $
@code
create_samplesheet.py {TCGA metadata.json} {path to bam dir} {path to output_sample.csv} --check_result {bam check_result file} --config_file {option: config file}
@endcode
"""
rev = " $Rev: 127 $"
import numpy
import pandas
import os
import sys
import ConfigParser
import subcode
skip_template = "[analysis_id = {id}] is skipped, because of {reason}."
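# Illustrative only: skip_template.format(id="abc123", reason="missing bam file")
# renders as "[analysis_id = abc123] is skipped, because of missing bam file."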
def main():
import argparse
#print sys.argv
#sys.argv = ['./create_samplesheet.py', './metadata.acc.json', '/', 'acc2.csv', '--config_file', './create_samplesheet.cfg', '--check_result', './result_acc.txt']
name = os.path.splitext(os.path.basename(sys.argv[0]))[0]
# get args
parser = argparse.ArgumentParser(prog = name)
parser.add_argument("--version", action = "version", version = name + rev)
parser.add_argument('metadata', help = "metadata file download from TCGA", type = str)
parser.add_argument('bam_dir', help = "bam downloaded directory", type = str)
parser.add_argument('output_file', help = "output file, please input with format NAME.csv", type = str)
parser.add_argument('--check_result', help = "check_result file", type = str, default = "")
parser.add_argument("--config_file", help = "config file", type = str, default = "")
args = parser.parse_args()
create_samplesheet(args.metadata, args.bam_dir, args.output_file, args.check_result, args.config_file)
def create_samplesheet(metadata, bam_dir, output_file, check_result, config_file):
# read config file
if len(config_file) == 0:
config_file = os.path.splitext(os.path.abspath(sys.argv[0]))[0] + ".cfg"
else:
if os.path.exists(config_file) == False:
print ("This path is not exists." + config_file)
return
config = ConfigParser.RawConfigParser()
config.read(config_file)
# path check
if os.path.exists(metadata) == False:
print ("path is not exists. [metadata] " + metadata)
return
if len(check_result) > 0 and os.path.exists(check_result) == False:
print ("path is not exists. [check_result] " + check_result)
return
if subcode.path_check(bam_dir, config) == False:
print ("path is not exists. [bam_dir] " + bam_dir)
return
# get path
bam_dir = os.path.abspath(bam_dir)
output_file = os.path.abspath(output_file)
if os.path.splitext(output_file)[1].lower() != ".csv":
print ("Input output file path with format NAME.csv")
return
if os.path.exists(os.path.dirname(output_file)) == False:
os.makedirs(os.path.dirname(output_file))
# read metadata
loaded = subcode.load_metadata(metadata, bam_dir=bam_dir, config=config, check_result=check_result)
for row in loaded["invalid"]:
print (skip_template.format(id = row[0], reason = row[1]))
data_org = subcode.json_to_pandas(loaded["data"])
# multiple diseases?
di1 = data_org.sort_values(by=["disease"])
di2 = di1["disease"][(di1["disease"].duplicated() == False)]
if (len(di2) > 1):
print ("WARNING!!! Mixture of diseases.")
# add sample column, person column, if not exists
#
# for example.
# original barcode TCGA-3H-AB3K-10A-01D-A39U-32
# ---> sample TCGA-3H-AB3K-10
# ---> person TCGA-3H-AB3K
col_sample = []
col_person = []
for i in range(len(data_org)):
split = data_org["barcode"][i].split("-")
#col_sample.append("%s-%s-%s-%s" % (split[0], split[1], split[2], split[3][0:2]))
col_sample.append("%s-%s-%s-%s" % (split[0], split[1], split[2], split[3]))
col_person.append("%s-%s-%s" % (split[0], split[1], split[2]))
if ("sample" in data_org.columns) == False:
add_sample = pandas.DataFrame([col_sample]).T
add_sample.columns =["sample"]
if ("person" in data_org.columns) == False:
add_person = pandas.DataFrame([col_person]).T
add_person.columns =["person"]
data_tmp = pandas.concat([add_sample, add_person, data_org], axis=1)
else:
data_tmp = pandas.concat([add_sample, data_org], axis=1)
# sort
data = data_tmp.sort_values(by=["person", "updated"], ascending=[1, 0])
# get tumor-normal pair
person_list = data["person"][(data["person"].duplicated() == False)]
tumor_list = pandas.DataFrame([])
normal_list = pandas.DataFrame([])
for person in person_list:
one = data[(data["person"] == person)]
tmp_tumor = pandas.DataFrame([])
import re
import numpy as np
import pytest
from pandas import Categorical, CategoricalIndex, DataFrame, Index, Series
import pandas._testing as tm
from pandas.core.arrays.categorical import recode_for_categories
from pandas.tests.arrays.categorical.common import TestCategorical
class TestCategoricalAPI:
def test_ordered_api(self):
# GH 9347
cat1 = Categorical(list("acb"), ordered=False)
tm.assert_index_equal(cat1.categories, Index(["a", "b", "c"]))
assert not cat1.ordered
cat2 = Categorical(list("acb"), categories=list("bca"), ordered=False)
tm.assert_index_equal(cat2.categories, Index(["b", "c", "a"]))
assert not cat2.ordered
cat3 = Categorical(list("acb"), ordered=True)
tm.assert_index_equal(cat3.categories, Index(["a", "b", "c"]))
assert cat3.ordered
cat4 = Categorical(list("acb"), categories=list("bca"), ordered=True)
tm.assert_index_equal(cat4.categories, Index(["b", "c", "a"]))
assert cat4.ordered
def test_set_ordered(self):
cat = Categorical(["a", "b", "c", "a"], ordered=True)
cat2 = cat.as_unordered()
assert not cat2.ordered
cat2 = cat.as_ordered()
assert cat2.ordered
cat2.as_unordered(inplace=True)
assert not cat2.ordered
cat2.as_ordered(inplace=True)
assert cat2.ordered
assert cat2.set_ordered(True).ordered
assert not cat2.set_ordered(False).ordered
cat2.set_ordered(True, inplace=True)
assert cat2.ordered
cat2.set_ordered(False, inplace=True)
assert not cat2.ordered
# removed in 0.19.0
msg = "can't set attribute"
with pytest.raises(AttributeError, match=msg):
cat.ordered = True
with pytest.raises(AttributeError, match=msg):
cat.ordered = False
def test_rename_categories(self):
cat = Categorical(["a", "b", "c", "a"])
# inplace=False: the old one must not be changed
res = cat.rename_categories([1, 2, 3])
tm.assert_numpy_array_equal(
res.__array__(), np.array([1, 2, 3, 1], dtype=np.int64)
)
tm.assert_index_equal(res.categories, Index([1, 2, 3]))
exp_cat = np.array(["a", "b", "c", "a"], dtype=np.object_)
tm.assert_numpy_array_equal(cat.__array__(), exp_cat)
exp_cat = Index(["a", "b", "c"])
tm.assert_index_equal(cat.categories, exp_cat)
# GH18862 (let rename_categories take callables)
result = cat.rename_categories(lambda x: x.upper())
expected = Categorical(["A", "B", "C", "A"])
tm.assert_categorical_equal(result, expected)
# and now inplace
res = cat.rename_categories([1, 2, 3], inplace=True)
assert res is None
tm.assert_numpy_array_equal(
cat.__array__(), np.array([1, 2, 3, 1], dtype=np.int64)
)
tm.assert_index_equal(cat.categories, Index([1, 2, 3]))
@pytest.mark.parametrize("new_categories", [[1, 2, 3, 4], [1, 2]])
def test_rename_categories_wrong_length_raises(self, new_categories):
cat = Categorical(["a", "b", "c", "a"])
msg = (
"new categories need to have the same number of items as the "
"old categories!"
)
with pytest.raises(ValueError, match=msg):
cat.rename_categories(new_categories)
def test_rename_categories_series(self):
# https://github.com/pandas-dev/pandas/issues/17981
c = Categorical(["a", "b"])
result = c.rename_categories(Series([0, 1], index=["a", "b"]))
expected = Categorical([0, 1])
tm.assert_categorical_equal(result, expected)
def test_rename_categories_dict(self):
# GH 17336
cat = Categorical(["a", "b", "c", "d"])
res = cat.rename_categories({"a": 4, "b": 3, "c": 2, "d": 1})
expected = Index([4, 3, 2, 1])
tm.assert_index_equal(res.categories, expected)
# Test for inplace
res = cat.rename_categories({"a": 4, "b": 3, "c": 2, "d": 1}, inplace=True)
assert res is None
tm.assert_index_equal(cat.categories, expected)
# Test for dicts of smaller length
cat = Categorical(["a", "b", "c", "d"])
res = cat.rename_categories({"a": 1, "c": 3})
expected = Index([1, "b", 3, "d"])
tm.assert_index_equal(res.categories, expected)
# Test for dicts with bigger length
cat = Categorical(["a", "b", "c", "d"])
res = cat.rename_categories({"a": 1, "b": 2, "c": 3, "d": 4, "e": 5, "f": 6})
expected = Index([1, 2, 3, 4])
tm.assert_index_equal(res.categories, expected)
# Test for dicts with no items from old categories
cat = Categorical(["a", "b", "c", "d"])
res = cat.rename_categories({"f": 1, "g": 3})
expected = Index(["a", "b", "c", "d"])
tm.assert_index_equal(res.categories, expected)
def test_reorder_categories(self):
cat = Categorical(["a", "b", "c", "a"], ordered=True)
old = cat.copy()
new = Categorical(
["a", "b", "c", "a"], categories=["c", "b", "a"], ordered=True
)
# first inplace == False
res = cat.reorder_categories(["c", "b", "a"])
# cat must be the same as before
tm.assert_categorical_equal(cat, old)
# only res is changed
tm.assert_categorical_equal(res, new)
# inplace == True
res = cat.reorder_categories(["c", "b", "a"], inplace=True)
assert res is None
tm.assert_categorical_equal(cat, new)
@pytest.mark.parametrize(
"new_categories",
[
["a"], # not all "old" included in "new"
["a", "b", "d"], # still not all "old" in "new"
["a", "b", "c", "d"], # all "old" included in "new", but too long
],
)
def test_reorder_categories_raises(self, new_categories):
cat = Categorical(["a", "b", "c", "a"], ordered=True)
msg = "items in new_categories are not the same as in old categories"
with pytest.raises(ValueError, match=msg):
cat.reorder_categories(new_categories)
def test_add_categories(self):
cat = Categorical(["a", "b", "c", "a"], ordered=True)
old = cat.copy()
new = Categorical(
["a", "b", "c", "a"], categories=["a", "b", "c", "d"], ordered=True
)
# first inplace == False
res = cat.add_categories("d")
tm.assert_categorical_equal(cat, old)
tm.assert_categorical_equal(res, new)
res = cat.add_categories(["d"])
tm.assert_categorical_equal(cat, old)
tm.assert_categorical_equal(res, new)
# inplace == True
res = cat.add_categories("d", inplace=True)
tm.assert_categorical_equal(cat, new)
assert res is None
# GH 9927
cat = Categorical(list("abc"), ordered=True)
expected = Categorical(list("abc"), categories=list("abcde"), ordered=True)
# test with Series, np.array, index, list
res = cat.add_categories(Series(["d", "e"]))
tm.assert_categorical_equal(res, expected)
res = cat.add_categories(np.array(["d", "e"]))
tm.assert_categorical_equal(res, expected)
res = cat.add_categories(Index(["d", "e"]))
tm.assert_categorical_equal(res, expected)
res = cat.add_categories(["d", "e"])
tm.assert_categorical_equal(res, expected)
def test_add_categories_existing_raises(self):
# new is in old categories
cat = Categorical(["a", "b", "c", "d"], ordered=True)
msg = re.escape("new categories must not include old categories: {'d'}")
with pytest.raises(ValueError, match=msg):
cat.add_categories(["d"])
def test_set_categories(self):
cat = Categorical(["a", "b", "c", "a"], ordered=True)
exp_categories = Index(["c", "b", "a"])
exp_values = np.array(["a", "b", "c", "a"], dtype=np.object_)
res = cat.set_categories(["c", "b", "a"], inplace=True)
tm.assert_index_equal(cat.categories, exp_categories)
tm.assert_numpy_array_equal(cat.__array__(), exp_values)
assert res is None
res = cat.set_categories(["a", "b", "c"])
# cat must be the same as before
tm.assert_index_equal(cat.categories, exp_categories)
tm.assert_numpy_array_equal(cat.__array__(), exp_values)
# only res is changed
exp_categories_back = Index(["a", "b", "c"])
tm.assert_index_equal(res.categories, exp_categories_back)
tm.assert_numpy_array_equal(res.__array__(), exp_values)
# not all "old" included in "new" -> all not included ones are now
# np.nan
cat = Categorical(["a", "b", "c", "a"], ordered=True)
res = cat.set_categories(["a"])
tm.assert_numpy_array_equal(res.codes, np.array([0, -1, -1, 0], dtype=np.int8))
# still not all "old" in "new"
res = cat.set_categories(["a", "b", "d"])
tm.assert_numpy_array_equal(res.codes, np.array([0, 1, -1, 0], dtype=np.int8))
tm.assert_index_equal(res.categories, Index(["a", "b", "d"]))
# all "old" included in "new"
cat = cat.set_categories(["a", "b", "c", "d"])
exp_categories = Index(["a", "b", "c", "d"])
tm.assert_index_equal(cat.categories, exp_categories)
# internals...
c = Categorical([1, 2, 3, 4, 1], categories=[1, 2, 3, 4], ordered=True)
tm.assert_numpy_array_equal(c._codes, np.array([0, 1, 2, 3, 0], dtype=np.int8))
tm.assert_index_equal(c.categories, Index([1, 2, 3, 4]))
exp = np.array([1, 2, 3, 4, 1], dtype=np.int64)
tm.assert_numpy_array_equal(np.asarray(c), exp)
# all "pointers" to '4' must be changed from 3 to 0,...
c = c.set_categories([4, 3, 2, 1])
# positions are changed
tm.assert_numpy_array_equal(c._codes, np.array([3, 2, 1, 0, 3], dtype=np.int8))
# categories are now in new order
tm.assert_index_equal(c.categories, Index([4, 3, 2, 1]))
# output is the same
exp = np.array([1, 2, 3, 4, 1], dtype=np.int64)
tm.assert_numpy_array_equal(np.asarray(c), exp)
assert c.min() == 4
assert c.max() == 1
# set_categories should set the ordering if specified
c2 = c.set_categories([4, 3, 2, 1], ordered=False)
assert not c2.ordered
tm.assert_numpy_array_equal(np.asarray(c), np.asarray(c2))
# set_categories should pass thru the ordering
c2 = c.set_ordered(False).set_categories([4, 3, 2, 1])
assert not c2.ordered
tm.assert_numpy_array_equal(np.asarray(c), np.asarray(c2))
def test_to_dense_deprecated(self):
cat = Categorical(["a", "b", "c", "a"], ordered=True)
with tm.assert_produces_warning(FutureWarning):
cat.to_dense()
@pytest.mark.parametrize(
"values, categories, new_categories",
[
# No NaNs, same cats, same order
(["a", "b", "a"], ["a", "b"], ["a", "b"]),
# No NaNs, same cats, different order
(["a", "b", "a"], ["a", "b"], ["b", "a"]),
# Same, unsorted
(["b", "a", "a"], ["a", "b"], ["a", "b"]),
# No NaNs, same cats, different order
(["b", "a", "a"], ["a", "b"], ["b", "a"]),
# NaNs
(["a", "b", "c"], ["a", "b"], ["a", "b"]),
(["a", "b", "c"], ["a", "b"], ["b", "a"]),
(["b", "a", "c"], ["a", "b"], ["a", "b"]),
(["b", "a", "c"], ["a", "b"], ["a", "b"]),
# Introduce NaNs
(["a", "b", "c"], ["a", "b"], ["a"]),
(["a", "b", "c"], ["a", "b"], ["b"]),
(["b", "a", "c"], ["a", "b"], ["a"]),
(["b", "a", "c"], ["a", "b"], ["a"]),
# No overlap
(["a", "b", "c"], ["a", "b"], ["d", "e"]),
],
)
@pytest.mark.parametrize("ordered", [True, False])
def test_set_categories_many(self, values, categories, new_categories, ordered):
c = Categorical(values, categories)
expected = Categorical(values, new_categories, ordered)
result = c.set_categories(new_categories, ordered=ordered)
tm.assert_categorical_equal(result, expected)
def test_set_categories_rename_less(self):
# GH 24675
cat = Categorical(["A", "B"])
result = cat.set_categories(["A"], rename=True)
expected = Categorical(["A", np.nan])
from __future__ import absolute_import
import random
import time
import logbook
import pandas as pd
import requests
from cnswd.websource.base import friendly_download
from cnswd.websource._selenium import make_headless_browser
log = logbook.Logger('extract trade detail web data')
BASE_URL_FMT = 'http://vip.stock.finance.sina.com.cn/quotes_service/view/vMS_tradehistory.php?symbol={symbol}&date={date_str}'
DATE_FMT = '%Y-%-m-%-d' # no zero padding
def _add_prefix(stock_code):
"""查询代码"""
pre = stock_code[0]
if pre == '6':
return 'sh{}'.format(stock_code)
else:
return 'sz{}'.format(stock_code)
def _to_str(date):
"""转换为查询日期格式"""
# Visual Studio 不能直接处理
# return pd.Timestamp(date).strftime(DATE_FMT)
dt_stru = pd.Timestamp(date).timetuple()
return str(dt_stru.tm_year) + '-' + str(dt_stru.tm_mon) + '-' + str(dt_stru.tm_mday)
def _query_url(code, date):
"""查询url"""
symbol = _add_prefix(code)
date_str = _to_str(date)
return BASE_URL_FMT.format(symbol=symbol, date_str=date_str)
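# Illustrative only (values are hypothetical): _query_url("600000", "2018-01-05") builds
# "http://vip.stock.finance.sina.com.cn/quotes_service/view/vMS_tradehistory.php?symbol=sh600000&date=2018-1-5"
# -- codes starting with "6" get the "sh" prefix, all others "sz", and the date is not zero-padded.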
def _fix_data(df, code, date):
"""整理数据框"""
df.columns = ['成交时间', '成交价', '价格变动', '成交量', '成交额', '性质']
date_str = _to_str(date)
df.成交时间 = df.成交时间.map(lambda x: pd.Timestamp('{} {}'.format(date_str, x)))
df['股票代码'] = code
# df['涨跌幅'] = df['涨跌幅'].str.replace('%', '').astype(float) * 0.01
df['成交量'] = df['成交量'] * 100
df = df.sort_values('成交时间')
return df
def _get_cjmx_1(code, date):
url_fmt = 'http://vip.stock.finance.sina.com.cn/quotes_service/view/vMS_tradehistory.php?symbol={symbol_}&date={date_str}&page={page}'
dfs = []
symbol_ = _add_prefix(code)
d = pd.Timestamp(date)
if d < pd.Timestamp('today').normalize() - pd.Timedelta(days=20):
raise NotImplementedError('尚未完成')
for i in range(1, 1000):
url = url_fmt.format(symbol_=symbol_, date_str=d.strftime(r'%Y-%m-%d'), page=i)
r = requests.get(url)
r.encoding = 'gb18030'
# when there is no trading that day, an empty `DataFrame` object is returned
try:
df = pd.read_html(r.text, attrs={'id': 'datatbl'}, na_values=['--'])[0]
except ValueError:
return pd.DataFrame()
if '没有交易数据' in df.iat[0, 0]:
df = pd.DataFrame()
break
dfs.append(df)
df = pd.concat(dfs)
del df['涨跌幅']
return _fix_data(df, code, date)
def _get_cjmx_2(browser, code, date, page_sleep=0.2):
"""获取指定日期股票历史成交明细"""
url = _query_url(code, date)
browser.get(url)
# time.sleep(0.3)
# 主页信息
# 如果反馈信息有提示,代表当日没有数据,否则提示为''
msg = browser.find_element_by_css_selector('.msg').text # 所选日期非交易日,请重新选择
if msg != '':
return pd.DataFrame()
# the following operates inside the child frame
browser.switch_to.frame('list_frame')
if '输入的代码有误或没有交易数据' in browser.page_source:
return pd.DataFrame()
dfs = []
# exclude the first div element
num = len(browser.find_elements_by_css_selector('.pages > div')) - 1
## then move through the pages
css_fmt = '.pages > div:nth-child({}) > a'
for i in range(num):
css = css_fmt.format(i + 2)
target = browser.find_element_by_css_selector(css)
browser.execute_script("arguments[0].click();", target)
time.sleep(page_sleep) # sleep time depends on the local machine environment
df = pd.read_html(browser.page_source, attrs={
'id': 'datatbl'}, na_values=['--'])[0]
dfs.append(df)
df = pd.concat(dfs)
return _fix_data(df, code, date)
@friendly_download(10)
def get_cjmx(code, date, browser=None):
"""获取股票指定日期成交明细"""
d = pd.Timestamp(date)
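# NOTE: this extract is truncated here. A plausible continuation (assumed, not from the
# original source) would dispatch to the helpers above, e.g. _get_cjmx_2(browser, code, d)
# when a selenium browser is supplied, otherwise _get_cjmx_1(code, d).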
# -*- coding: utf-8 -*-
import numpy as np
import pytest
from numpy.random import RandomState
from numpy import nan
from datetime import datetime
from itertools import permutations
from pandas import (Series, Categorical, CategoricalIndex,
Timestamp, DatetimeIndex, Index, IntervalIndex)
import pandas as pd
from pandas import compat
from pandas._libs import (groupby as libgroupby, algos as libalgos,
hashtable as ht)
from pandas._libs.hashtable import unique_label_indices
from pandas.compat import lrange, range
import pandas.core.algorithms as algos
import pandas.core.common as com
import pandas.util.testing as tm
import pandas.util._test_decorators as td
from pandas.core.dtypes.dtypes import CategoricalDtype as CDT
from pandas.compat.numpy import np_array_datetime64_compat
from pandas.util.testing import assert_almost_equal
class TestMatch(object):
def test_ints(self):
values = np.array([0, 2, 1])
to_match = np.array([0, 1, 2, 2, 0, 1, 3, 0])
result = algos.match(to_match, values)
expected = np.array([0, 2, 1, 1, 0, 2, -1, 0], dtype=np.int64)
tm.assert_numpy_array_equal(result, expected)
result = Series(algos.match(to_match, values, np.nan))
expected = Series(np.array([0, 2, 1, 1, 0, 2, np.nan, 0]))
tm.assert_series_equal(result, expected)
s = Series(np.arange(5), dtype=np.float32)
result = algos.match(s, [2, 4])
expected = np.array([-1, -1, 0, -1, 1], dtype=np.int64)
tm.assert_numpy_array_equal(result, expected)
result = Series(algos.match(s, [2, 4], np.nan))
expected = Series(np.array([np.nan, np.nan, 0, np.nan, 1]))
tm.assert_series_equal(result, expected)
def test_strings(self):
values = ['foo', 'bar', 'baz']
to_match = ['bar', 'foo', 'qux', 'foo', 'bar', 'baz', 'qux']
result = algos.match(to_match, values)
expected = np.array([1, 0, -1, 0, 1, 2, -1], dtype=np.int64)
tm.assert_numpy_array_equal(result, expected)
result = Series(algos.match(to_match, values, np.nan))
expected = Series(np.array([1, 0, np.nan, 0, 1, 2, np.nan]))
tm.assert_series_equal(result, expected)
class TestFactorize(object):
def test_basic(self):
labels, uniques = algos.factorize(['a', 'b', 'b', 'a', 'a', 'c', 'c',
'c'])
tm.assert_numpy_array_equal(
uniques, np.array(['a', 'b', 'c'], dtype=object))
labels, uniques = algos.factorize(['a', 'b', 'b', 'a',
'a', 'c', 'c', 'c'], sort=True)
exp = np.array([0, 1, 1, 0, 0, 2, 2, 2], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = np.array(['a', 'b', 'c'], dtype=object)
tm.assert_numpy_array_equal(uniques, exp)
labels, uniques = algos.factorize(list(reversed(range(5))))
exp = np.array([0, 1, 2, 3, 4], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = np.array([4, 3, 2, 1, 0], dtype=np.int64)
tm.assert_numpy_array_equal(uniques, exp)
labels, uniques = algos.factorize(list(reversed(range(5))), sort=True)
exp = np.array([4, 3, 2, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = np.array([0, 1, 2, 3, 4], dtype=np.int64)
tm.assert_numpy_array_equal(uniques, exp)
labels, uniques = algos.factorize(list(reversed(np.arange(5.))))
exp = np.array([0, 1, 2, 3, 4], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = np.array([4., 3., 2., 1., 0.], dtype=np.float64)
tm.assert_numpy_array_equal(uniques, exp)
labels, uniques = algos.factorize(list(reversed(np.arange(5.))),
sort=True)
exp = np.array([4, 3, 2, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = np.array([0., 1., 2., 3., 4.], dtype=np.float64)
tm.assert_numpy_array_equal(uniques, exp)
def test_mixed(self):
# doc example reshaping.rst
x = Series(['A', 'A', np.nan, 'B', 3.14, np.inf])
labels, uniques = algos.factorize(x)
exp = np.array([0, 0, -1, 1, 2, 3], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = Index(['A', 'B', 3.14, np.inf])
tm.assert_index_equal(uniques, exp)
labels, uniques = algos.factorize(x, sort=True)
exp = np.array([2, 2, -1, 3, 0, 1], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = Index([3.14, np.inf, 'A', 'B'])
tm.assert_index_equal(uniques, exp)
def test_datelike(self):
# M8
v1 = Timestamp('20130101 09:00:00.00004')
v2 = Timestamp('20130101')
x = Series([v1, v1, v1, v2, v2, v1])
labels, uniques = algos.factorize(x)
exp = np.array([0, 0, 0, 1, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = DatetimeIndex([v1, v2])
tm.assert_index_equal(uniques, exp)
labels, uniques = algos.factorize(x, sort=True)
exp = np.array([1, 1, 1, 0, 0, 1], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = DatetimeIndex([v2, v1])
tm.assert_index_equal(uniques, exp)
# period
v1 = pd.Period('201302', freq='M')
v2 = pd.Period('201303', freq='M')
x = Series([v1, v1, v1, v2, v2, v1])
# periods are not 'sorted' as they are converted back into an index
labels, uniques = algos.factorize(x)
exp = np.array([0, 0, 0, 1, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
tm.assert_index_equal(uniques, pd.PeriodIndex([v1, v2]))
labels, uniques = algos.factorize(x, sort=True)
exp = np.array([0, 0, 0, 1, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
tm.assert_index_equal(uniques, pd.PeriodIndex([v1, v2]))
# GH 5986
v1 = pd.to_timedelta('1 day 1 min')
v2 = pd.to_timedelta('1 day')
x = Series([v1, v2, v1, v1, v2, v2, v1])
labels, uniques = algos.factorize(x)
exp = np.array([0, 1, 0, 0, 1, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
tm.assert_index_equal(uniques, pd.to_timedelta([v1, v2]))
labels, uniques = algos.factorize(x, sort=True)
exp = np.array([1, 0, 1, 1, 0, 0, 1], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
tm.assert_index_equal(uniques, pd.to_timedelta([v2, v1]))
def test_factorize_nan(self):
# nan should map to na_sentinel, not reverse_indexer[na_sentinel]
# rizer.factorize should not raise an exception if na_sentinel indexes
# outside of reverse_indexer
key = np.array([1, 2, 1, np.nan], dtype='O')
rizer = ht.Factorizer(len(key))
for na_sentinel in (-1, 20):
ids = rizer.factorize(key, sort=True, na_sentinel=na_sentinel)
expected = np.array([0, 1, 0, na_sentinel], dtype='int32')
assert len(set(key)) == len(set(expected))
tm.assert_numpy_array_equal(pd.isna(key),
expected == na_sentinel)
# nan still maps to na_sentinel when sort=False
key = np.array([0, np.nan, 1], dtype='O')
na_sentinel = -1
# TODO(wesm): unused?
ids = rizer.factorize(key, sort=False, na_sentinel=na_sentinel) # noqa
expected = np.array([2, -1, 0], dtype='int32')
assert len(set(key)) == len(set(expected))
tm.assert_numpy_array_equal(pd.isna(key), expected == na_sentinel)
@pytest.mark.parametrize("data,expected_label,expected_level", [
(
[(1, 1), (1, 2), (0, 0), (1, 2), 'nonsense'],
[0, 1, 2, 1, 3],
[(1, 1), (1, 2), (0, 0), 'nonsense']
),
(
[(1, 1), (1, 2), (0, 0), (1, 2), (1, 2, 3)],
[0, 1, 2, 1, 3],
[(1, 1), (1, 2), (0, 0), (1, 2, 3)]
),
(
[(1, 1), (1, 2), (0, 0), (1, 2)],
[0, 1, 2, 1],
[(1, 1), (1, 2), (0, 0)]
)
])
def test_factorize_tuple_list(self, data, expected_label, expected_level):
# GH9454
result = pd.factorize(data)
tm.assert_numpy_array_equal(result[0],
np.array(expected_label, dtype=np.intp))
expected_level_array = com._asarray_tuplesafe(expected_level,
dtype=object)
tm.assert_numpy_array_equal(result[1], expected_level_array)
def test_complex_sorting(self):
# gh 12666 - check no segfault
# Test not valid numpy versions older than 1.11
if pd._np_version_under1p11:
pytest.skip("Test valid only for numpy 1.11+")
x17 = np.array([complex(i) for i in range(17)], dtype=object)
pytest.raises(TypeError, algos.factorize, x17[::-1], sort=True)
def test_uint64_factorize(self):
data = np.array([2**63, 1, 2**63], dtype=np.uint64)
exp_labels = np.array([0, 1, 0], dtype=np.intp)
exp_uniques = np.array([2**63, 1], dtype=np.uint64)
labels, uniques = algos.factorize(data)
tm.assert_numpy_array_equal(labels, exp_labels)
tm.assert_numpy_array_equal(uniques, exp_uniques)
data = np.array([2**63, -1, 2**63], dtype=object)
exp_labels = np.array([0, 1, 0], dtype=np.intp)
exp_uniques = np.array([2**63, -1], dtype=object)
labels, uniques = algos.factorize(data)
tm.assert_numpy_array_equal(labels, exp_labels)
tm.assert_numpy_array_equal(uniques, exp_uniques)
def test_deprecate_order(self):
# gh 19727 - check warning is raised for deprecated keyword, order.
# Test not valid once order keyword is removed.
data = np.array([2**63, 1, 2**63], dtype=np.uint64)
with tm.assert_produces_warning(expected_warning=FutureWarning):
algos.factorize(data, order=True)
with tm.assert_produces_warning(False):
algos.factorize(data)
@pytest.mark.parametrize('data', [
np.array([0, 1, 0], dtype='u8'),
np.array([-2**63, 1, -2**63], dtype='i8'),
np.array(['__nan__', 'foo', '__nan__'], dtype='object'),
])
def test_parametrized_factorize_na_value_default(self, data):
# arrays that include the NA default for that type, but isn't used.
l, u = algos.factorize(data)
expected_uniques = data[[0, 1]]
expected_labels = np.array([0, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(l, expected_labels)
tm.assert_numpy_array_equal(u, expected_uniques)
@pytest.mark.parametrize('data, na_value', [
(np.array([0, 1, 0, 2], dtype='u8'), 0),
(np.array([1, 0, 1, 2], dtype='u8'), 1),
(np.array([-2**63, 1, -2**63, 0], dtype='i8'), -2**63),
(np.array([1, -2**63, 1, 0], dtype='i8'), 1),
(np.array(['a', '', 'a', 'b'], dtype=object), 'a'),
(np.array([(), ('a', 1), (), ('a', 2)], dtype=object), ()),
(np.array([('a', 1), (), ('a', 1), ('a', 2)], dtype=object),
('a', 1)),
])
def test_parametrized_factorize_na_value(self, data, na_value):
l, u = algos._factorize_array(data, na_value=na_value)
expected_uniques = data[[1, 3]]
expected_labels = np.array([-1, 0, -1, 1], dtype=np.intp)
tm.assert_numpy_array_equal(l, expected_labels)
tm.assert_numpy_array_equal(u, expected_uniques)
class TestUnique(object):
def test_ints(self):
arr = np.random.randint(0, 100, size=50)
result = algos.unique(arr)
assert isinstance(result, np.ndarray)
def test_objects(self):
arr = np.random.randint(0, 100, size=50).astype('O')
result = algos.unique(arr)
assert isinstance(result, np.ndarray)
def test_object_refcount_bug(self):
lst = ['A', 'B', 'C', 'D', 'E']
for i in range(1000):
len(algos.unique(lst))
def test_on_index_object(self):
mindex = pd.MultiIndex.from_arrays([np.arange(5).repeat(5), np.tile(
np.arange(5), 5)])
expected = mindex.values
expected.sort()
mindex = mindex.repeat(2)
result = pd.unique(mindex)
result.sort()
tm.assert_almost_equal(result, expected)
def test_datetime64_dtype_array_returned(self):
# GH 9431
expected = np_array_datetime64_compat(
['2015-01-03T00:00:00.000000000+0000',
'2015-01-01T00:00:00.000000000+0000'],
dtype='M8[ns]')
dt_index = pd.to_datetime(['2015-01-03T00:00:00.000000000+0000',
'2015-01-01T00:00:00.000000000+0000',
'2015-01-01T00:00:00.000000000+0000'])
result = algos.unique(dt_index)
tm.assert_numpy_array_equal(result, expected)
assert result.dtype == expected.dtype
s = Series(dt_index)
result = algos.unique(s)
tm.assert_numpy_array_equal(result, expected)
assert result.dtype == expected.dtype
arr = s.values
result = algos.unique(arr)
tm.assert_numpy_array_equal(result, expected)
assert result.dtype == expected.dtype
def test_timedelta64_dtype_array_returned(self):
# GH 9431
expected = np.array([31200, 45678, 10000], dtype='m8[ns]')
td_index = pd.to_timedelta([31200, 45678, 31200, 10000, 45678])
result = algos.unique(td_index)
tm.assert_numpy_array_equal(result, expected)
assert result.dtype == expected.dtype
s = Series(td_index)
result = algos.unique(s)
tm.assert_numpy_array_equal(result, expected)
assert result.dtype == expected.dtype
arr = s.values
result = algos.unique(arr)
tm.assert_numpy_array_equal(result, expected)
assert result.dtype == expected.dtype
def test_uint64_overflow(self):
s = Series([1, 2, 2**63, 2**63], dtype=np.uint64)
exp = np.array([1, 2, 2**63], dtype=np.uint64)
tm.assert_numpy_array_equal(algos.unique(s), exp)
def test_nan_in_object_array(self):
l = ['a', np.nan, 'c', 'c']
result = pd.unique(l)
expected = np.array(['a', np.nan, 'c'], dtype=object)
tm.assert_numpy_array_equal(result, expected)
def test_categorical(self):
# we are expecting to return in the order
# of appearance
expected = Categorical(list('bac'), categories=list('bac'))
# we are expecting to return in the order
# of the categories
expected_o = Categorical(
list('bac'), categories=list('abc'), ordered=True)
# GH 15939
c = Categorical(list('baabc'))
result = c.unique()
tm.assert_categorical_equal(result, expected)
result = algos.unique(c)
tm.assert_categorical_equal(result, expected)
c = Categorical(list('baabc'), ordered=True)
result = c.unique()
tm.assert_categorical_equal(result, expected_o)
result = algos.unique(c)
tm.assert_categorical_equal(result, expected_o)
# Series of categorical dtype
s = Series(Categorical(list('baabc')), name='foo')
result = s.unique()
tm.assert_categorical_equal(result, expected)
result = pd.unique(s)
tm.assert_categorical_equal(result, expected)
# CI -> return CI
ci = CategoricalIndex(Categorical(list('baabc'),
categories=list('bac')))
expected = CategoricalIndex(expected)
result = ci.unique()
tm.assert_index_equal(result, expected)
result = pd.unique(ci)
tm.assert_index_equal(result, expected)
def test_datetime64tz_aware(self):
# GH 15939
result = Series(
Index([Timestamp('20160101', tz='US/Eastern'),
Timestamp('20160101', tz='US/Eastern')])).unique()
expected = np.array([Timestamp('2016-01-01 00:00:00-0500',
tz='US/Eastern')], dtype=object)
tm.assert_numpy_array_equal(result, expected)
result = Index([Timestamp('20160101', tz='US/Eastern'),
Timestamp('20160101', tz='US/Eastern')]).unique()
expected = DatetimeIndex(['2016-01-01 00:00:00'],
dtype='datetime64[ns, US/Eastern]', freq=None)
tm.assert_index_equal(result, expected)
result = pd.unique(
Series(Index([Timestamp('20160101', tz='US/Eastern'),
Timestamp('20160101', tz='US/Eastern')])))
expected = np.array([Timestamp('2016-01-01 00:00:00-0500',
tz='US/Eastern')], dtype=object)
tm.assert_numpy_array_equal(result, expected)
result = pd.unique(Index([Timestamp('20160101', tz='US/Eastern'),
Timestamp('20160101', tz='US/Eastern')]))
"""Tests of :mod:`message_ix_models.util`."""
import logging
import re
from pathlib import Path
import numpy as np
import pandas as pd
import pytest
from iam_units import registry
from message_ix import Scenario, make_df
from pandas.testing import assert_series_equal
from message_ix_models import ScenarioInfo
from message_ix_models.util import (
MESSAGE_DATA_PATH,
MESSAGE_MODELS_PATH,
as_codes,
broadcast,
check_support,
convert_units,
copy_column,
ffill,
iter_parameters,
load_package_data,
load_private_data,
local_data_path,
make_source_tech,
maybe_query,
package_data_path,
private_data_path,
series_of_pint_quantity,
)
_actual_package_data = Path(__file__).parents[1].joinpath("data")
def test_as_codes():
"""Forward reference to a child is silently dropped."""
data = dict(
foo=dict(child=["bar"]),
bar=dict(name="Bar!"),
)
result = as_codes(data)
assert result[1] not in result[0].child
# With Codes already, the function is a pass-through
assert result == as_codes(result)
def test_broadcast(caplog):
# Debug message logged with length-0 values
with caplog.at_level(logging.DEBUG, logger="message_ix_models"):
broadcast(pd.DataFrame(columns=["foo", "bar"]), foo=[], bar=[])
assert "Don't broadcast over 'foo'; labels [] have length 0" in caplog.messages
# TODO expand
@pytest.mark.parametrize(
"data",
(
set(),
# dict() with a value that is not a str or a further dict()
dict(foo="foo", bar=[1, 2, 3]),
),
)
def test_as_codes_invalid(data):
"""as_codes() rejects invalid data."""
with pytest.raises(TypeError):
as_codes(data)
def test_check_support(test_context):
""":func:`.check_support` raises an exception for missing/non-matching values."""
args = [test_context, dict(regions=["R11", "R14"]), "Test data available"]
# Setting not set → KeyError
with pytest.raises(KeyError, match="regions"):
check_support(*args)
# Accepted value
test_context.regions = "R11"
check_support(*args)
# Wrong setting
test_context.regions = "FOO"
with pytest.raises(
NotImplementedError,
match=re.escape("Test data available for ['R11', 'R14']; got 'FOO'"),
):
check_support(*args)
def test_convert_units(recwarn):
""":func:`.convert_units` and :func:`.series_of_pint_quantity` work."""
# Common arguments
args = [pd.Series([1.1, 10.2, 100.3], name="bar"), dict(bar=(10.0, "lb", "kg"))]
exp = series_of_pint_quantity(
[registry("4.9895 kg"), registry("46.2664 kg"), registry("454.9531 kg")],
)
# With store="quantity", a series of pint.Quantity is returned
result = convert_units(*args, store="quantity")
assert all(np.isclose(a, b, atol=1e-4) for a, b in zip(exp.values, result.values))
# With store="magnitude", a series of floats
exp = pd.Series([q.magnitude for q in exp.values], name="bar")
assert_series_equal(exp, convert_units(*args, store="magnitude"), check_dtype=False)
# Other values for store= are errors
with pytest.raises(ValueError, match="store='foo'"):
convert_units(*args, store="foo")
# series_of_pint_quantity() successfully caught warnings
assert 0 == len(recwarn)
def test_copy_column():
df = pd.DataFrame([[0, 1], [2, 3]], columns=["a", "b"])
import pandas as pd
import numpy as np
from sklearn.svm import SVC
from sklearn.ensemble import GradientBoostingClassifier,RandomForestClassifier
from sklearn.metrics import f1_score
class Submission():
def __init__(self, train_data_path, test_data_path):
self.train_data = pd.read_csv(train_data_path, header=None)
self.test_data = pd.read_csv(test_data_path)
def predict(self):
# Split the training data into x and y
X_train,y_train = self.train_data.iloc[:,:-1], self.train_data.iloc[:,-1]
# Train the model
classifier = RandomForestClassifier(random_state = 1)
classifier.fit(X_train, y_train)
# print(f1_score(classifier.predict(X_train),y_train,average = 'weighted'))
# Predict on test set and save the prediction
submission = classifier.predict(self.test_data)
submission = pd.DataFrame(submission)
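# Minimal usage sketch (hypothetical paths, not from the original source):
#   sub = Submission("train.csv", "test.csv")
#   sub.predict()
# The method would typically end by persisting the DataFrame, e.g.
# submission.to_csv("submission.csv", index=False) -- assumed, since the extract ends here.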
## http://maths-people.anu.edu.au/~johnm/courses/mathdm/talks/dimitri-clickadvert.pdf
## Grobal Features
## Brightness, Saturation, Colorfulness, Naturalness, Contrast, Sharpness,
## Texture, Grayscale simplicity, RGB simplicity, Color harmony, Hue histogram
## Local Features
## Basic segment statistics, Segment hue histogram, Segment color harmony, Segment brightness
## High level FEatures
## Interest points, Saliency map, Text, Human faces
import os
import numpy as np
import pandas as pd
import cv2
from tqdm import tqdm
import gzip
import gc
from scipy.stats import skew, kurtosis, entropy
from joblib import Parallel, delayed
from scipy.ndimage import sobel
from utils import *
import pytesseract
import warnings
from saliency import Saliency
warnings.filterwarnings("ignore")
# train_file_dir = '../input/train_jpg'
# test_file_dir = '../input/test_jpg'
train_file_dir = '../input/data/competition_files/train_jpg'
test_file_dir = '../input/data/competition_files/test_jpg'
from PIL import Image
import cv2
def getstats(arr):
ave = np.mean(arr)
std = np.std(arr)
ske = skew(arr.ravel())
kur = kurtosis(arr.ravel())
ent = entropy(arr.ravel())
return [ave, std, ske, kur, ent]
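# Illustrative only (not from the original source): getstats returns
# [mean, std, skew, kurtosis, entropy] of the flattened array, e.g.
#   getstats(np.array([[1.0, 2.0], [3.0, 4.0]]))  # mean 2.5, std ~1.118, plus shape stats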
def get_interest_points(arr):
size = arr.shape[0] * arr.shape[1] / 100
gray= cv2.cvtColor(arr,cv2.COLOR_BGR2GRAY)
sift = cv2.xfeatures2d.SIFT_create()
kp = sift.detect(gray,None)
return [len(kp), len(kp)/size]
def get_saliency_map(arr):
size = arr.shape[0] * arr.shape[1]
sali = Saliency(arr)
m = sali.get_saliency_map()
sali_stats = getstats(m)
cnt = (m>0.80).sum()
cnt2 = (m>0.9).sum()
cnt3 = (m<0.1).sum()
cnt4 = (m<0.2).sum()
return sali_stats + [cnt, cnt/size, cnt2, cnt2/size, cnt3, cnt3/size, cnt4, cnt4/size]
def get_data_from_image(image_path, f):
try:
cv_img = cv2.imread(image_path)
bw = cv2.imread(image_path,0)
yuv_img = cv2.cvtColor(cv_img, cv2.COLOR_BGR2YUV)
hls_img = cv2.cvtColor(cv_img, cv2.COLOR_RGB2HLS)
hsv_img = cv2.cvtColor(cv_img, cv2.COLOR_RGB2HSV)
img_size_x,img_size_y = cv_img.shape[0], cv_img.shape[1]
img_size = img_size_x * img_size_y
pixels = cv_img.shape[0] * cv_img.shape[1] * cv_img.shape[2]
except:
return [0] * num_feat
#Saliency map,Interest points
interest_points = get_interest_points(cv_img)
saliency_map = get_saliency_map(cv_img)
output = interest_points + saliency_map
return output
def get_features(f, filedir):
if f=="NAN":
return [np.nan] * num_feat
else:
image_name = os.path.join(filedir, f+'.jpg')
return get_data_from_image(image_name, f)
cols = ["interest_points_1", "interest_points_2"] + ["saliency_map_{}".format(i+1) for i in range(13)]
num_feat = len(cols)
print("NUM Features", num_feat)
print("train data...")
train_image_ids = pd.read_csv("../input/train.csv", usecols=["image"])["image"].fillna("NAN").tolist()
file_dir = train_file_dir
out = Parallel(n_jobs=-1, verbose=1)([delayed(get_features)(f,file_dir) for f in train_image_ids])
df_out = pd.DataFrame(out, columns=cols)
del out,train_image_ids; gc.collect()
to_parquet(df_out, "../features/fe_img_basic_3_features_train.parquet")
del df_out; gc.collect()
print("test data...")
test_image_ids = pd.read_csv("../input/test.csv", usecols=["image"])
# -*- coding: utf-8 -*-
# pylint: disable=W0612,E1101
from datetime import datetime
import operator
import nose
from functools import wraps
import numpy as np
import pandas as pd
from pandas import Series, DataFrame, Index, isnull, notnull, pivot, MultiIndex
from pandas.core.datetools import bday
from pandas.core.nanops import nanall, nanany
from pandas.core.panel import Panel
from pandas.core.series import remove_na
import pandas.core.common as com
from pandas import compat
from pandas.compat import range, lrange, StringIO, OrderedDict, signature
from pandas import SparsePanel
from pandas.util.testing import (assert_panel_equal, assert_frame_equal,
assert_series_equal, assert_almost_equal,
assert_produces_warning, ensure_clean,
assertRaisesRegexp, makeCustomDataframe as
mkdf, makeMixedDataFrame)
import pandas.core.panel as panelm
import pandas.util.testing as tm
def ignore_sparse_panel_future_warning(func):
"""
decorator to ignore FutureWarning if we have a SparsePanel
can be removed when SparsePanel is fully removed
"""
@wraps(func)
def wrapper(self, *args, **kwargs):
if isinstance(self.panel, SparsePanel):
with assert_produces_warning(FutureWarning,
check_stacklevel=False):
return func(self, *args, **kwargs)
else:
return func(self, *args, **kwargs)
return wrapper
class PanelTests(object):
panel = None
def test_pickle(self):
unpickled = self.round_trip_pickle(self.panel)
assert_frame_equal(unpickled['ItemA'], self.panel['ItemA'])
def test_rank(self):
self.assertRaises(NotImplementedError, lambda: self.panel.rank())
def test_cumsum(self):
cumsum = self.panel.cumsum()
assert_frame_equal(cumsum['ItemA'], self.panel['ItemA'].cumsum())
def not_hashable(self):
c_empty = Panel()
c = Panel(Panel([[[1]]]))
self.assertRaises(TypeError, hash, c_empty)
self.assertRaises(TypeError, hash, c)
class SafeForLongAndSparse(object):
_multiprocess_can_split_ = True
def test_repr(self):
repr(self.panel)
@ignore_sparse_panel_future_warning
def test_copy_names(self):
for attr in ('major_axis', 'minor_axis'):
getattr(self.panel, attr).name = None
cp = self.panel.copy()
getattr(cp, attr).name = 'foo'
self.assertIsNone(getattr(self.panel, attr).name)
def test_iter(self):
tm.equalContents(list(self.panel), self.panel.items)
def test_count(self):
f = lambda s: notnull(s).sum()
self._check_stat_op('count', f, obj=self.panel, has_skipna=False)
def test_sum(self):
self._check_stat_op('sum', np.sum)
def test_mean(self):
self._check_stat_op('mean', np.mean)
def test_prod(self):
self._check_stat_op('prod', np.prod)
def test_median(self):
def wrapper(x):
if isnull(x).any():
return np.nan
return np.median(x)
self._check_stat_op('median', wrapper)
def test_min(self):
self._check_stat_op('min', np.min)
def test_max(self):
self._check_stat_op('max', np.max)
def test_skew(self):
try:
from scipy.stats import skew
except ImportError:
raise nose.SkipTest("no scipy.stats.skew")
def this_skew(x):
if len(x) < 3:
return np.nan
return skew(x, bias=False)
self._check_stat_op('skew', this_skew)
# def test_mad(self):
# f = lambda x: np.abs(x - x.mean()).mean()
# self._check_stat_op('mad', f)
def test_var(self):
def alt(x):
if len(x) < 2:
return np.nan
return np.var(x, ddof=1)
self._check_stat_op('var', alt)
def test_std(self):
def alt(x):
if len(x) < 2:
return np.nan
return np.std(x, ddof=1)
self._check_stat_op('std', alt)
def test_sem(self):
def alt(x):
if len(x) < 2:
return np.nan
return np.std(x, ddof=1) / np.sqrt(len(x))
self._check_stat_op('sem', alt)
# def test_skew(self):
# from scipy.stats import skew
# def alt(x):
# if len(x) < 3:
# return np.nan
# return skew(x, bias=False)
# self._check_stat_op('skew', alt)
def _check_stat_op(self, name, alternative, obj=None, has_skipna=True):
if obj is None:
obj = self.panel
# # set some NAs
# obj.ix[5:10] = np.nan
# obj.ix[15:20, -2:] = np.nan
f = getattr(obj, name)
if has_skipna:
def skipna_wrapper(x):
nona = remove_na(x)
if len(nona) == 0:
return np.nan
return alternative(nona)
def wrapper(x):
return alternative(np.asarray(x))
for i in range(obj.ndim):
result = f(axis=i, skipna=False)
assert_frame_equal(result, obj.apply(wrapper, axis=i))
else:
skipna_wrapper = alternative
wrapper = alternative
for i in range(obj.ndim):
result = f(axis=i)
if not tm._incompat_bottleneck_version(name):
assert_frame_equal(result, obj.apply(skipna_wrapper, axis=i))
self.assertRaises(Exception, f, axis=obj.ndim)
# Unimplemented numeric_only parameter.
if 'numeric_only' in signature(f).args:
# python rl-policy-generator.py --msa_name SanFrancisco --rel_result True --epochs 100
import setproctitle
setproctitle.setproctitle("gnn-simu-vac@chenlin")
from utils import *
import argparse
import os
import sys
import networkx as nx
import igraph as ig
import numpy as np
import pandas as pd
from sklearn import preprocessing
from models import get_model
from config import *
import torch
import torch.optim as optim
import random
import datetime
from torch.distributions import Categorical
import multiprocessing
import time
import pdb
import pickle
sys.path.append(os.path.join(os.getcwd(), '../gt-generator'))
import constants
import functions
import disease_model
# Limit GPU usage
#os.environ["CUDA_VISIBLE_DEVICES"] = "2" #"1"
torch.cuda.set_device(0) #1 #nvidia-smi
############################################################################################
# Training settings
parser = argparse.ArgumentParser()
parser.add_argument('--no-cuda', action='store_true', default=False,
help='Disables CUDA training.')
parser.add_argument('--seed', type=int, default=42, help='Random seed.')
parser.add_argument('--epochs', type=int, default=200,
help='Number of epochs to train.')
parser.add_argument('--lr', type=float, default=0.01,
help='Initial learning rate.')
parser.add_argument('--weight_decay', type=float, default=5e-4,
help='Weight decay (L2 loss on parameters).')
parser.add_argument('--hidden', type=int, default=32, #100,#400, #default=16(original)
help='Number of hidden units.')
parser.add_argument('--dropout', type=float, default=0.5,
help='Dropout rate (1 - keep probability).')
#20220113
parser.add_argument('--gt_root', default=os.path.abspath(os.path.join(os.pardir,'data/safegraph')),
help='Path to ground truth .csv files.')
parser.add_argument('--msa_name',
help='MSA name.')
#20220118
parser.add_argument('--normalize', default = True,
help='Whether normalize node features or not.')
parser.add_argument('--rel_result', default = False, action='store_true',
help='Whether retrieve results relative to no_vac.')
#20220123
parser.add_argument('--prefix', default= '/home',
help='Prefix of data root. /home for rl4, /data for dl3, /data4 for dl2.')
#20220127
parser.add_argument('--trained_evaluator_folder', default= 'chenlin/pygcn/pygcn/trained_model',
help='Folder to reload trained evaluator model.')
# 20220129
parser.add_argument('--vaccination_ratio', default=0.01, #0.02
help='Vaccination ratio (w.r.t. total population).')
parser.add_argument('--vaccination_time', default=0, #31
help='Time to distribute vaccines.')
parser.add_argument('--NN', type=int,
help='Number of CBGs to receive vaccines.')
# 20220203
parser.add_argument('--quicktest', default= False, action='store_true',
help='If true, perform only 2 simulations in traditional_evaluate(); else 40.')
# 20220204
parser.add_argument('--epoch_width', default=1000, type=int,
help='Num of samples in an epoch.')
parser.add_argument('--model_save_folder', default= 'chenlin/pygcn/pygcn/trained_model',
help='Folder to save trained model.')
# 20220205
parser.add_argument('--simulation_cache_filename', default='chenlin/pygcn/pygcn/simulation_cache_temp.pkl',
help='File to save traditional_simulate results.')
parser.add_argument('--replay_width', type=int, default=2,
help='Num of experienced actions to be replayed.')
parser.add_argument('--replay_buffer_capacity',type=int,default=200,
help='Maximum number of vaccine policy to be stored in replay buffer.')
# 20220206
parser.add_argument('--simulation_cache_folder', default='chenlin/pygcn/pygcn',
help='Folder to save traditional_simulate results.')
parser.add_argument('--save_checkpoint', default=False, action='store_true',
help='If true, save best checkpoint and final model to .pt file.')
args = parser.parse_args()
# Check important parameters
args.cuda = not args.no_cuda and torch.cuda.is_available()
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if args.cuda: torch.cuda.manual_seed(args.seed)
print('args.rel_result: ', args.rel_result)
print('args.quicktest: ', args.quicktest)
print('args.epochs: ', args.epochs)
print('args.epoch_width: ', args.epoch_width)
print('args.replay_width: ', args.replay_width)
print('args.save_checkpoint: ', args.save_checkpoint)
#evaluator_path = os.path.join(args.prefix, args.trained_evaluator_folder, 'total_cases_20220126.pt')
#evaluator_path = os.path.join(args.prefix, args.trained_evaluator_folder, 'total_cases_of_250epochs_20220131.pt')
evaluator_path = os.path.join(args.prefix, args.trained_evaluator_folder, 'total_cases_of_100epochs_20220203.pt')
print('evaluator_path: ', evaluator_path)
today = str(datetime.date.today()).replace('-','') # yyyy-mm-dd -> yyyymmdd
print('today: ', today)
checkpoint_save_path = os.path.join(args.prefix, args.model_save_folder, f'checkpoint_generator_maxreward_{today}.pt')
print('checkpoint_save_path: ', checkpoint_save_path)
simulation_cache_save_path = os.path.join(args.prefix, args.simulation_cache_filename)
print('simulation_cache_save_path: ', simulation_cache_save_path)
cache_dict = multiprocessing.Manager().dict()
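# Note: Manager().dict() returns a proxy dict that can be shared with worker
# processes; a worker writing e.g. cache_dict[key] = simulated_outcome makes
# the entry visible to the parent. (The exact key scheme used by the
# simulation workers is an assumption here and is not shown in this file.)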
# Load simulation cache to accelarate training #20220205
dict_path_list = ['simulation_cache_combined.pkl',
'simulation_cache_temp.pkl'
#'simulation_cache_1.pkl',
#'simulation_cache_2.pkl',
#'simulation_cache_3.pkl',
#'simulation_cache_202202061756.pkl',
#'simulation_cache_202202070130.pkl',
#'simulation_cache_202202070454.pkl'
] #20220206
combined_dict = dict()
for dict_path in dict_path_list:
if(os.path.exists(dict_path)):
with open(os.path.join(args.prefix, args.simulation_cache_folder,dict_path), 'rb') as f:
new_dict = pickle.load(f)
combined_dict = {**combined_dict,**new_dict}
print(f'len(new_dict): {len(new_dict)}')
print(f'len(combined_dict): {len(combined_dict)}')
pdb.set_trace()
with open(os.path.join(args.prefix, 'chenlin/pygcn/pygcn/simulation_cache_combined.pkl'), 'wb') as f:
pickle.dump(combined_dict, f)
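# Note: with dict unpacking, entries from files later in dict_path_list win on
# key collisions, e.g. {**{'a': 1}, **{'a': 2}} == {'a': 2}.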
###############################################################################
# Load traditional simulator
epic_data_root = f'{args.prefix}/chenlin/COVID-19/Data'
mob_data_root = f'{args.prefix}/chenlin/COVID-19/Data' #Path to mobility data.
# Vaccination protection rate
PROTECTION_RATE = 1
# Policy execution ratio
EXECUTION_RATIO = 1
MSA_NAME_FULL = constants.MSA_NAME_FULL_DICT[args.msa_name]
# Random Seed
if(args.quicktest): NUM_SEEDS = 5 #2
else: NUM_SEEDS = 40
print('NUM_SEEDS: ', NUM_SEEDS)
STARTING_SEED = range(NUM_SEEDS)
# Load POI-CBG visiting matrices
f = open(os.path.join(epic_data_root, args.msa_name, '%s_2020-03-01_to_2020-05-02.pkl'%MSA_NAME_FULL), 'rb')
poi_cbg_visits_list = pickle.load(f)
f.close()
# Load precomputed parameters to adjust(clip) POI dwell times
d = pd.read_csv(os.path.join(epic_data_root,args.msa_name, 'parameters_%s.csv' % args.msa_name))
# No clipping
MIN_DATETIME = datetime.datetime(2020, 3, 1, 0)
MAX_DATETIME = datetime.datetime(2020, 5, 2, 23)
all_hours = functions.list_hours_in_range(MIN_DATETIME, MAX_DATETIME)
poi_areas = d['feet'].values  # area
poi_dwell_times = d['median'].values  # average dwell time
poi_dwell_time_correction_factors = (poi_dwell_times / (poi_dwell_times+60)) ** 2
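# Worked example (assuming dwell times are in minutes, as in the SafeGraph
# mobility model): a POI with a median dwell time of 30 minutes gets a
# correction factor of (30 / (30 + 60)) ** 2 = (1 / 3) ** 2 ~= 0.111, i.e. its
# visits are down-weighted to roughly 11% of a full-hour stay.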
del d
# Load CBG ids for the MSA
cbg_ids_msa = pd.read_csv(os.path.join(epic_data_root,args.msa_name,'%s_cbg_ids.csv'%MSA_NAME_FULL))
cbg_ids_msa.rename(columns={"cbg_id":"census_block_group"}, inplace=True)
num_cbgs = len(cbg_ids_msa)
# Mapping from cbg_ids to columns in hourly visiting matrices
cbgs_to_idxs = dict(zip(cbg_ids_msa['census_block_group'].values, range(num_cbgs)))
x = {}
for i in cbgs_to_idxs:
x[str(i)] = cbgs_to_idxs[i]
idxs_msa_all = list(x.values())
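# Illustration (hypothetical ids): if cbg_ids_msa['census_block_group'] were
# [60750101001, 60750101002], then cbgs_to_idxs == {60750101001: 0,
# 60750101002: 1}, x == {'60750101001': 0, '60750101002': 1}, and
# idxs_msa_all == [0, 1] -- the column indices into the hourly visit matrices.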
# Load SafeGraph data to obtain CBG sizes (i.e., populations)
filepath = os.path.join(epic_data_root,"safegraph_open_census_data/data/cbg_b01.csv")
cbg_agesex = pd.read_csv(filepath)
# Extract CBGs belonging to the MSA - https://covid-mobility.stanford.edu//datasets/
cbg_age_msa = | pd.merge(cbg_ids_msa, cbg_agesex, on='census_block_group', how='left') | pandas.merge |
import pandas as pd
import numpy as np
#load Data
node1_record = pd.read_csv("../Data/Node1_5SecData.csv")
node1_record.AbsT = | pd.to_datetime(node1_record.AbsT) | pandas.to_datetime |
'''
'''
import pandas as pd
import os
import json
import numpy
class Encoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, numpy.integer):
return int(obj)
elif isinstance(obj, numpy.floating):
return float(obj)
elif isinstance(obj, numpy.ndarray):
return obj.tolist()
else:
return super().default(obj)
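# Example (illustrative): json.dumps({"n": numpy.int64(3)}, cls=Encoder)
# returns '{"n": 3}', since Encoder converts NumPy scalars/arrays to native
# Python types before serialization.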
def write_json(conn, table_name, destination=""):
'''
Read `table_name` from the SQLAlchemy connection `conn` in chunks of 1000
rows and serialize the contents to JSON at `destination` (NumPy scalar and
array types are handled by the Encoder above).
'''
chunks = []
for chunk in | pd.read_sql_table(table_name, conn, chunksize=1000) | pandas.read_sql_table |
# -*- coding: utf-8 -*-
"""
author: zengbin93
email: <EMAIL>
create_dt: 2022/5/5 23:09
describe: simulation tracking based on Tushare data
"""
import os
import inspect
import traceback
import pandas as pd
from tqdm import tqdm
from typing import Callable, List
from czsc import envs
from czsc.data import TsDataCache, freq_cn2ts
from czsc.utils import BarGenerator, dill_load, dill_dump
from czsc.objects import RawBar
from czsc.traders.advanced import CzscAdvancedTrader
from czsc.traders.performance import PairsPerformance
| pd.set_option('display.max_rows', 600) | pandas.set_option |
from __future__ import division
from datetime import datetime
import sys
if sys.version_info < (3, 3):
import mock
else:
from unittest import mock
import pandas as pd
import numpy as np
import random
from nose.tools import assert_almost_equal as aae
import bt
import bt.algos as algos
def test_algo_name():
class TestAlgo(algos.Algo):
pass
actual = TestAlgo()
assert actual.name == 'TestAlgo'
class DummyAlgo(algos.Algo):
def __init__(self, return_value=True):
self.return_value = return_value
self.called = False
def __call__(self, target):
self.called = True
return self.return_value
def test_algo_stack():
algo1 = DummyAlgo(return_value=True)
algo2 = DummyAlgo(return_value=False)
algo3 = DummyAlgo(return_value=True)
target = mock.MagicMock()
stack = bt.AlgoStack(algo1, algo2, algo3)
actual = stack(target)
assert not actual
assert algo1.called
assert algo2.called
assert not algo3.called
def test_print_temp_data():
target = mock.MagicMock()
target.temp={}
target.temp['selected'] = ['c1','c2']
target.temp['weights'] = [0.5,0.5]
algo = algos.PrintTempData()
assert algo( target )
algo = algos.PrintTempData( 'Selected: {selected}')
assert algo( target )
def test_print_info():
target = bt.Strategy('s', [])
target.temp={}
algo = algos.PrintInfo()
assert algo( target )
algo = algos.PrintInfo( '{now}: {name}')
assert algo( target )
def test_run_once():
algo = algos.RunOnce()
assert algo(None)
assert not algo(None)
assert not algo(None)
def test_run_period():
target = mock.MagicMock()
dts = pd.date_range('2010-01-01', periods=35)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
algo = algos.RunPeriod()
# adds the initial day
backtest = bt.Backtest(
bt.Strategy('', [algo]),
data
)
target.data = backtest.data
dts = target.data.index
target.now = None
assert not algo(target)
# run on first date
target.now = dts[0]
assert not algo(target)
# run on first supplied date
target.now = dts[1]
assert algo(target)
# run on last date
target.now = dts[len(dts) - 1]
assert not algo(target)
algo = algos.RunPeriod(
run_on_first_date=False,
run_on_end_of_period=True,
run_on_last_date=True
)
# adds the initial day
backtest = bt.Backtest(
bt.Strategy('', [algo]),
data
)
target.data = backtest.data
dts = target.data.index
# run on first date
target.now = dts[0]
assert not algo(target)
# first supplied date
target.now = dts[1]
assert not algo(target)
# run on last date
target.now = dts[len(dts) - 1]
assert algo(target)
# date not in index
target.now = datetime(2009, 2, 15)
assert not algo(target)
def test_run_daily():
target = mock.MagicMock()
dts = pd.date_range('2010-01-01', periods=35)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
algo = algos.RunDaily()
# adds the initial day
backtest = bt.Backtest(
bt.Strategy('',[algo]),
data
)
target.data = backtest.data
target.now = dts[1]
assert algo(target)
def test_run_weekly():
dts = pd.date_range('2010-01-01', periods=367)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
target = mock.MagicMock()
target.data = data
algo = algos.RunWeekly()
# adds the initial day
backtest = bt.Backtest(
bt.Strategy('', [algo]),
data
)
target.data = backtest.data
# end of week
target.now = dts[2]
assert not algo(target)
# new week
target.now = dts[3]
assert algo(target)
algo = algos.RunWeekly(
run_on_first_date=False,
run_on_end_of_period=True,
run_on_last_date=True
)
# adds the initial day
backtest = bt.Backtest(
bt.Strategy('', [algo]),
data
)
target.data = backtest.data
# end of week
target.now = dts[2]
assert algo(target)
# new week
target.now = dts[3]
assert not algo(target)
dts = pd.DatetimeIndex([datetime(2016, 1, 3), datetime(2017, 1, 8),datetime(2018, 1, 7)])
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
# adds the initial day
backtest = bt.Backtest(
bt.Strategy('', [algo]),
data
)
target.data = backtest.data
# check next year
target.now = dts[1]
assert algo(target)
def test_run_monthly():
dts = pd.date_range('2010-01-01', periods=367)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
target = mock.MagicMock()
target.data = data
algo = algos.RunMonthly()
# adds the initial day
backtest = bt.Backtest(
bt.Strategy('', [algo]),
data
)
target.data = backtest.data
# end of month
target.now = dts[30]
assert not algo(target)
# new month
target.now = dts[31]
assert algo(target)
algo = algos.RunMonthly(
run_on_first_date=False,
run_on_end_of_period=True,
run_on_last_date=True
)
# adds the initial day
backtest = bt.Backtest(
bt.Strategy('', [algo]),
data
)
target.data = backtest.data
# end of month
target.now = dts[30]
assert algo(target)
# new month
target.now = dts[31]
assert not algo(target)
dts = pd.DatetimeIndex([datetime(2016, 1, 3), datetime(2017, 1, 8), datetime(2018, 1, 7)])
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
# adds the initial day
backtest = bt.Backtest(
bt.Strategy('', [algo]),
data
)
target.data = backtest.data
# check next year
target.now = dts[1]
assert algo(target)
def test_run_quarterly():
dts = pd.date_range('2010-01-01', periods=367)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
target = mock.MagicMock()
target.data = data
algo = algos.RunQuarterly()
# adds the initial day
backtest = bt.Backtest(
bt.Strategy('', [algo]),
data
)
target.data = backtest.data
# end of quarter
target.now = dts[89]
assert not algo(target)
# new quarter
target.now = dts[90]
assert algo(target)
algo = algos.RunQuarterly(
run_on_first_date=False,
run_on_end_of_period=True,
run_on_last_date=True
)
# adds the initial day
backtest = bt.Backtest(
bt.Strategy('', [algo]),
data
)
target.data = backtest.data
# end of quarter
target.now = dts[89]
assert algo(target)
# new quarter
target.now = dts[90]
assert not algo(target)
dts = pd.DatetimeIndex([datetime(2016, 1, 3), datetime(2017, 1, 8), datetime(2018, 1, 7)])
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
# adds the initial day
backtest = bt.Backtest(
bt.Strategy('', [algo]),
data
)
target.data = backtest.data
# check next year
target.now = dts[1]
assert algo(target)
def test_run_yearly():
dts = pd.date_range('2010-01-01', periods=367)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
target = mock.MagicMock()
target.data = data
algo = algos.RunYearly()
# adds the initial day
backtest = bt.Backtest(
bt.Strategy('', [algo]),
data
)
target.data = backtest.data
# end of year
target.now = dts[364]
assert not algo(target)
# new year
target.now = dts[365]
assert algo(target)
algo = algos.RunYearly(
run_on_first_date=False,
run_on_end_of_period=True,
run_on_last_date=True
)
# adds the initial day
backtest = bt.Backtest(
bt.Strategy('', [algo]),
data
)
target.data = backtest.data
# end of year
target.now = dts[364]
assert algo(target)
# new year
target.now = dts[365]
assert not algo(target)
def test_run_on_date():
target = mock.MagicMock()
target.now = pd.to_datetime('2010-01-01')
algo = algos.RunOnDate('2010-01-01', '2010-01-02')
assert algo(target)
target.now = pd.to_datetime('2010-01-02')
assert algo(target)
target.now = pd.to_datetime('2010-01-03')
assert not algo(target)
def test_run_if_out_of_bounds():
algo = algos.RunIfOutOfBounds(0.5)
dts = pd.date_range('2010-01-01', periods=3)
s = bt.Strategy('s')
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
s.setup(data)
s.temp['selected'] = ['c1', 'c2']
s.temp['weights'] = {'c1': .5, 'c2':.5}
s.update(dts[0])
s.children['c1'] = bt.core.SecurityBase('c1')
s.children['c2'] = bt.core.SecurityBase('c2')
s.children['c1']._weight = 0.5
s.children['c2']._weight = 0.5
assert not algo(s)
s.children['c1']._weight = 0.25
s.children['c2']._weight = 0.75
assert not algo(s)
s.children['c1']._weight = 0.24
s.children['c2']._weight = 0.76
assert algo(s)
s.children['c1']._weight = 0.75
s.children['c2']._weight = 0.25
assert not algo(s)
s.children['c1']._weight = 0.76
s.children['c2']._weight = 0.24
assert algo(s)
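# The tolerance is relative to each target weight: with targets of 0.5 and a
# threshold of 0.5, the allowed band is 0.5 +/- 0.5 * 0.5 = [0.25, 0.75], so
# 0.24 / 0.76 trigger a rebalance while 0.25 / 0.75 do not (consistent with
# the assertions above).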
def test_run_after_date():
target = mock.MagicMock()
target.now = pd.to_datetime('2010-01-01')
algo = algos.RunAfterDate('2010-01-02')
assert not algo(target)
target.now = pd.to_datetime('2010-01-02')
assert not algo(target)
target.now = pd.to_datetime('2010-01-03')
assert algo(target)
def test_run_after_days():
target = mock.MagicMock()
target.now = pd.to_datetime('2010-01-01')
algo = algos.RunAfterDays(3)
assert not algo(target)
assert not algo(target)
assert not algo(target)
assert algo(target)
def test_set_notional():
algo = algos.SetNotional('notional')
s = bt.FixedIncomeStrategy('s')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100.)
notional = pd.Series(index=dts[:2], data=[1e6, 5e6])
s.setup( data, notional = notional )
s.update(dts[0])
assert algo(s)
assert s.temp['notional_value'] == 1e6
s.update(dts[1])
assert algo(s)
assert s.temp['notional_value'] == 5e6
s.update(dts[2])
assert not algo(s)
def test_rebalance():
algo = algos.Rebalance()
s = bt.Strategy('s')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
s.setup(data)
s.adjust(1000)
s.update(dts[0])
s.temp['weights'] = {'c1': 1}
assert algo(s)
assert s.value == 1000
assert s.capital == 0
c1 = s['c1']
assert c1.value == 1000
assert c1.position == 10
assert c1.weight == 1.
s.temp['weights'] = {'c2': 1}
assert algo(s)
assert s.value == 1000
assert s.capital == 0
c2 = s['c2']
assert c1.value == 0
assert c1.position == 0
assert c1.weight == 0
assert c2.value == 1000
assert c2.position == 10
assert c2.weight == 1.
def test_rebalance_with_commissions():
algo = algos.Rebalance()
s = bt.Strategy('s')
s.set_commissions(lambda q, p: 1)
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
s.setup(data)
s.adjust(1000)
s.update(dts[0])
s.temp['weights'] = {'c1': 1}
assert algo(s)
assert s.value == 999
assert s.capital == 99
c1 = s['c1']
assert c1.value == 900
assert c1.position == 9
assert c1.weight == 900 / 999.
s.temp['weights'] = {'c2': 1}
assert algo(s)
assert s.value == 997
assert s.capital == 97
c2 = s['c2']
assert c1.value == 0
assert c1.position == 0
assert c1.weight == 0
assert c2.value == 900
assert c2.position == 9
assert c2.weight == 900. / 997
def test_rebalance_with_cash():
algo = algos.Rebalance()
s = bt.Strategy('s')
s.set_commissions(lambda q, p: 1)
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
s.setup(data)
s.adjust(1000)
s.update(dts[0])
s.temp['weights'] = {'c1': 1}
# set cash amount
s.temp['cash'] = 0.5
assert algo(s)
assert s.value == 999
assert s.capital == 599
c1 = s['c1']
assert c1.value == 400
assert c1.position == 4
assert c1.weight == 400.0 / 999
s.temp['weights'] = {'c2': 1}
# change cash amount
s.temp['cash'] = 0.25
assert algo(s)
assert s.value == 997
assert s.capital == 297
c2 = s['c2']
assert c1.value == 0
assert c1.position == 0
assert c1.weight == 0
assert c2.value == 700
assert c2.position == 7
assert c2.weight == 700.0 / 997
def test_rebalance_updatecount():
algo = algos.Rebalance()
s = bt.Strategy('s')
s.use_integer_positions(False)
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2', 'c3', 'c4','c5'], data=100)
s.setup(data)
s.adjust(1000)
s.update(dts[0])
s.temp['weights'] = {'c1': 0.25, 'c2':0.25, 'c3':0.25, 'c4':0.25}
update = bt.core.SecurityBase.update
bt.core.SecurityBase._update_call_count = 0
def side_effect(self, *args, **kwargs):
bt.core.SecurityBase._update_call_count += 1
return update(self, *args, **kwargs)
with mock.patch.object(bt.core.SecurityBase, 'update', side_effect) as mock_update:
assert algo(s)
assert s.value == 1000
assert s.capital == 0
# Update is called once when each weighted security is created (4)
# and once for each security after all allocations are made (4)
assert bt.core.SecurityBase._update_call_count == 8
s.update(dts[1])
s.temp['weights'] = {'c1': 0.5, 'c2':0.5}
update = bt.core.SecurityBase.update
bt.core.SecurityBase._update_call_count = 0
def side_effect(self, *args, **kwargs):
bt.core.SecurityBase._update_call_count += 1
return update(self, *args, **kwargs)
with mock.patch.object(bt.core.SecurityBase, 'update', side_effect) as mock_update:
assert algo(s)
# Update is called once for each weighted security before allocation (4)
# and once for each security after all allocations are made (4)
assert bt.core.SecurityBase._update_call_count == 8
s.update(dts[2])
s.temp['weights'] = {'c1': 0.25, 'c2':0.25, 'c3':0.25, 'c4':0.25}
update = bt.core.SecurityBase.update
bt.core.SecurityBase._update_call_count = 0
def side_effect(self, *args, **kwargs):
bt.core.SecurityBase._update_call_count += 1
return update(self, *args, **kwargs)
with mock.patch.object(bt.core.SecurityBase, 'update', side_effect) as mock_update:
assert algo(s)
# Update is called once for each weighted security before allocation (2)
# and once for each security after all allocations are made (4)
assert bt.core.SecurityBase._update_call_count == 6
def test_rebalance_fixedincome():
algo = algos.Rebalance()
c1 = bt.Security('c1')
c2 = bt.CouponPayingSecurity('c2')
s = bt.FixedIncomeStrategy('s', children = [c1, c2])
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
coupons = pd.DataFrame(index=dts, columns=['c2'], data=0)
s.setup(data, coupons=coupons)
s.update(dts[0])
s.temp['notional_value'] = 1000
s.temp['weights'] = {'c1': 1}
assert algo(s)
assert s.value == 0.
assert s.notional_value == 1000
assert s.capital == -1000
c1 = s['c1']
assert c1.value == 1000
assert c1.notional_value == 1000
assert c1.position == 10
assert c1.weight == 1.
s.temp['weights'] = {'c2': 1}
assert algo(s)
assert s.value == 0.
assert s.notional_value == 1000
assert s.capital == -1000*100
c2 = s['c2']
assert c1.value == 0
assert c1.notional_value == 0
assert c1.position == 0
assert c1.weight == 0
assert c2.value == 1000*100
assert c2.notional_value == 1000
assert c2.position == 1000
assert c2.weight == 1.
def test_select_all():
algo = algos.SelectAll()
s = bt.Strategy('s')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100.)
data['c1'][dts[1]] = np.nan
data['c2'][dts[1]] = 95
data['c1'][dts[2]] = -5
s.setup(data)
s.update(dts[0])
assert algo(s)
selected = s.temp['selected']
assert len(selected) == 2
assert 'c1' in selected
assert 'c2' in selected
# make sure don't keep nan
s.update(dts[1])
assert algo(s)
selected = s.temp['selected']
assert len(selected) == 1
assert 'c2' in selected
# if specify include_no_data then 2
algo2 = algos.SelectAll(include_no_data=True)
assert algo2(s)
selected = s.temp['selected']
assert len(selected) == 2
assert 'c1' in selected
assert 'c2' in selected
# behavior on negative prices
s.update(dts[2])
assert algo(s)
selected = s.temp['selected']
assert len(selected) == 1
assert 'c2' in selected
algo3 = algos.SelectAll(include_negative=True)
assert algo3(s)
selected = s.temp['selected']
assert len(selected) == 2
assert 'c1' in selected
assert 'c2' in selected
def test_select_randomly_n_none():
algo = algos.SelectRandomly(n=None) # Behaves like SelectAll
s = bt.Strategy('s')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100.)
data['c1'][dts[1]] = np.nan
data['c2'][dts[1]] = 95
data['c1'][dts[2]] = -5
s.setup(data)
s.update(dts[0])
assert algo(s)
selected = s.temp.pop('selected')
assert len(selected) == 2
assert 'c1' in selected
assert 'c2' in selected
# make sure don't keep nan
s.update(dts[1])
assert algo(s)
selected = s.temp.pop('selected')
assert len(selected) == 1
assert 'c2' in selected
# if specify include_no_data then 2
algo2 = algos.SelectRandomly(n=None, include_no_data=True)
assert algo2(s)
selected = s.temp.pop('selected')
assert len(selected) == 2
assert 'c1' in selected
assert 'c2' in selected
# behavior on negative prices
s.update(dts[2])
assert algo(s)
selected = s.temp.pop('selected')
assert len(selected) == 1
assert 'c2' in selected
algo3 = algos.SelectRandomly(n=None, include_negative=True)
assert algo3(s)
selected = s.temp.pop('selected')
assert len(selected) == 2
assert 'c1' in selected
assert 'c2' in selected
def test_select_randomly():
s = bt.Strategy('s')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2', 'c3'], data=100.)
data['c1'][dts[0]] = np.nan
data['c2'][dts[0]] = 95
data['c3'][dts[0]] = -5
s.setup(data)
s.update(dts[0])
algo = algos.SelectRandomly(n=1)
assert algo(s)
assert s.temp.pop('selected') == ['c2']
random.seed(1000)
algo = algos.SelectRandomly(n=1, include_negative=True)
assert algo(s)
assert s.temp.pop('selected') == ['c3']
random.seed(1009)
algo = algos.SelectRandomly(n=1, include_no_data=True)
assert algo(s)
assert s.temp.pop('selected') == ['c1']
random.seed(1009)
# If selected already set, it will further filter it
s.temp['selected'] = ['c2']
algo = algos.SelectRandomly(n=1, include_no_data=True)
assert algo(s)
assert s.temp.pop('selected') == ['c2']
def test_select_these():
s = bt.Strategy('s')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100.)
data['c1'][dts[1]] = np.nan
data['c2'][dts[1]] = 95
data['c1'][dts[2]] = -5
s.setup(data)
s.update(dts[0])
algo = algos.SelectThese( ['c1', 'c2'])
assert algo(s)
selected = s.temp['selected']
assert len(selected) == 2
assert 'c1' in selected
assert 'c2' in selected
algo = algos.SelectThese( ['c1'])
assert algo(s)
selected = s.temp['selected']
assert len(selected) == 1
assert 'c1' in selected
# make sure don't keep nan
s.update(dts[1])
algo = algos.SelectThese( ['c1', 'c2'])
assert algo(s)
selected = s.temp['selected']
assert len(selected) == 1
assert 'c2' in selected
# if specify include_no_data then 2
algo2 = algos.SelectThese( ['c1', 'c2'], include_no_data=True)
assert algo2(s)
selected = s.temp['selected']
assert len(selected) == 2
assert 'c1' in selected
assert 'c2' in selected
# behavior on negative prices
s.update(dts[2])
assert algo(s)
selected = s.temp['selected']
assert len(selected) == 1
assert 'c2' in selected
algo3 = algos.SelectThese(['c1', 'c2'], include_negative=True)
assert algo3(s)
selected = s.temp['selected']
assert len(selected) == 2
assert 'c1' in selected
assert 'c2' in selected
def test_select_where_all():
s = bt.Strategy('s')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100.)
data['c1'][dts[1]] = np.nan
data['c2'][dts[1]] = 95
data['c1'][dts[2]] = -5
where = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=True)
s.setup(data, where = where)
s.update(dts[0])
algo = algos.SelectWhere('where')
assert algo(s)
selected = s.temp['selected']
assert len(selected) == 2
assert 'c1' in selected
assert 'c2' in selected
# make sure don't keep nan
s.update(dts[1])
algo = algos.SelectThese( ['c1', 'c2'])
assert algo(s)
selected = s.temp['selected']
assert len(selected) == 1
assert 'c2' in selected
# if specify include_no_data then 2
algo2 = algos.SelectWhere('where', include_no_data=True)
assert algo2(s)
selected = s.temp['selected']
assert len(selected) == 2
assert 'c1' in selected
assert 'c2' in selected
# behavior on negative prices
s.update(dts[2])
assert algo(s)
selected = s.temp['selected']
assert len(selected) == 1
assert 'c2' in selected
algo3 = algos.SelectWhere('where', include_negative=True)
assert algo3(s)
selected = s.temp['selected']
assert len(selected) == 2
assert 'c1' in selected
assert 'c2' in selected
def test_select_where():
s = bt.Strategy('s')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100.)
where = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=True)
where.loc[ dts[1] ] = False
where['c1'].loc[ dts[2] ] = False
algo = algos.SelectWhere('where')
s.setup(data, where=where)
s.update(dts[0])
assert algo(s)
selected = s.temp['selected']
assert len(selected) == 2
assert 'c1' in selected
assert 'c2' in selected
s.update(dts[1])
assert algo(s)
assert s.temp['selected'] == []
s.update(dts[2])
assert algo(s)
assert s.temp['selected'] == ['c2']
def test_select_where_legacy():
s = bt.Strategy('s')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100.)
where = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=True)
where.loc[ dts[1] ] = False
where['c1'].loc[ dts[2] ] = False
algo = algos.SelectWhere(where)
s.setup(data)
s.update(dts[0])
assert algo(s)
selected = s.temp['selected']
assert len(selected) == 2
assert 'c1' in selected
assert 'c2' in selected
s.update(dts[1])
assert algo(s)
assert s.temp['selected'] == []
s.update(dts[2])
assert algo(s)
assert s.temp['selected'] == ['c2']
def test_select_regex():
s = bt.Strategy('s')
algo = algos.SelectRegex( 'c1' )
s.temp['selected'] = ['a1', 'c1', 'c2', 'c11', 'cc1']
assert algo( s )
assert s.temp['selected'] == ['c1', 'c11', 'cc1']
algo = algos.SelectRegex( '^c1$' )
assert algo( s )
assert s.temp['selected'] == ['c1']
def test_resolve_on_the_run():
s = bt.Strategy('s')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2', 'b1'], data=100.)
data['c1'][dts[1]] = np.nan
data['c2'][dts[1]] = 95
data['c2'][dts[2]] = -5
on_the_run = pd.DataFrame(index=dts, columns=['c'], data='c1')
on_the_run.loc[dts[2], 'c'] = 'c2'
s.setup(data, on_the_run = on_the_run)
s.update(dts[0])
s.temp['selected'] = ['c', 'b1']
algo = algos.ResolveOnTheRun( 'on_the_run' )
assert algo(s)
selected = s.temp['selected']
assert len(selected) == 2
assert 'c1' in selected
assert 'b1' in selected
# make sure don't keep nan
s.update(dts[1])
s.temp['selected'] = ['c', 'b1']
assert algo(s)
selected = s.temp['selected']
assert len(selected) == 1
assert 'b1' in selected
# if specify include_no_data then 2
algo2 = algos.ResolveOnTheRun('on_the_run', include_no_data=True)
s.temp['selected'] = ['c', 'b1']
assert algo2(s)
selected = s.temp['selected']
assert len(selected) == 2
assert 'c1' in selected
assert 'b1' in selected
# behavior on negative prices
s.update(dts[2])
s.temp['selected'] = ['c', 'b1']
assert algo(s)
selected = s.temp['selected']
assert len(selected) == 1
assert 'b1' in selected
algo3 = algos.ResolveOnTheRun('on_the_run', include_negative=True)
s.temp['selected'] = ['c', 'b1']
assert algo3(s)
selected = s.temp['selected']
assert len(selected) == 2
assert 'c2' in selected
assert 'b1' in selected
def test_select_types():
c1 = bt.Security('c1')
c2 = bt.CouponPayingSecurity('c2')
c3 = bt.HedgeSecurity('c3')
c4 = bt.CouponPayingHedgeSecurity('c4')
c5 = bt.FixedIncomeSecurity('c5')
s = bt.Strategy('p', children = [c1, c2, c3, c4, c5])
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2', 'c3', 'c4', 'c5'], data=100.)
coupons = pd.DataFrame(index=dts, columns=['c2', 'c4'], data=0.)
s.setup(data, coupons = coupons)
i = 0
s.update(dts[i])
algo = algos.SelectTypes(include_types=(bt.Security, bt.HedgeSecurity), exclude_types=())
assert algo(s)
assert set(s.temp.pop('selected')) == set(['c1', 'c3'])
algo = algos.SelectTypes(include_types=(bt.core.SecurityBase,), exclude_types=(bt.CouponPayingSecurity,))
assert algo(s)
assert set(s.temp.pop('selected')) == set(['c1', 'c3', 'c5'])
s.temp['selected'] = ['c1', 'c2', 'c3']
algo = algos.SelectTypes(include_types=(bt.core.SecurityBase,))
assert algo(s)
assert set(s.temp.pop('selected')) == set(['c1', 'c2', 'c3'])
def test_weight_equally():
algo = algos.WeighEqually()
s = bt.Strategy('s')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
s.setup(data)
s.update(dts[0])
s.temp['selected'] = ['c1', 'c2']
assert algo(s)
weights = s.temp['weights']
assert len(weights) == 2
assert 'c1' in weights
assert weights['c1'] == 0.5
assert 'c2' in weights
assert weights['c2'] == 0.5
def test_weight_specified():
algo = algos.WeighSpecified(c1=0.6, c2=0.4)
s = bt.Strategy('s')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
data['c1'][dts[1]] = 105
data['c2'][dts[1]] = 95
s.setup(data)
s.update(dts[0])
assert algo(s)
weights = s.temp['weights']
assert len(weights) == 2
assert 'c1' in weights
assert weights['c1'] == 0.6
assert 'c2' in weights
assert weights['c2'] == 0.4
def test_scale_weights():
s = bt.Strategy('s')
algo = algos.ScaleWeights( -0.5 )
s.temp['weights'] = {'c1': 0.5, 'c2': -0.4, 'c3':0 }
assert algo( s )
assert s.temp['weights'] == {'c1':-0.25, 'c2':0.2, 'c3':0}
def test_select_has_data():
algo = algos.SelectHasData(min_count=3, lookback=pd.DateOffset(days=3))
s = bt.Strategy('s')
dts = pd.date_range('2010-01-01', periods=10)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100.)
data['c1'].loc[dts[0]] = np.nan
data['c1'].loc[dts[1]] = np.nan
s.setup(data)
s.update(dts[2])
assert algo(s)
selected = s.temp['selected']
assert len(selected) == 1
assert 'c2' in selected
def test_select_has_data_preselected():
algo = algos.SelectHasData(min_count=3, lookback=pd.DateOffset(days=3))
s = bt.Strategy('s')
dts = | pd.date_range('2010-01-01', periods=3) | pandas.date_range |
import pandas as pd
import tensorflow as tf
from Deep_learning.model import *
from Deep_learning.data_aux import *
tf.logging.set_verbosity(tf.logging.INFO)
# Parameters
TRAIN_PATH = '../Data/train_numerical.csv'
VALIDATION_PATH = '../Data/val_numerical.csv'
TEST_PATH = '../Data/test_numerical.csv'
MODEL_NAME = 'model_numerical'
MODEL_DIR = '../Model backlog/Tensorflow/' + MODEL_NAME
SUBMISSION_NAME = ('submission_%s.csv' % MODEL_NAME)
# Model parameters
LEARNING_RATE = 0.001
HIDDEN_UNITS = [32, 16]
TRAIN_STEPS = 50000
VALIDATION_STEPS = 1000
BATCH_SIZE = 128
CSV_COLUMNS = ['AVProductsEnabled', 'AVProductsInstalled', 'Census_InternalPrimaryDiagonalDisplaySizeInInches',
'Census_InternalPrimaryDisplayResolutionVertical', 'Census_ProcessorCoreCount',
'Census_SystemVolumeTotalCapacity', 'Census_TotalPhysicalRAM', 'RtpStateBitfield', 'HasDetections']
LABEL_COLUMN = 'HasDetections'
DEFAULTS = [[0.0], [0.0], [0.0], [0.0], [0.0], [0.0], [0.0], [0.0], [0.0]]
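# Note (assumption about read_dataset, which lives in Deep_learning.data_aux):
# DEFAULTS supplies one record default per entry in CSV_COLUMNS, so something
# like tf.decode_csv(row, record_defaults=DEFAULTS) would yield 9 float
# tensors, the last of which ('HasDetections') is split off as the label.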
INPUT_COLUMNS = [
# Raw data columns
tf.feature_column.numeric_column('AVProductsEnabled'),
tf.feature_column.numeric_column('AVProductsInstalled'),
tf.feature_column.numeric_column('Census_InternalPrimaryDiagonalDisplaySizeInInches'),
tf.feature_column.numeric_column('Census_InternalPrimaryDisplayResolutionVertical'),
tf.feature_column.numeric_column('Census_ProcessorCoreCount'),
tf.feature_column.numeric_column('Census_SystemVolumeTotalCapacity'),
tf.feature_column.numeric_column('Census_TotalPhysicalRAM'),
tf.feature_column.numeric_column('RtpStateBitfield')
]
train_spec = tf.estimator.TrainSpec(input_fn=read_dataset(TRAIN_PATH, mode=tf.estimator.ModeKeys.TRAIN,
features_cols=CSV_COLUMNS, label_col=LABEL_COLUMN,
default_value=DEFAULTS, batch_size=BATCH_SIZE),
max_steps=TRAIN_STEPS)
eval_spec = tf.estimator.EvalSpec(input_fn=read_dataset(VALIDATION_PATH, mode=tf.estimator.ModeKeys.EVAL,
features_cols=CSV_COLUMNS, label_col=LABEL_COLUMN,
default_value=DEFAULTS, batch_size=BATCH_SIZE),
steps=VALIDATION_STEPS, throttle_secs=600)
optimizer = tf.train.AdamOptimizer(learning_rate=LEARNING_RATE)
estimator = build_deep_estimator(MODEL_DIR, HIDDEN_UNITS, optimizer, INPUT_COLUMNS)
tf.estimator.train_and_evaluate(estimator, train_spec=train_spec, eval_spec=eval_spec)
# Make predictions
dtypes = {
'MachineIdentifier': 'category',
'ProductName': 'category',
'EngineVersion': 'category',
'RtpStateBitfield': 'float16',
'IsSxsPassiveMode': 'int8',
'AVProductsInstalled': 'float16',
'AVProductsEnabled': 'float16',
'OrganizationIdentifier': 'float16',
'Platform': 'category',
'Processor': 'category',
'OsVer': 'category',
'OsSuite': 'int16',
'OsPlatformSubRelease': 'category',
'SkuEdition': 'category',
'IsProtected': 'float16',
'SmartScreen': 'category',
'UacLuaenable': 'float32',
'Census_MDC2FormFactor': 'category',
'Census_DeviceFamily': 'category',
'Census_ProcessorCoreCount': 'float16',
'Census_ProcessorManufacturerIdentifier': 'float16',
'Census_PrimaryDiskTypeName': 'category',
'Census_SystemVolumeTotalCapacity': 'float32',
'Census_TotalPhysicalRAM': 'float32',
'Census_ChassisTypeName': 'category',
'Census_InternalPrimaryDiagonalDisplaySizeInInches': 'float16',
'Census_InternalPrimaryDisplayResolutionVertical': 'float16',
'Census_PowerPlatformRoleName': 'category',
'Census_OSArchitecture': 'category',
'Census_OSBranch': 'category',
'Census_OSEdition': 'category',
'Census_OSSkuName': 'category',
'Census_OSInstallTypeName': 'category',
'Census_OSInstallLanguageIdentifier': 'float16',
'Census_OSWUAutoUpdateOptionsName': 'category',
'Census_GenuineStateName': 'category',
'Census_ActivationChannel': 'category',
'Census_FlightRing': 'category',
'Census_IsVirtualDevice': 'float16',
'Census_IsTouchEnabled': 'int8',
'Census_IsAlwaysOnAlwaysConnectedCapable': 'float16',
'Wdft_IsGamer': 'float16',
'Wdft_RegionIdentifier': 'float16',
'HasDetections': 'int8'
}
test_raw = | pd.read_csv(TEST_PATH, dtype=dtypes) | pandas.read_csv |
# Licensed to Modin Development Team under one or more contributor license agreements.
# See the NOTICE file distributed with this work for additional information regarding
# copyright ownership. The Modin Development Team licenses this file to you under the
# Apache License, Version 2.0 (the "License"); you may not use this file except in
# compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under
# the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific language
# governing permissions and limitations under the License.
import numpy as np
import pandas
from pandas.core.common import is_bool_indexer
from pandas.core.indexing import check_bool_indexer
from pandas.core.dtypes.common import (
is_list_like,
is_numeric_dtype,
is_datetime_or_timedelta_dtype,
is_scalar,
)
from pandas.core.base import DataError
import warnings
from modin.backends.base.query_compiler import BaseQueryCompiler
from modin.error_message import ErrorMessage
from modin.utils import try_cast_to_pandas, wrap_udf_function
from modin.data_management.functions import (
FoldFunction,
MapFunction,
MapReduceFunction,
ReductionFunction,
BinaryFunction,
GroupbyReduceFunction,
)
def _get_axis(axis):
if axis == 0:
return lambda self: self._modin_frame.index
else:
return lambda self: self._modin_frame.columns
def _set_axis(axis):
if axis == 0:
def set_axis(self, idx):
self._modin_frame.index = idx
else:
def set_axis(self, cols):
self._modin_frame.columns = cols
return set_axis
def _str_map(func_name):
def str_op_builder(df, *args, **kwargs):
str_s = df.squeeze(axis=1).str
return getattr(pandas.Series.str, func_name)(str_s, *args, **kwargs).to_frame()
return str_op_builder
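# Illustrative use (not from the original source): _str_map("upper") builds a
# partition-level function equivalent to
#   lambda df: df.squeeze(axis=1).str.upper().to_frame()
# so string methods run per partition and come back as one-column frames.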
def _dt_prop_map(property_name):
"""
Create a function that accesses a property of the series' `dt` accessor.
Parameters
----------
property_name
The property of `dt`, which will be applied.
Returns
-------
A callable function to be applied to the partitions
Notes
-----
This applies non-callable properties of `Series.dt`.
"""
def dt_op_builder(df, *args, **kwargs):
prop_val = getattr(df.squeeze(axis=1).dt, property_name)
if isinstance(prop_val, pandas.Series):
return prop_val.to_frame()
elif isinstance(prop_val, pandas.DataFrame):
return prop_val
else:
return pandas.DataFrame([prop_val])
return dt_op_builder
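# Illustrative use (not from the original source): _dt_prop_map("hour") builds
# a function that returns df.squeeze(axis=1).dt.hour as a one-column DataFrame
# for each partition.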
def _dt_func_map(func_name):
"""
Create a function that calls a method of the series' `dt` accessor.
Parameters
----------
func_name
The method of `dt`, which will be applied.
Returns
-------
A callable function to be applied to the partitions
Notes
-----
This applies callable methods of `Series.dt`.
"""
def dt_op_builder(df, *args, **kwargs):
dt_s = df.squeeze(axis=1).dt
return pandas.DataFrame(
getattr(pandas.Series.dt, func_name)(dt_s, *args, **kwargs)
)
return dt_op_builder
def copy_df_for_func(func):
"""
Create a function that copies the dataframe, likely because `func` is inplace.
Parameters
----------
func : callable
The function, usually updates a dataframe inplace.
Returns
-------
callable
A callable function to be applied to the partitions
"""
def caller(df, *args, **kwargs):
df = df.copy()
func(df, *args, **kwargs)
return df
return caller
class PandasQueryCompiler(BaseQueryCompiler):
"""This class implements the logic necessary for operating on partitions
with a Pandas backend. This logic is specific to Pandas."""
def __init__(self, modin_frame):
self._modin_frame = modin_frame
def default_to_pandas(self, pandas_op, *args, **kwargs):
"""Default to pandas behavior.
Parameters
----------
pandas_op : callable
The operation to apply, must be compatible pandas DataFrame call
args
The arguments for the `pandas_op`
kwargs
The keyword arguments for the `pandas_op`
Returns
-------
PandasQueryCompiler
The result of the `pandas_op`, converted back to PandasQueryCompiler
Note
----
This operation takes a distributed object and converts it directly to pandas.
"""
ErrorMessage.default_to_pandas(str(pandas_op))
args = (a.to_pandas() if isinstance(a, type(self)) else a for a in args)
kwargs = {
k: v.to_pandas() if isinstance(v, type(self)) else v
for k, v in kwargs.items()
}
result = pandas_op(self.to_pandas(), *args, **kwargs)
if isinstance(result, pandas.Series):
if result.name is None:
result.name = "__reduced__"
result = result.to_frame()
if isinstance(result, pandas.DataFrame):
return self.from_pandas(result, type(self._modin_frame))
else:
return result
def to_pandas(self):
return self._modin_frame.to_pandas()
@classmethod
def from_pandas(cls, df, data_cls):
return cls(data_cls.from_pandas(df))
@classmethod
def from_arrow(cls, at, data_cls):
return cls(data_cls.from_arrow(at))
index = property(_get_axis(0), _set_axis(0))
columns = property(_get_axis(1), _set_axis(1))
@property
def dtypes(self):
return self._modin_frame.dtypes
# END Index, columns, and dtypes objects
# Metadata modification methods
def add_prefix(self, prefix, axis=1):
return self.__constructor__(self._modin_frame.add_prefix(prefix, axis))
def add_suffix(self, suffix, axis=1):
return self.__constructor__(self._modin_frame.add_suffix(suffix, axis))
# END Metadata modification methods
# Copy
# For copy, we don't want a situation where we modify the metadata of the
# copies if we end up modifying something here. We copy all of the metadata
# to prevent that.
def copy(self):
return self.__constructor__(self._modin_frame.copy())
# END Copy
# Append/Concat/Join (Not Merge)
# The append/concat/join operations should ideally never trigger remote
# compute. These operations should only ever be manipulations of the
# metadata of the resulting object. It should just be a simple matter of
# appending the other object's blocks and adding np.nan columns for the new
# columns, if needed. If new columns are added, some compute may be
# required, though it can be delayed.
#
# Currently this computation is not delayed, and it may make a copy of the
# DataFrame in memory. This can be problematic and should be fixed in the
# future. TODO (devin-petersohn): Delay reindexing
def concat(self, axis, other, **kwargs):
"""Concatenates two objects together.
Args:
axis: The axis index object to join (0 for columns, 1 for index).
other: The other_index to concat with.
Returns:
Concatenated objects.
"""
if not isinstance(other, list):
other = [other]
assert all(
isinstance(o, type(self)) for o in other
), "Different Manager objects are being used. This is not allowed"
sort = kwargs.get("sort", None)
if sort is None:
sort = False
join = kwargs.get("join", "outer")
ignore_index = kwargs.get("ignore_index", False)
other_modin_frame = [o._modin_frame for o in other]
new_modin_frame = self._modin_frame._concat(axis, other_modin_frame, join, sort)
result = self.__constructor__(new_modin_frame)
if ignore_index:
if axis == 0:
return result.reset_index(drop=True)
else:
result.columns = pandas.RangeIndex(len(result.columns))
return result
return result
# END Append/Concat/Join
# Data Management Methods
def free(self):
"""In the future, this will hopefully trigger a cleanup of this object."""
# TODO create a way to clean up this object.
return
# END Data Management Methods
# To NumPy
def to_numpy(self, **kwargs):
"""
Converts Modin DataFrame to NumPy array.
Returns
-------
NumPy array of the QueryCompiler.
"""
arr = self._modin_frame.to_numpy(**kwargs)
ErrorMessage.catch_bugs_and_request_email(
len(arr) != len(self.index) or len(arr[0]) != len(self.columns)
)
return arr
# END To NumPy
# Binary operations (e.g. add, sub)
# These operations require two DataFrames and will change the shape of the
# data if the index objects don't match. An outer join + op is performed,
# such that columns/rows that don't have an index on the other DataFrame
# result in NaN values.
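# Behavioural sketch (mirroring pandas semantics): adding two frames whose
# indexes are [0, 1] and [1, 2] yields rows 0 and 2 filled with NaN, because
# each of those labels is present in only one operand of the outer join.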
add = BinaryFunction.register(pandas.DataFrame.add)
combine = BinaryFunction.register(pandas.DataFrame.combine)
combine_first = BinaryFunction.register(pandas.DataFrame.combine_first)
eq = BinaryFunction.register(pandas.DataFrame.eq)
floordiv = BinaryFunction.register(pandas.DataFrame.floordiv)
ge = BinaryFunction.register(pandas.DataFrame.ge)
gt = BinaryFunction.register(pandas.DataFrame.gt)
le = BinaryFunction.register(pandas.DataFrame.le)
lt = BinaryFunction.register(pandas.DataFrame.lt)
mod = BinaryFunction.register(pandas.DataFrame.mod)
mul = BinaryFunction.register(pandas.DataFrame.mul)
ne = BinaryFunction.register(pandas.DataFrame.ne)
pow = BinaryFunction.register(pandas.DataFrame.pow)
rfloordiv = BinaryFunction.register(pandas.DataFrame.rfloordiv)
rmod = BinaryFunction.register(pandas.DataFrame.rmod)
rpow = BinaryFunction.register(pandas.DataFrame.rpow)
rsub = BinaryFunction.register(pandas.DataFrame.rsub)
rtruediv = BinaryFunction.register(pandas.DataFrame.rtruediv)
sub = BinaryFunction.register(pandas.DataFrame.sub)
truediv = BinaryFunction.register(pandas.DataFrame.truediv)
__and__ = BinaryFunction.register(pandas.DataFrame.__and__)
__or__ = BinaryFunction.register(pandas.DataFrame.__or__)
__rand__ = BinaryFunction.register(pandas.DataFrame.__rand__)
__ror__ = BinaryFunction.register(pandas.DataFrame.__ror__)
__rxor__ = BinaryFunction.register(pandas.DataFrame.__rxor__)
__xor__ = BinaryFunction.register(pandas.DataFrame.__xor__)
df_update = BinaryFunction.register(
copy_df_for_func(pandas.DataFrame.update), join_type="left"
)
series_update = BinaryFunction.register(
copy_df_for_func(
lambda x, y: pandas.Series.update(x.squeeze(axis=1), y.squeeze(axis=1))
),
join_type="left",
)
def where(self, cond, other, **kwargs):
"""Gets values from this manager where cond is true else from other.
Args:
cond: Condition on which to evaluate values.
Returns:
New QueryCompiler with updated data and index.
"""
assert isinstance(
cond, type(self)
), "Must have the same QueryCompiler subclass to perform this operation"
if isinstance(other, type(self)):
# Note: Currently we are doing this with two maps across the entire
# data. This can be done with a single map, but it will take a
# modification in the `BlockPartition` class.
# If this were in one pass it would be ~2x faster.
# TODO (devin-petersohn) rewrite this to take one pass.
def where_builder_first_pass(cond, other, **kwargs):
return cond.where(cond, other, **kwargs)
first_pass = cond._modin_frame._binary_op(
where_builder_first_pass, other._modin_frame, join_type="left"
)
def where_builder_second_pass(df, new_other, **kwargs):
return df.where(new_other.eq(True), new_other, **kwargs)
new_modin_frame = self._modin_frame._binary_op(
where_builder_second_pass, first_pass, join_type="left"
)
# This will be a Series of scalars to be applied based on the condition
# dataframe.
else:
def where_builder_series(df, cond):
return df.where(cond, other, **kwargs)
new_modin_frame = self._modin_frame._binary_op(
where_builder_series, cond._modin_frame, join_type="left"
)
return self.__constructor__(new_modin_frame)
def merge(self, right, **kwargs):
"""
Merge DataFrame or named Series objects with a database-style join.
Parameters
----------
right : PandasQueryCompiler
The query compiler of the right DataFrame to merge with.
Returns
-------
PandasQueryCompiler
A new query compiler that contains result of the merge.
Notes
-----
See pd.merge or pd.DataFrame.merge for more info on kwargs.
"""
how = kwargs.get("how", "inner")
on = kwargs.get("on", None)
left_on = kwargs.get("left_on", None)
right_on = kwargs.get("right_on", None)
left_index = kwargs.get("left_index", False)
right_index = kwargs.get("right_index", False)
sort = kwargs.get("sort", False)
if how in ["left", "inner"] and left_index is False and right_index is False:
right = right.to_pandas()
kwargs["sort"] = False
def map_func(left, right=right, kwargs=kwargs):
return pandas.merge(left, right, **kwargs)
new_self = self.__constructor__(
self._modin_frame._apply_full_axis(1, map_func)
)
is_reset_index = True
if left_on and right_on:
left_on = left_on if is_list_like(left_on) else [left_on]
right_on = right_on if is_list_like(right_on) else [right_on]
is_reset_index = (
False
if any(o in new_self.index.names for o in left_on)
and any(o in right.index.names for o in right_on)
else True
)
if sort:
new_self = (
new_self.sort_rows_by_column_values(left_on.append(right_on))
if is_reset_index
else new_self.sort_index(axis=0, level=left_on.append(right_on))
)
if on:
on = on if is_list_like(on) else [on]
is_reset_index = not any(
o in new_self.index.names and o in right.index.names for o in on
)
if sort:
new_self = (
new_self.sort_rows_by_column_values(on)
if is_reset_index
else new_self.sort_index(axis=0, level=on)
)
return new_self.reset_index(drop=True) if is_reset_index else new_self
else:
return self.default_to_pandas(pandas.DataFrame.merge, right, **kwargs)
def join(self, right, **kwargs):
"""
Join columns of another DataFrame.
Parameters
----------
right : BaseQueryCompiler
The query compiler of the right DataFrame to join with.
Returns
-------
BaseQueryCompiler
A new query compiler that contains result of the join.
Notes
-----
See pd.DataFrame.join for more info on kwargs.
"""
on = kwargs.get("on", None)
how = kwargs.get("how", "left")
sort = kwargs.get("sort", False)
if how in ["left", "inner"]:
right = right.to_pandas()
def map_func(left, right=right, kwargs=kwargs):
return pandas.DataFrame.join(left, right, **kwargs)
new_self = self.__constructor__(
self._modin_frame._apply_full_axis(1, map_func)
)
return new_self.sort_rows_by_column_values(on) if sort else new_self
else:
return self.default_to_pandas(pandas.DataFrame.join, right, **kwargs)
# END Inter-Data operations
# Reindex/reset_index (may shuffle data)
def reindex(self, axis, labels, **kwargs):
"""Fits a new index for this Manager.
Args:
axis: The axis index object to target the reindex on.
labels: New labels to conform 'axis' on to.
Returns:
A new QueryCompiler with updated data and new index.
"""
new_index = self.index if axis else labels
new_columns = labels if axis else self.columns
new_modin_frame = self._modin_frame._apply_full_axis(
axis,
lambda df: df.reindex(labels=labels, axis=axis, **kwargs),
new_index=new_index,
new_columns=new_columns,
)
return self.__constructor__(new_modin_frame)
def reset_index(self, **kwargs):
"""Removes all levels from index and sets a default level_0 index.
Returns:
A new QueryCompiler with updated data and reset index.
"""
drop = kwargs.get("drop", False)
level = kwargs.get("level", None)
# TODO Implement level
if level is not None or self.has_multiindex():
return self.default_to_pandas(pandas.DataFrame.reset_index, **kwargs)
if not drop:
new_column_name = (
self.index.name
if self.index.name is not None
else "index"
if "index" not in self.columns
else "level_0"
)
new_self = self.insert(0, new_column_name, self.index)
else:
new_self = self.copy()
new_self.index = pandas.RangeIndex(len(new_self.index))
return new_self
# END Reindex/reset_index
# Transpose
# For transpose, we aren't going to immediately copy everything. Since the
# actual transpose operation is very fast, we will just do it before any
# operation that gets called on the transposed data. See _prepare_method
# for how the transpose is applied.
#
# Our invariants assume that the blocks are transposed, but not the
# data inside. Sometimes we have to reverse this transposition of blocks
# for simplicity of implementation.
def transpose(self, *args, **kwargs):
"""Transposes this QueryCompiler.
Returns:
Transposed new QueryCompiler.
"""
# Switch the index and columns and transpose the data within the blocks.
return self.__constructor__(self._modin_frame.transpose())
def columnarize(self):
"""
Transposes this QueryCompiler if it has a single row but multiple columns.
This method should be called for QueryCompilers representing a Series object,
i.e. self.is_series_like() should be True.
Returns
-------
PandasQueryCompiler
Transposed new QueryCompiler or self.
"""
if len(self.columns) != 1 or (
len(self.index) == 1 and self.index[0] == "__reduced__"
):
return self.transpose()
return self
def is_series_like(self):
"""Return True if QueryCompiler has a single column or row"""
return len(self.columns) == 1 or len(self.index) == 1
# END Transpose
# MapReduce operations
def _is_monotonic(self, func_type=None):
funcs = {
"increasing": lambda df: df.is_monotonic_increasing,
"decreasing": lambda df: df.is_monotonic_decreasing,
}
monotonic_fn = funcs.get(func_type, funcs["increasing"])
def is_monotonic_map(df):
df = df.squeeze(axis=1)
return [monotonic_fn(df), df.iloc[0], df.iloc[len(df) - 1]]
def is_monotonic_reduce(df):
df = df.squeeze(axis=1)
common_case = df[0].all()
left_edges = df[1]
right_edges = df[2]
edges_list = []
for i in range(len(left_edges)):
edges_list.extend([left_edges.iloc[i], right_edges.iloc[i]])
edge_case = monotonic_fn(pandas.Series(edges_list))
return [common_case and edge_case]
return MapReduceFunction.register(
is_monotonic_map, is_monotonic_reduce, axis=0
)(self)
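# Worked example of the map/reduce split: partitions [1, 2, 3] and [2, 4] are
# each monotonic increasing, but the reduce step also checks the stitched
# edge values [1, 3, 2, 4], which are not monotonic, so the overall answer is
# correctly False for the full column [1, 2, 3, 2, 4].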
def is_monotonic_decreasing(self):
return self._is_monotonic(func_type="decreasing")
is_monotonic = _is_monotonic
count = MapReduceFunction.register(pandas.DataFrame.count, pandas.DataFrame.sum)
max = MapReduceFunction.register(pandas.DataFrame.max, pandas.DataFrame.max)
min = MapReduceFunction.register(pandas.DataFrame.min, pandas.DataFrame.min)
sum = MapReduceFunction.register(pandas.DataFrame.sum, pandas.DataFrame.sum)
prod = MapReduceFunction.register(pandas.DataFrame.prod, pandas.DataFrame.prod)
any = MapReduceFunction.register(pandas.DataFrame.any, pandas.DataFrame.any)
all = MapReduceFunction.register(pandas.DataFrame.all, pandas.DataFrame.all)
memory_usage = MapReduceFunction.register(
pandas.DataFrame.memory_usage,
lambda x, *args, **kwargs: pandas.DataFrame.sum(x),
axis=0,
)
mean = MapReduceFunction.register(
lambda df, **kwargs: df.apply(
lambda x: (x.sum(skipna=kwargs.get("skipna", True)), x.count()),
axis=kwargs.get("axis", 0),
result_type="reduce",
).set_axis(df.axes[kwargs.get("axis", 0) ^ 1], axis=0),
lambda df, **kwargs: df.apply(
lambda x: x.apply(lambda d: d[0]).sum(skipna=kwargs.get("skipna", True))
/ x.apply(lambda d: d[1]).sum(skipna=kwargs.get("skipna", True)),
axis=kwargs.get("axis", 0),
).set_axis(df.axes[kwargs.get("axis", 0) ^ 1], axis=0),
)
def value_counts(self, **kwargs):
"""
Return a QueryCompiler of Series containing counts of unique values.
Returns
-------
PandasQueryCompiler
"""
if kwargs.get("bins", None) is not None:
new_modin_frame = self._modin_frame._apply_full_axis(
0, lambda df: df.squeeze(axis=1).value_counts(**kwargs)
)
return self.__constructor__(new_modin_frame)
def map_func(df, *args, **kwargs):
return df.squeeze(axis=1).value_counts(**kwargs)
def reduce_func(df, *args, **kwargs):
normalize = kwargs.get("normalize", False)
sort = kwargs.get("sort", True)
ascending = kwargs.get("ascending", False)
dropna = kwargs.get("dropna", True)
try:
result = df.squeeze(axis=1).groupby(df.index, sort=False).sum()
# This will happen with Arrow buffer read-only errors. We don't want to copy
# all the time, so this will try to fast-path the code first.
except (ValueError):
result = df.copy().squeeze(axis=1).groupby(df.index, sort=False).sum()
if not dropna and np.nan in df.index:
result = result.append(
pandas.Series(
[df.squeeze(axis=1).loc[[np.nan]].sum()], index=[np.nan]
)
)
if normalize:
result = result / df.squeeze(axis=1).sum()
result = result.sort_values(ascending=ascending) if sort else result
# We want to sort both values and indices of the result object.
# This function will sort indices for equal values.
def sort_index_for_equal_values(result, ascending):
"""
Sort indices for equal values of result object.
Parameters
----------
result : pandas.Series or pandas.DataFrame with one column
The object whose indices for equal values is needed to sort.
ascending : boolean
Sort in ascending (if it is True) or descending (if it is False) order.
Returns
-------
pandas.DataFrame
A new DataFrame with sorted indices.
"""
is_range = False
is_end = False
i = 0
new_index = np.empty(len(result), dtype=type(result.index))
while i < len(result):
j = i
if i < len(result) - 1:
while result[result.index[i]] == result[result.index[i + 1]]:
i += 1
if is_range is False:
is_range = True
if i == len(result) - 1:
is_end = True
break
if is_range:
k = j
for val in sorted(
result.index[j : i + 1], reverse=not ascending
):
new_index[k] = val
k += 1
if is_end:
break
is_range = False
else:
new_index[j] = result.index[j]
i += 1
return pandas.DataFrame(result, index=new_index)
return sort_index_for_equal_values(result, ascending)
return MapReduceFunction.register(map_func, reduce_func, preserve_index=False)(
self, **kwargs
)
# END MapReduce operations
# Reduction operations
idxmax = ReductionFunction.register(pandas.DataFrame.idxmax)
idxmin = ReductionFunction.register(pandas.DataFrame.idxmin)
median = ReductionFunction.register(pandas.DataFrame.median)
nunique = ReductionFunction.register(pandas.DataFrame.nunique)
skew = ReductionFunction.register(pandas.DataFrame.skew)
kurt = ReductionFunction.register(pandas.DataFrame.kurt)
sem = ReductionFunction.register(pandas.DataFrame.sem)
std = ReductionFunction.register(pandas.DataFrame.std)
var = ReductionFunction.register(pandas.DataFrame.var)
sum_min_count = ReductionFunction.register(pandas.DataFrame.sum)
prod_min_count = ReductionFunction.register(pandas.DataFrame.prod)
quantile_for_single_value = ReductionFunction.register(pandas.DataFrame.quantile)
mad = ReductionFunction.register(pandas.DataFrame.mad)
to_datetime = ReductionFunction.register(
lambda df, *args, **kwargs: pandas.to_datetime(
df.squeeze(axis=1), *args, **kwargs
),
axis=1,
)
# END Reduction operations
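    # All resample_* methods funnel through _resample_func, which applies the matching
    # pandas.core.resample.Resampler method to each column along axis 0.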
def _resample_func(
self, resample_args, func_name, new_columns=None, df_op=None, *args, **kwargs
):
def map_func(df, resample_args=resample_args):
if df_op is not None:
df = df_op(df)
resampled_val = df.resample(*resample_args)
op = getattr(pandas.core.resample.Resampler, func_name)
if callable(op):
try:
# This will happen with Arrow buffer read-only errors. We don't want to copy
# all the time, so this will try to fast-path the code first.
val = op(resampled_val, *args, **kwargs)
except (ValueError):
resampled_val = df.copy().resample(*resample_args)
val = op(resampled_val, *args, **kwargs)
else:
val = getattr(resampled_val, func_name)
if isinstance(val, pandas.Series):
return val.to_frame()
else:
return val
new_modin_frame = self._modin_frame._apply_full_axis(
axis=0, func=map_func, new_columns=new_columns
)
return self.__constructor__(new_modin_frame)
def resample_get_group(self, resample_args, name, obj):
return self._resample_func(resample_args, "get_group", name=name, obj=obj)
def resample_app_ser(self, resample_args, func, *args, **kwargs):
return self._resample_func(
resample_args,
"apply",
df_op=lambda df: df.squeeze(axis=1),
func=func,
*args,
**kwargs,
)
def resample_app_df(self, resample_args, func, *args, **kwargs):
return self._resample_func(resample_args, "apply", func=func, *args, **kwargs)
def resample_agg_ser(self, resample_args, func, *args, **kwargs):
return self._resample_func(
resample_args,
"aggregate",
df_op=lambda df: df.squeeze(axis=1),
func=func,
*args,
**kwargs,
)
def resample_agg_df(self, resample_args, func, *args, **kwargs):
return self._resample_func(
resample_args, "aggregate", func=func, *args, **kwargs
)
def resample_transform(self, resample_args, arg, *args, **kwargs):
return self._resample_func(resample_args, "transform", arg=arg, *args, **kwargs)
def resample_pipe(self, resample_args, func, *args, **kwargs):
return self._resample_func(resample_args, "pipe", func=func, *args, **kwargs)
def resample_ffill(self, resample_args, limit):
return self._resample_func(resample_args, "ffill", limit=limit)
def resample_backfill(self, resample_args, limit):
return self._resample_func(resample_args, "backfill", limit=limit)
def resample_bfill(self, resample_args, limit):
return self._resample_func(resample_args, "bfill", limit=limit)
def resample_pad(self, resample_args, limit):
return self._resample_func(resample_args, "pad", limit=limit)
def resample_nearest(self, resample_args, limit):
return self._resample_func(resample_args, "nearest", limit=limit)
def resample_fillna(self, resample_args, method, limit):
return self._resample_func(resample_args, "fillna", method=method, limit=limit)
def resample_asfreq(self, resample_args, fill_value):
return self._resample_func(resample_args, "asfreq", fill_value=fill_value)
def resample_interpolate(
self,
resample_args,
method,
axis,
limit,
inplace,
limit_direction,
limit_area,
downcast,
**kwargs,
):
return self._resample_func(
resample_args,
"interpolate",
axis=axis,
limit=limit,
inplace=inplace,
limit_direction=limit_direction,
limit_area=limit_area,
downcast=downcast,
**kwargs,
)
def resample_count(self, resample_args):
return self._resample_func(resample_args, "count")
def resample_nunique(self, resample_args, _method, *args, **kwargs):
return self._resample_func(
resample_args, "nunique", _method=_method, *args, **kwargs
)
def resample_first(self, resample_args, _method, *args, **kwargs):
return self._resample_func(
resample_args, "first", _method=_method, *args, **kwargs
)
def resample_last(self, resample_args, _method, *args, **kwargs):
return self._resample_func(
resample_args, "last", _method=_method, *args, **kwargs
)
def resample_max(self, resample_args, _method, *args, **kwargs):
return self._resample_func(
resample_args, "max", _method=_method, *args, **kwargs
)
    def resample_mean(self, resample_args, _method, *args, **kwargs):
        return self._resample_func(
            resample_args, "mean", _method=_method, *args, **kwargs
        )
def resample_median(self, resample_args, _method, *args, **kwargs):
return self._resample_func(
resample_args, "median", _method=_method, *args, **kwargs
)
def resample_min(self, resample_args, _method, *args, **kwargs):
return self._resample_func(
resample_args, "min", _method=_method, *args, **kwargs
)
def resample_ohlc_ser(self, resample_args, _method, *args, **kwargs):
return self._resample_func(
resample_args,
"ohlc",
df_op=lambda df: df.squeeze(axis=1),
_method=_method,
*args,
**kwargs,
)
def resample_ohlc_df(self, resample_args, _method, *args, **kwargs):
return self._resample_func(
resample_args, "ohlc", _method=_method, *args, **kwargs
)
def resample_prod(self, resample_args, _method, min_count, *args, **kwargs):
return self._resample_func(
resample_args, "prod", _method=_method, min_count=min_count, *args, **kwargs
)
def resample_size(self, resample_args):
return self._resample_func(resample_args, "size", new_columns=["__reduced__"])
def resample_sem(self, resample_args, _method, *args, **kwargs):
return self._resample_func(
resample_args, "sem", _method=_method, *args, **kwargs
)
def resample_std(self, resample_args, ddof, *args, **kwargs):
return self._resample_func(resample_args, "std", ddof=ddof, *args, **kwargs)
def resample_sum(self, resample_args, _method, min_count, *args, **kwargs):
return self._resample_func(
resample_args, "sum", _method=_method, min_count=min_count, *args, **kwargs
)
def resample_var(self, resample_args, ddof, *args, **kwargs):
return self._resample_func(resample_args, "var", ddof=ddof, *args, **kwargs)
def resample_quantile(self, resample_args, q, **kwargs):
return self._resample_func(resample_args, "quantile", q=q, **kwargs)
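    # Rolling/window operations are registered as Fold functions that apply the
    # pandas rolling API along the full axis of each partition.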
window_mean = FoldFunction.register(
lambda df, rolling_args, *args, **kwargs: pandas.DataFrame(
df.rolling(*rolling_args).mean(*args, **kwargs)
)
)
window_sum = FoldFunction.register(
lambda df, rolling_args, *args, **kwargs: pandas.DataFrame(
df.rolling(*rolling_args).sum(*args, **kwargs)
)
)
window_var = FoldFunction.register(
lambda df, rolling_args, ddof, *args, **kwargs: pandas.DataFrame(
df.rolling(*rolling_args).var(ddof=ddof, *args, **kwargs)
)
)
window_std = FoldFunction.register(
lambda df, rolling_args, ddof, *args, **kwargs: pandas.DataFrame(
df.rolling(*rolling_args).std(ddof=ddof, *args, **kwargs)
)
)
rolling_count = FoldFunction.register(
lambda df, rolling_args: pandas.DataFrame(df.rolling(*rolling_args).count())
)
rolling_sum = FoldFunction.register(
lambda df, rolling_args, *args, **kwargs: pandas.DataFrame(
df.rolling(*rolling_args).sum(*args, **kwargs)
)
)
rolling_mean = FoldFunction.register(
lambda df, rolling_args, *args, **kwargs: pandas.DataFrame(
df.rolling(*rolling_args).mean(*args, **kwargs)
)
)
rolling_median = FoldFunction.register(
lambda df, rolling_args, **kwargs: pandas.DataFrame(
df.rolling(*rolling_args).median(**kwargs)
)
)
rolling_var = FoldFunction.register(
lambda df, rolling_args, ddof, *args, **kwargs: pandas.DataFrame(
df.rolling(*rolling_args).var(ddof=ddof, *args, **kwargs)
)
)
rolling_std = FoldFunction.register(
lambda df, rolling_args, ddof, *args, **kwargs: pandas.DataFrame(
df.rolling(*rolling_args).std(ddof=ddof, *args, **kwargs)
)
)
rolling_min = FoldFunction.register(
lambda df, rolling_args, *args, **kwargs: pandas.DataFrame(
df.rolling(*rolling_args).min(*args, **kwargs)
)
)
rolling_max = FoldFunction.register(
lambda df, rolling_args, *args, **kwargs: pandas.DataFrame(
df.rolling(*rolling_args).max(*args, **kwargs)
)
)
rolling_skew = FoldFunction.register(
lambda df, rolling_args, **kwargs: pandas.DataFrame(
df.rolling(*rolling_args).skew(**kwargs)
)
)
rolling_kurt = FoldFunction.register(
lambda df, rolling_args, **kwargs: pandas.DataFrame(
df.rolling(*rolling_args).kurt(**kwargs)
)
)
rolling_apply = FoldFunction.register(
lambda df, rolling_args, func, raw, engine, engine_kwargs, args, kwargs: pandas.DataFrame(
df.rolling(*rolling_args).apply(
func=func,
raw=raw,
engine=engine,
engine_kwargs=engine_kwargs,
args=args,
kwargs=kwargs,
)
)
)
rolling_quantile = FoldFunction.register(
lambda df, rolling_args, quantile, interpolation, **kwargs: pandas.DataFrame(
df.rolling(*rolling_args).quantile(
quantile=quantile, interpolation=interpolation, **kwargs
)
)
)
def rolling_corr(self, rolling_args, other, pairwise, *args, **kwargs):
if len(self.columns) > 1:
return self.default_to_pandas(
lambda df: pandas.DataFrame.rolling(df, *rolling_args).corr(
other=other, pairwise=pairwise, *args, **kwargs
)
)
else:
return FoldFunction.register(
lambda df: pandas.DataFrame(
df.rolling(*rolling_args).corr(
other=other, pairwise=pairwise, *args, **kwargs
)
)
)(self)
def rolling_cov(self, rolling_args, other, pairwise, ddof, **kwargs):
if len(self.columns) > 1:
return self.default_to_pandas(
lambda df: pandas.DataFrame.rolling(df, *rolling_args).cov(
other=other, pairwise=pairwise, ddof=ddof, **kwargs
)
)
else:
return FoldFunction.register(
lambda df: pandas.DataFrame(
df.rolling(*rolling_args).cov(
other=other, pairwise=pairwise, ddof=ddof, **kwargs
)
)
)(self)
def rolling_aggregate(self, rolling_args, func, *args, **kwargs):
new_modin_frame = self._modin_frame._apply_full_axis(
0,
lambda df: pandas.DataFrame(
df.rolling(*rolling_args).aggregate(func=func, *args, **kwargs)
),
new_index=self.index,
)
return self.__constructor__(new_modin_frame)
def unstack(self, level, fill_value):
if not isinstance(self.index, pandas.MultiIndex) or (
isinstance(self.index, pandas.MultiIndex)
and is_list_like(level)
and len(level) == self.index.nlevels
):
axis = 1
new_columns = ["__reduced__"]
need_reindex = True
else:
axis = 0
new_columns = None
need_reindex = False
def map_func(df):
return pandas.DataFrame(df.unstack(level=level, fill_value=fill_value))
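        # An index is "tree like" here when it is a full cartesian product of its
        # levels; only then can the unstacked axis labels be rebuilt analytically.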
def is_tree_like_or_1d(calc_index, valid_index):
if not isinstance(calc_index, pandas.MultiIndex):
return True
actual_len = 1
for lvl in calc_index.levels:
actual_len *= len(lvl)
return len(self.index) * len(self.columns) == actual_len * len(valid_index)
is_tree_like_or_1d_index = is_tree_like_or_1d(self.index, self.columns)
is_tree_like_or_1d_cols = is_tree_like_or_1d(self.columns, self.index)
is_all_multi_list = False
if (
isinstance(self.index, pandas.MultiIndex)
and isinstance(self.columns, pandas.MultiIndex)
and is_list_like(level)
and len(level) == self.index.nlevels
and is_tree_like_or_1d_index
and is_tree_like_or_1d_cols
):
is_all_multi_list = True
real_cols_bkp = self.columns
obj = self.copy()
obj.columns = np.arange(len(obj.columns))
else:
obj = self
new_modin_frame = obj._modin_frame._apply_full_axis(
axis, map_func, new_columns=new_columns
)
result = self.__constructor__(new_modin_frame)
def compute_index(index, columns, consider_index=True, consider_columns=True):
def get_unique_level_values(index):
return [
index.get_level_values(lvl).unique()
for lvl in np.arange(index.nlevels)
]
new_index = (
get_unique_level_values(index)
if consider_index
else index
if isinstance(index, list)
else [index]
)
new_columns = (
get_unique_level_values(columns) if consider_columns else [columns]
)
return pandas.MultiIndex.from_product([*new_columns, *new_index])
if is_all_multi_list and is_tree_like_or_1d_index and is_tree_like_or_1d_cols:
result = result.sort_index()
index_level_values = [lvl for lvl in obj.index.levels]
result.index = compute_index(
index_level_values, real_cols_bkp, consider_index=False
)
return result
if need_reindex:
if is_tree_like_or_1d_index and is_tree_like_or_1d_cols:
is_recompute_index = isinstance(self.index, pandas.MultiIndex)
is_recompute_columns = not is_recompute_index and isinstance(
self.columns, pandas.MultiIndex
)
new_index = compute_index(
self.index, self.columns, is_recompute_index, is_recompute_columns
)
elif is_tree_like_or_1d_index != is_tree_like_or_1d_cols:
if isinstance(self.columns, pandas.MultiIndex) or not isinstance(
self.index, pandas.MultiIndex
):
return result
else:
index = (
self.index.sortlevel()[0]
if is_tree_like_or_1d_index
and not is_tree_like_or_1d_cols
and isinstance(self.index, pandas.MultiIndex)
else self.index
)
index = pandas.MultiIndex.from_tuples(
list(index) * len(self.columns)
)
columns = self.columns.repeat(len(self.index))
index_levels = [
index.get_level_values(i) for i in range(index.nlevels)
]
new_index = pandas.MultiIndex.from_arrays(
[columns] + index_levels,
names=self.columns.names + self.index.names,
)
else:
return result
result = result.reindex(0, new_index)
return result
def stack(self, level, dropna):
if not isinstance(self.columns, pandas.MultiIndex) or (
isinstance(self.columns, pandas.MultiIndex)
and | is_list_like(level) | pandas.core.dtypes.common.is_list_like |
from __future__ import print_function
import collections
import json
import logging
import os
import pickle
import random
import sys
import numpy as np
import pandas as pd
import keras
from itertools import cycle, islice
from sklearn.preprocessing import Imputer
from sklearn.preprocessing import StandardScaler, MinMaxScaler, MaxAbsScaler
from sklearn.model_selection import ShuffleSplit, KFold
import file_utils
file_path = os.path.dirname(os.path.realpath(__file__))
lib_path = os.path.abspath(os.path.join(file_path, '..', '..', 'common'))
sys.path.append(lib_path)
# import candle
import file_utils
global_cache = {}
SEED = 2018
P1B3_URL = 'http://ftp.mcs.anl.gov/pub/candle/public/benchmarks/P1B3/'
DATA_URL = 'http://ftp.mcs.anl.gov/pub/candle/public/benchmarks/Pilot1/combo/'
logger = logging.getLogger(__name__)
def set_up_logger(verbose=False):
sh = logging.StreamHandler()
sh.setFormatter(logging.Formatter(''))
sh.setLevel(logging.DEBUG if verbose else logging.INFO)
logger.setLevel(logging.DEBUG)
logger.addHandler(sh)
def set_seed(seed=SEED):
os.environ['PYTHONHASHSEED'] = '0'
np.random.seed(seed)
random.seed(seed)
def get_file(url):
fname = os.path.basename(url)
return file_utils.get_file(fname, origin=url, cache_subdir='Pilot1')
def impute_and_scale(df, scaling='std', imputing='mean', dropna='all'):
"""Impute missing values with mean and scale data included in pandas dataframe.
Parameters
----------
df : pandas dataframe
dataframe to impute and scale
scaling : 'maxabs' [-1,1], 'minmax' [0,1], 'std', or None, optional (default 'std')
type of scaling to apply
"""
if dropna:
df = df.dropna(axis=1, how=dropna)
else:
empty_cols = df.columns[df.notnull().sum() == 0]
df[empty_cols] = 0
if imputing is None or imputing.lower() == 'none':
mat = df.values
else:
imputer = Imputer(strategy=imputing, axis=0)
mat = imputer.fit_transform(df)
if scaling is None or scaling.lower() == 'none':
return pd.DataFrame(mat, columns=df.columns)
if scaling == 'maxabs':
scaler = MaxAbsScaler()
elif scaling == 'minmax':
scaler = MinMaxScaler()
else:
scaler = StandardScaler()
mat = scaler.fit_transform(mat)
df = pd.DataFrame(mat, columns=df.columns)
return df
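# Illustrative usage (not part of the pipeline): impute missing values with column
# means and standardize each feature before modeling.
# df_scaled = impute_and_scale(df_raw, scaling='std', imputing='mean')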
def discretize(df, col, bins=2, cutoffs=None):
y = df[col]
thresholds = cutoffs
if thresholds is None:
percentiles = [100 / bins * (i + 1) for i in range(bins - 1)]
thresholds = [np.percentile(y, x) for x in percentiles]
classes = np.digitize(y, thresholds)
df[col] = classes
return df
def save_combined_dose_response():
df1 = load_single_dose_response(combo_format=True, fraction=False)
df2 = load_combo_dose_response(fraction=False)
df = pd.concat([df1, df2])
df.to_csv('combined_drug_growth', index=False, sep='\t')
def load_combined_dose_response(rename=True):
df1 = load_single_dose_response(combo_format=True)
logger.info('Loaded {} single drug dose response measurements'.format(df1.shape[0]))
df2 = load_combo_dose_response()
logger.info('Loaded {} drug pair dose response measurements'.format(df2.shape[0]))
df = pd.concat([df1, df2])
logger.info('Combined dose response data contains sources: {}'.format(df['SOURCE'].unique()))
if rename:
df = df.rename(columns={'SOURCE': 'Source', 'CELL': 'Sample',
'DRUG1': 'Drug1', 'DRUG2': 'Drug2',
'DOSE1': 'Dose1', 'DOSE2': 'Dose2',
'GROWTH': 'Growth', 'STUDY': 'Study'})
return df
def load_single_dose_response(combo_format=False, fraction=True):
# path = get_file(DATA_URL + 'combined_single_drug_growth')
path = get_file(DATA_URL + 'rescaled_combined_single_drug_growth')
df = global_cache.get(path)
if df is None:
df = pd.read_csv(path, sep='\t', engine='c',
na_values=['na', '-', ''],
# nrows=10,
dtype={'SOURCE': str, 'DRUG_ID': str,
'CELLNAME': str, 'CONCUNIT': str,
'LOG_CONCENTRATION': np.float32,
'EXPID': str, 'GROWTH': np.float32})
global_cache[path] = df
df['DOSE'] = -df['LOG_CONCENTRATION']
df = df.rename(columns={'CELLNAME': 'CELL', 'DRUG_ID': 'DRUG', 'EXPID': 'STUDY'})
df = df[['SOURCE', 'CELL', 'DRUG', 'DOSE', 'GROWTH', 'STUDY']]
if fraction:
df['GROWTH'] /= 100
if combo_format:
df = df.rename(columns={'DRUG': 'DRUG1', 'DOSE': 'DOSE1'})
df['DRUG2'] = np.nan
df['DOSE2'] = np.nan
df['DRUG2'] = df['DRUG2'].astype(object)
df['DOSE2'] = df['DOSE2'].astype(np.float32)
df = df[['SOURCE', 'CELL', 'DRUG1', 'DOSE1', 'DRUG2', 'DOSE2', 'GROWTH', 'STUDY']]
return df
def load_combo_dose_response(fraction=True):
path = get_file(DATA_URL + 'ComboDrugGrowth_Nov2017.csv')
df = global_cache.get(path)
if df is None:
df = pd.read_csv(path, sep=',', engine='c',
na_values=['na', '-', ''],
usecols=['CELLNAME', 'NSC1', 'CONC1', 'NSC2', 'CONC2',
'PERCENTGROWTH', 'VALID', 'SCREENER', 'STUDY'],
# nrows=10000,
dtype={'CELLNAME': str, 'NSC1': str, 'NSC2': str,
'CONC1': np.float32, 'CONC2': np.float32,
'PERCENTGROWTH': np.float32, 'VALID': str,
'SCREENER': str, 'STUDY': str},
error_bad_lines=False, warn_bad_lines=True)
global_cache[path] = df
df = df[df['VALID'] == 'Y']
df['SOURCE'] = 'ALMANAC.' + df['SCREENER']
cellmap_path = get_file(DATA_URL + 'NCI60_CELLNAME_to_Combo.txt')
df_cellmap = pd.read_csv(cellmap_path, sep='\t')
df_cellmap.set_index('Name', inplace=True)
cellmap = df_cellmap[['NCI60.ID']].to_dict()['NCI60.ID']
df['CELL'] = df['CELLNAME'].map(lambda x: cellmap[x])
df['DOSE1'] = -np.log10(df['CONC1'])
df['DOSE2'] = -np.log10(df['CONC2'])
df['DRUG1'] = 'NSC.' + df['NSC1']
df['DRUG2'] = 'NSC.' + df['NSC2']
if fraction:
df['GROWTH'] = df['PERCENTGROWTH'] / 100
else:
df['GROWTH'] = df['PERCENTGROWTH']
df = df[['SOURCE', 'CELL', 'DRUG1', 'DOSE1', 'DRUG2', 'DOSE2', 'GROWTH', 'STUDY']]
return df
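# Dose-independent response metrics are filtered by curve-fit quality below:
# keep rows with R2fit >= min_r2_fit and EC50se <= max_ec50_se.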
def load_aggregated_single_response(target='AUC', min_r2_fit=0.3, max_ec50_se=3, combo_format=False, rename=True):
path = get_file(DATA_URL + 'combined_single_response_agg')
df = global_cache.get(path)
if df is None:
df = pd.read_csv(path, engine='c', sep='\t',
dtype={'SOURCE': str, 'CELL': str, 'DRUG': str, 'STUDY': str,
'AUC': np.float32, 'IC50': np.float32,
'EC50': np.float32, 'EC50se': np.float32,
'R2fit': np.float32, 'Einf': np.float32,
'HS': np.float32, 'AAC1': np.float32,
'AUC1': np.float32, 'DSS1': np.float32})
global_cache[path] = df
total = len(df)
df = df[(df['R2fit'] >= min_r2_fit) & (df['EC50se'] <= max_ec50_se)]
df = df[['SOURCE', 'CELL', 'DRUG', target, 'STUDY']]
df = df[~df[target].isnull()]
    logger.info('Loaded %d dose independent response samples (filtered by EC50se <= %f & R2fit >= %f from a total of %d).', len(df), max_ec50_se, min_r2_fit, total)
if combo_format:
df = df.rename(columns={'DRUG': 'DRUG1'})
df['DRUG2'] = np.nan
df['DRUG2'] = df['DRUG2'].astype(object)
df = df[['SOURCE', 'CELL', 'DRUG1', 'DRUG2', target, 'STUDY']]
if rename:
df = df.rename(columns={'SOURCE': 'Source', 'CELL': 'Sample',
'DRUG1': 'Drug1', 'DRUG2': 'Drug2', 'STUDY': 'Study'})
else:
if rename:
df = df.rename(columns={'SOURCE': 'Source', 'CELL': 'Sample',
'DRUG': 'Drug', 'STUDY': 'Study'})
return df
def load_drug_data(ncols=None, scaling='std', imputing='mean', dropna=None, add_prefix=True):
df_info = load_drug_info()
df_info['Drug'] = df_info['PUBCHEM']
df_desc = load_drug_set_descriptors(drug_set='Combined_PubChem', ncols=ncols)
df_fp = load_drug_set_fingerprints(drug_set='Combined_PubChem', ncols=ncols)
df_desc = pd.merge(df_info[['ID', 'Drug']], df_desc, on='Drug').drop('Drug', 1).rename(columns={'ID': 'Drug'})
df_fp = pd.merge(df_info[['ID', 'Drug']], df_fp, on='Drug').drop('Drug', 1).rename(columns={'ID': 'Drug'})
df_desc2 = load_drug_set_descriptors(drug_set='NCI60', usecols=df_desc.columns.tolist() if ncols else None)
df_fp2 = load_drug_set_fingerprints(drug_set='NCI60', usecols=df_fp.columns.tolist() if ncols else None)
df_desc = pd.concat([df_desc, df_desc2]).reset_index(drop=True)
df1 = pd.DataFrame(df_desc.loc[:, 'Drug'])
df2 = df_desc.drop('Drug', 1)
df2 = impute_and_scale(df2, scaling=scaling, imputing=imputing, dropna=dropna)
if add_prefix:
df2 = df2.add_prefix('dragon7.')
df_desc = pd.concat([df1, df2], axis=1)
df_fp = pd.concat([df_fp, df_fp2]).reset_index(drop=True)
df1 = pd.DataFrame(df_fp.loc[:, 'Drug'])
df2 = df_fp.drop('Drug', 1)
df2 = impute_and_scale(df2, scaling=None, imputing=imputing, dropna=dropna)
if add_prefix:
df2 = df2.add_prefix('dragon7.')
df_fp = pd.concat([df1, df2], axis=1)
logger.info('Loaded combined dragon7 drug descriptors: %s', df_desc.shape)
logger.info('Loaded combined dragon7 drug fingerprints: %s', df_fp.shape)
return df_desc, df_fp
def load_drug_descriptors(ncols=None, scaling='std', imputing='mean', dropna=None, add_prefix=True, feature_subset=None):
df_info = load_drug_info()
df_info['Drug'] = df_info['PUBCHEM']
df_desc = load_drug_set_descriptors(drug_set='Combined_PubChem', ncols=ncols)
df_desc = pd.merge(df_info[['ID', 'Drug']], df_desc, on='Drug').drop('Drug', 1).rename(columns={'ID': 'Drug'})
df_desc2 = load_drug_set_descriptors(drug_set='NCI60', usecols=df_desc.columns.tolist() if ncols else None)
df_desc = pd.concat([df_desc, df_desc2]).reset_index(drop=True)
df1 = pd.DataFrame(df_desc.loc[:, 'Drug'])
df2 = df_desc.drop('Drug', 1)
if add_prefix:
df2 = df2.add_prefix('dragon7.')
if feature_subset:
df2 = df2[[x for x in df2.columns if x in feature_subset]]
df2 = impute_and_scale(df2, scaling=scaling, imputing=imputing, dropna=dropna)
df_desc = pd.concat([df1, df2], axis=1)
logger.info('Loaded combined dragon7 drug descriptors: %s', df_desc.shape)
return df_desc
def load_drug_fingerprints(ncols=None, scaling='std', imputing='mean', dropna=None, add_prefix=True, feature_subset=None):
df_info = load_drug_info()
df_info['Drug'] = df_info['PUBCHEM']
df_fp = load_drug_set_fingerprints(drug_set='Combined_PubChem', ncols=ncols)
df_fp = pd.merge(df_info[['ID', 'Drug']], df_fp, on='Drug').drop('Drug', 1).rename(columns={'ID': 'Drug'})
df_fp2 = load_drug_set_fingerprints(drug_set='NCI60', usecols=df_fp.columns.tolist() if ncols else None)
df_fp = pd.concat([df_fp, df_fp2]).reset_index(drop=True)
df1 = pd.DataFrame(df_fp.loc[:, 'Drug'])
df2 = df_fp.drop('Drug', 1)
if add_prefix:
df2 = df2.add_prefix('dragon7.')
if feature_subset:
df2 = df2[[x for x in df2.columns if x in feature_subset]]
df2 = impute_and_scale(df2, scaling=None, imputing=imputing, dropna=dropna)
df_fp = pd.concat([df1, df2], axis=1)
logger.info('Loaded combined dragon7 drug fingerprints: %s', df_fp.shape)
return df_fp
def load_drug_info():
path = get_file(DATA_URL + 'drug_info')
df = pd.read_csv(path, sep='\t', dtype=object)
df['PUBCHEM'] = 'PubChem.CID.' + df['PUBCHEM']
return df
def lookup(df, query, ret, keys, match='match'):
mask = pd.Series(False, index=range(df.shape[0]))
for key in keys:
if match == 'contains':
mask |= df[key].str.contains(query.upper(), case=False)
else:
mask |= (df[key].str.upper() == query.upper())
return list(set(df[mask][ret].values.flatten().tolist()))
def load_cell_metadata():
path = get_file(DATA_URL + 'cl_metadata')
df = pd.read_csv(path, sep='\t')
return df
def cell_name_to_ids(name, source=None):
path = get_file(DATA_URL + 'NCI60_CELLNAME_to_Combo.txt')
df1 = pd.read_csv(path, sep='\t')
hits1 = lookup(df1, name, 'NCI60.ID', ['NCI60.ID', 'CELLNAME', 'Name'], match='contains')
path = get_file(DATA_URL + 'cl_mapping')
df2 = pd.read_csv(path, sep='\t', header=None)
hits2 = lookup(df2, name, [0, 1], [0, 1], match='contains')
hits = hits1 + hits2
if source:
hits = [x for x in hits if x.startswith(source.upper() + '.')]
return hits
def drug_name_to_ids(name, source=None):
df1 = load_drug_info()
path = get_file(DATA_URL + 'NCI_IOA_AOA_drugs')
df2 = pd.read_csv(path, sep='\t', dtype=str)
df2['NSC'] = 'NSC.' + df2['NSC']
hits1 = lookup(df1, name, 'ID', ['ID', 'NAME', 'CLEAN_NAME', 'PUBCHEM'])
hits2 = lookup(df2, name, 'NSC', ['NSC', 'Generic Name', 'Preffered Name'])
hits = hits1 + hits2
if source:
hits = [x for x in hits if x.startswith(source.upper() + '.')]
return hits
def load_drug_set_descriptors(drug_set='Combined_PubChem', ncols=None, usecols=None,
scaling=None, imputing=None, add_prefix=False):
path = get_file(DATA_URL + '{}_dragon7_descriptors.tsv'.format(drug_set))
df_cols = pd.read_csv(path, engine='c', sep='\t', nrows=0)
total = df_cols.shape[1] - 1
if usecols is not None:
usecols = [x for x in usecols if x in df_cols.columns]
if usecols[0] != 'NAME':
usecols = ['NAME'] + usecols
df_cols = df_cols.loc[:, usecols]
elif ncols and ncols < total:
usecols = np.random.choice(total, size=ncols, replace=False)
usecols = np.append([0], np.add(sorted(usecols), 1))
df_cols = df_cols.iloc[:, usecols]
dtype_dict = dict((x, np.float32) for x in df_cols.columns[1:])
df = pd.read_csv(path, engine='c', sep='\t', usecols=usecols, dtype=dtype_dict,
na_values=['na', '-', ''])
df1 = pd.DataFrame(df.loc[:, 'NAME'])
df1.rename(columns={'NAME': 'Drug'}, inplace=True)
df2 = df.drop('NAME', 1)
if add_prefix:
df2 = df2.add_prefix('dragon7.')
df2 = impute_and_scale(df2, scaling, imputing, dropna=None)
df = pd.concat([df1, df2], axis=1)
return df
def load_drug_set_fingerprints(drug_set='Combined_PubChem', ncols=None, usecols=None,
scaling=None, imputing=None, add_prefix=False):
fps = ['PFP', 'ECFP']
usecols_all = usecols
df_merged = None
for fp in fps:
path = get_file(DATA_URL + '{}_dragon7_{}.tsv'.format(drug_set, fp))
df_cols = pd.read_csv(path, engine='c', sep='\t', nrows=0, skiprows=1, header=None)
total = df_cols.shape[1] - 1
if usecols_all is not None:
usecols = [x.replace(fp + '.', '') for x in usecols_all]
usecols = [int(x) for x in usecols if x.isdigit()]
usecols = [x for x in usecols if x in df_cols.columns]
if usecols[0] != 0:
usecols = [0] + usecols
df_cols = df_cols.loc[:, usecols]
elif ncols and ncols < total:
usecols = np.random.choice(total, size=ncols, replace=False)
usecols = np.append([0], np.add(sorted(usecols), 1))
df_cols = df_cols.iloc[:, usecols]
dtype_dict = dict((x, np.float32) for x in df_cols.columns[1:])
df = pd.read_csv(path, engine='c', sep='\t', skiprows=1, header=None,
usecols=usecols, dtype=dtype_dict)
df.columns = ['{}.{}'.format(fp, x) for x in df.columns]
col1 = '{}.0'.format(fp)
df1 = pd.DataFrame(df.loc[:, col1])
df1.rename(columns={col1: 'Drug'}, inplace=True)
df2 = df.drop(col1, 1)
if add_prefix:
df2 = df2.add_prefix('dragon7.')
df2 = impute_and_scale(df2, scaling, imputing, dropna=None)
df = pd.concat([df1, df2], axis=1)
df_merged = df if df_merged is None else df_merged.merge(df)
return df_merged
# def load_drug_smiles():
# path = get_file(DATA_URL + 'ChemStructures_Consistent.smiles')
# df = global_cache.get(path)
# if df is None:
# df = pd.read_csv(path, sep='\t', engine='c', dtype={'nsc_id':object})
# df = df.rename(columns={'nsc_id': 'NSC'})
# global_cache[path] = df
# return df
def encode_sources(sources):
df = pd.get_dummies(sources, prefix='source', prefix_sep='.')
df['Source'] = sources
    source_l1 = df['Source'].str.extract(r'^(\S+)\.', expand=False)
df1 = pd.get_dummies(source_l1, prefix='source.L1', prefix_sep='.')
df = pd.concat([df1, df], axis=1)
df = df.set_index('Source').reset_index()
return df
def load_cell_rnaseq(ncols=None, scaling='std', imputing='mean', add_prefix=True,
use_landmark_genes=False, use_filtered_genes=False,
feature_subset=None, preprocess_rnaseq=None,
embed_feature_source=False, sample_set=None, index_by_sample=False):
if use_landmark_genes:
filename = 'combined_rnaseq_data_lincs1000'
elif use_filtered_genes:
filename = 'combined_rnaseq_data_filtered'
else:
filename = 'combined_rnaseq_data'
if preprocess_rnaseq and preprocess_rnaseq != 'none':
scaling = None
filename += ('_' + preprocess_rnaseq) # 'source_scale' or 'combat'
path = get_file(DATA_URL + filename)
df_cols = pd.read_csv(path, engine='c', sep='\t', nrows=0)
total = df_cols.shape[1] - 1 # remove Sample column
if 'Cancer_type_id' in df_cols.columns:
total -= 1
usecols = None
if ncols and ncols < total:
usecols = np.random.choice(total, size=ncols, replace=False)
usecols = np.append([0], np.add(sorted(usecols), 2))
df_cols = df_cols.iloc[:, usecols]
if feature_subset:
with_prefix = lambda x: 'rnaseq.' + x if add_prefix else x
usecols = [0] + [i for i, c in enumerate(df_cols.columns) if with_prefix(c) in feature_subset]
df_cols = df_cols.iloc[:, usecols]
dtype_dict = dict((x, np.float32) for x in df_cols.columns[1:])
df = pd.read_csv(path, engine='c', sep='\t', usecols=usecols, dtype=dtype_dict)
if 'Cancer_type_id' in df.columns:
df.drop('Cancer_type_id', axis=1, inplace=True)
prefixes = df['Sample'].str.extract('^([^.]*)', expand=False).rename('Source')
sources = prefixes.drop_duplicates().reset_index(drop=True)
df_source = pd.get_dummies(sources, prefix='rnaseq.source', prefix_sep='.')
df_source = pd.concat([sources, df_source], axis=1)
df1 = df['Sample']
if embed_feature_source:
df_sample_source = pd.concat([df1, prefixes], axis=1)
df1 = df_sample_source.merge(df_source, on='Source', how='left').drop('Source', axis=1)
logger.info('Embedding RNAseq data source into features: %d additional columns', df1.shape[1] - 1)
df2 = df.drop('Sample', 1)
if add_prefix:
df2 = df2.add_prefix('rnaseq.')
df2 = impute_and_scale(df2, scaling, imputing)
df = | pd.concat([df1, df2], axis=1) | pandas.concat |
import matplotlib
from numpy.lib.shape_base import _put_along_axis_dispatcher
from numpy.ma.extras import average
from numpy.testing._private.utils import tempdir
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from matplotlib import cm
import pandas as pd
from data_handler import load_data
import os
import seaborn as sns
import numpy as np
import scipy.stats as stats
from bioinfokit.analys import stat
from shapely.geometry import Point
import geopandas as gpd
from geopandas import GeoDataFrame
from tqdm import tqdm
import sys
dir_output = sys.argv[1]
if not os.path.exists(dir_output):
os.makedirs(dir_output)
data_folder = sys.argv[2]
type_delay = sys.argv[3]
# read in data
data, plane_data, carriers, airport = load_data(data_folder)
# filter data
data = data.dropna(subset=[type_delay])
# plot the world with color as average delay from the specific airport
geometry = [Point(xy) for xy in zip(airport.long, airport.lat)]
gdf = GeoDataFrame(airport, geometry=geometry)
# get average delay for airport
average_delay_airport = list()
for ap in tqdm(airport.iata):
temp_data = data[data.Dest == ap]
average_delay_airport.append(temp_data[type_delay].mean())
# create color map
lower = np.nanmin(average_delay_airport)
upper = np.nanmax(average_delay_airport)
colors_map_average_delay = cm.Reds((average_delay_airport-lower)/(upper-lower))
#this is a simple map that goes with geopandas
world = gpd.read_file(gpd.datasets.get_path('naturalearth_lowres'))
plt.figure()
gdf.plot(ax=world.plot(figsize=(10, 6)), marker='o', color=colors_map_average_delay, markersize=15)
plt.xlim([-175, -50])
plt.ylim([10, 75])
plt.axis('off')
plt.savefig(os.path.join(dir_output, 'world_map_color_average_delay_zoom.pdf'), bbox_inches='tight')
plt.close()
#this is a simple map that goes with geopandas
world = gpd.read_file(gpd.datasets.get_path('naturalearth_lowres'))
plt.figure()
gdf.plot(ax=world.plot(figsize=(10, 6)), marker='o', color=colors_map_average_delay, markersize=15)
plt.axis('off')
plt.savefig(os.path.join(dir_output, 'world_map_color_average_delay.pdf'), bbox_inches='tight')
plt.close()
# plot world card with JFK and LAX
airport_lax_jfk = pd.concat([airport[airport['iata'] == 'LAX'], airport[airport['iata'] == 'JFK']])
geometry_lax_jfk = [Point(xy) for xy in zip(airport_lax_jfk.long, airport_lax_jfk.lat)]
gdf_lax_jfk = GeoDataFrame(airport_lax_jfk, geometry=geometry_lax_jfk)
#this is a simple map that goes with geopandas
world = gpd.read_file(gpd.datasets.get_path('naturalearth_lowres'))
plt.figure()
gdf_lax_jfk.plot(ax=world.plot(figsize=(10, 6)), marker='o', color='red', markersize=30)
plt.xlim([-130, -60])
plt.ylim([10, 55])
plt.axis('off')
plt.savefig(os.path.join(dir_output, 'world_map_lax_jfk.pdf'), bbox_inches='tight')
plt.close()
# filter for specific destinations
data = data[data['Origin'].isin(['JFK', 'LAX'])]
data = data[data['Dest'].isin(['JFK', 'LAX'])]
# plot barplot delay per year
plt.figure()
ax_sns = sns.barplot(data=data, x='Year', y=type_delay)
plt.xticks(rotation=90)
plt.savefig(os.path.join(dir_output, 'barplot_year.pdf'), bbox_inches='tight')
plt.close()
# plot barplot delay per year and month
data = data.sort_values(by=['Month'])
plt.figure(figsize=(15,4))
ax = plt.subplot(111)
ax_sns = sns.barplot(data=data, x='Year', y=type_delay, hue='Month')
box = ax.get_position()
ax.set_position([box.x0, box.y0, box.width * 0.8, box.height])
# Put a legend to the right of the current axis
ax.legend(loc='upper left', bbox_to_anchor=(1, 1))
plt.xticks(rotation=90)
plt.savefig(os.path.join(dir_output, 'barplot_year_month.pdf'), bbox_inches='tight')
plt.close()
# create seasons variable
winter_month = [12, 1 ,2]
spring_month = [3, 4, 5]
summer_month = [6, 7, 8]
autumn_month = [9, 10, 11]
data['Season'] = ['']*data.shape[0]
for mo in range(1, 13):
    # Use .loc for the assignment: chained indexing can fail silently on a copy.
    if mo in winter_month:
        data.loc[data['Month'] == mo, 'Season'] = 'Winter'
    elif mo in spring_month:
        data.loc[data['Month'] == mo, 'Season'] = 'Spring'
    elif mo in summer_month:
        data.loc[data['Month'] == mo, 'Season'] = 'Summer'
    elif mo in autumn_month:
        data.loc[data['Month'] == mo, 'Season'] = 'Autumn'
color_seasons = {'Winter' : 'cornflowerblue', 'Spring' : 'mediumseagreen', 'Summer' : 'yellow', 'Autumn' : 'darkgoldenrod'}
# plot barplot delay per year and season
plt.figure(figsize=(10,4))
ax = plt.subplot(111)
ax_sns = sns.barplot(data=data, x='Year', y=type_delay, hue='Season', palette=color_seasons)
box = ax.get_position()
ax.set_position([box.x0, box.y0, box.width * 0.8, box.height])
# Put a legend to the right of the current axis
ax.legend(loc='upper left', bbox_to_anchor=(1, 1))
plt.xticks(rotation=90)
plt.savefig(os.path.join(dir_output, 'barplot_year_seasons.pdf'), bbox_inches='tight')
plt.close()
# plot number of flights per year-month
data = data.sort_values(by=['Year', 'Month'])
number_of_flights = list()
number_of_flights_to_LAX = list()
number_of_flights_to_JFK = list()
year_x_label = list()
month_x_label = list()
for y in data.Year.unique():
for m in np.sort(data.Month.unique()):
temp_data = data[data['Year'] == y]
temp_data = temp_data[temp_data['Month'] == m]
if temp_data.shape[0] > 0:
number_of_flights.append(temp_data.shape[0])
number_of_flights_to_LAX.append(temp_data[temp_data['Dest'] == 'LAX'].shape[0])
number_of_flights_to_JFK.append(temp_data[temp_data['Dest'] == 'JFK'].shape[0])
if not y in year_x_label and m == 1:
year_x_label.append(y)
else:
year_x_label.append('')
month_x_label.append(m)
linewidth = 2
plt.figure(figsize=(10,4))
ax = plt.subplot(111)
x_axis_time = list(range(len(number_of_flights)))
plt.plot(x_axis_time, number_of_flights, label='Total', linewidth=linewidth)
plt.plot(x_axis_time, number_of_flights_to_LAX, label='To LAX', linestyle='dashed', linewidth=linewidth)
plt.plot(x_axis_time, number_of_flights_to_JFK, label='To JFK', linestyle=(0, (1,1)), linewidth=linewidth)
for i, y in enumerate(year_x_label):
if y != '':
plt.axvline(x=x_axis_time[i], linestyle='dashed', linewidth = 0.5, color='gray')
plt.xlabel('Time', fontsize=18)
plt.xticks(x_axis_time, year_x_label, rotation=90)
plt.ylabel('Number of flights', fontsize=18)
box = ax.get_position()
ax.set_position([box.x0, box.y0, box.width * 0.8, box.height])
# Put a legend to the right of the current axis
ax.legend(loc='upper left', bbox_to_anchor=(1, 1))
plt.savefig(os.path.join(dir_output, 'number_of_flights.pdf'), bbox_inches='tight')
plt.close()
# plot delay per year-month
data = data.sort_values(by=['Year', 'Month'])
avg_arrival_delay = list()
avg_departure_delay = list()
avg_carrier_delay = list()
avg_weather_delay = list()
avg_nas_delay = list()
avg_security_delay = list()
avg_late_aircraft_delay = list()
year_x_label = list()
month_x_label = list()
ind_first_special_delay = 0
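# Index of the first year-month with the detailed delay breakdown; the
# carrier/weather/NAS/security/late-aircraft fields are empty in the earliest years.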
for y in data.Year.unique():
for m in np.sort(data.Month.unique()):
temp_data = data[data['Year'] == y]
temp_data = temp_data[temp_data['Month'] == m]
if temp_data.shape[0] > 0:
avg_arrival_delay.append(temp_data.ArrDelay.dropna().mean())
avg_departure_delay.append(temp_data.DepDelay.dropna().mean())
avg_carrier_delay.append(temp_data.CarrierDelay.dropna().mean())
avg_weather_delay.append(temp_data.WeatherDelay.dropna().mean())
avg_nas_delay.append(temp_data.NASDelay.dropna().mean())
avg_security_delay.append(temp_data.SecurityDelay.dropna().mean())
avg_late_aircraft_delay.append(temp_data.LateAircraftDelay.dropna().mean())
if len(temp_data.LateAircraftDelay.dropna()) == 0:
ind_first_special_delay +=1
if not y in year_x_label and m == 1:
year_x_label.append(y)
else:
year_x_label.append('')
month_x_label.append(m)
avg_arrival_delay = np.asarray(avg_arrival_delay)
avg_departure_delay = np.asarray(avg_departure_delay)
avg_carrier_delay = np.asarray(avg_carrier_delay)
avg_weather_delay = np.asarray(avg_weather_delay)
avg_nas_delay = np.asarray(avg_nas_delay)
avg_security_delay = np.asarray(avg_security_delay)
avg_late_aircraft_delay = np.asarray(avg_late_aircraft_delay)
linewidth = 1
plt.figure(figsize=(10,4))
ax = plt.subplot(111)
x_axis_time = np.asarray(list(range(len(avg_arrival_delay))))
plt.plot(x_axis_time, avg_arrival_delay, label='Arrival', linewidth=linewidth)
plt.plot(x_axis_time, avg_departure_delay, label='Departure', linewidth=linewidth)
plt.plot(x_axis_time, avg_carrier_delay, label='Carrier', linewidth=linewidth)
plt.plot(x_axis_time, avg_weather_delay, label='Weather', linewidth=linewidth)
plt.plot(x_axis_time, avg_nas_delay, label='NAS', linewidth=linewidth)
plt.plot(x_axis_time, avg_security_delay, label='Security', linewidth=linewidth)
plt.plot(x_axis_time, avg_late_aircraft_delay, label='Late Aircraft', linewidth=linewidth)
plt.hlines(y=0, xmin=np.min(x_axis_time), xmax=np.max(x_axis_time), linestyles='dashed', linewidth=0.5, color='gray')
for i, y in enumerate(year_x_label):
if y != '':
plt.axvline(x=x_axis_time[i], linestyle='dashed', linewidth = 0.5, color='gray')
plt.xlabel('Time', fontsize=18)
plt.xticks(x_axis_time, year_x_label, rotation=90)
plt.ylabel('Average delay [minutes]', fontsize=18)
box = ax.get_position()
ax.set_position([box.x0, box.y0, box.width * 0.8, box.height])
# Put a legend to the right of the current axis
ax.legend(loc='upper left', bbox_to_anchor=(1, 1))
plt.savefig(os.path.join(dir_output, 'types-of-delay-over-time.pdf'), bbox_inches='tight')
plt.close()
linewidth = 1
plt.figure(figsize=(10,4))
ax = plt.subplot(111)
x_axis_time = list(range(len(avg_arrival_delay)))
plt.plot(x_axis_time[ind_first_special_delay:], avg_arrival_delay[ind_first_special_delay:], label='Arrival', linewidth=linewidth)
plt.plot(x_axis_time[ind_first_special_delay:], avg_departure_delay[ind_first_special_delay:], label='Departure', linewidth=linewidth)
plt.plot(x_axis_time[ind_first_special_delay:], avg_carrier_delay[ind_first_special_delay:], label='Carrier', linewidth=linewidth)
plt.plot(x_axis_time[ind_first_special_delay:], avg_weather_delay[ind_first_special_delay:], label='Weather', linewidth=linewidth)
plt.plot(x_axis_time[ind_first_special_delay:], avg_nas_delay[ind_first_special_delay:], label='NAS', linewidth=linewidth)
plt.plot(x_axis_time[ind_first_special_delay:], avg_security_delay[ind_first_special_delay:], label='Security', linewidth=linewidth)
plt.plot(x_axis_time[ind_first_special_delay:], avg_late_aircraft_delay[ind_first_special_delay:], label='Late Aircraft', linewidth=linewidth)
plt.hlines(y=0, xmin=np.min(x_axis_time[ind_first_special_delay:]), xmax=np.max(x_axis_time[ind_first_special_delay:]), linestyles='dashed', linewidth=0.5, color='gray')
for i, y in enumerate(year_x_label[ind_first_special_delay:]):
if y != '':
plt.axvline(x=x_axis_time[ind_first_special_delay:][i], linestyle='dashed', linewidth = 0.5, color='gray')
plt.xlabel('Time', fontsize=18)
plt.xticks(x_axis_time[ind_first_special_delay:], year_x_label[ind_first_special_delay:], rotation=90)
plt.ylabel('Average delay [minutes]', fontsize=18)
box = ax.get_position()
ax.set_position([box.x0, box.y0, box.width * 0.8, box.height])
# Put a legend to the right of the current axis
ax.legend(loc='upper left', bbox_to_anchor=(1, 1))
plt.savefig(os.path.join(dir_output, 'types-of-delay-over-time_zoom.pdf'), bbox_inches='tight')
plt.close()
# plot scatter of month vs. delay at arrival and color for seasons
# plot scatter of number of flights vs. delay at arrival and color for seasons
data = data.sort_values(by=['Year', 'Month'])
average_delay = list()
number_of_flights = list()
month = list()
color_scatter = list()
label_scatter = list()
for y in data.Year.unique():
for m in np.sort(data.Month.unique()):
temp_data = data[data['Year'] == y]
temp_data = temp_data[temp_data['Month'] == m]
if temp_data.shape[0] > 0:
number_of_flights.append(temp_data.shape[0])
average_delay.append(temp_data[type_delay].mean())
if m in winter_month:
color_scatter.append(color_seasons['Winter'])
label_scatter.append('Winter')
elif m in spring_month:
color_scatter.append(color_seasons['Spring'])
label_scatter.append('Spring')
elif m in summer_month:
color_scatter.append(color_seasons['Summer'])
label_scatter.append('Summer')
elif m in autumn_month:
color_scatter.append(color_seasons['Autumn'])
label_scatter.append('Autumn')
# with added jitter
month.append(m + np.random.uniform(-0.4, 0.4, 1)[0])
average_delay = np.asarray(average_delay)
number_of_flights = np.asarray(number_of_flights)
month = np.asarray(month)
color_scatter = np.asarray(color_scatter)
label_scatter = np.asarray(label_scatter)
dict_number_flights_vs_delay = {}
dict_number_flights_vs_delay['Average_delay'] = average_delay
dict_number_flights_vs_delay['Number_of_flights'] = number_of_flights
dict_number_flights_vs_delay['Month'] = month
dict_number_flights_vs_delay['Season'] = label_scatter
data_number_flights_vs_delay = pd.DataFrame(dict_number_flights_vs_delay)
plt.figure()
ax = plt.subplot(111)
for g in np.unique(label_scatter):
ind_g = np.where(label_scatter == g)[0]
plt.scatter(month[ind_g], average_delay[ind_g], c=color_scatter[ind_g], label=g)
plt.xlabel('Month', fontsize=18)
plt.xticks(np.sort(data.Month.unique()), np.sort(data.Month.unique()))
plt.ylabel('Average delay at arrival', fontsize=18)
box = ax.get_position()
ax.set_position([box.x0, box.y0, box.width * 0.8, box.height])
# Put a legend to the right of the current axis
ax.legend(loc='upper left', bbox_to_anchor=(1, 1))
plt.savefig(os.path.join(dir_output, 'scatter_avg-delay_vs_month.pdf'), bbox_inches='tight')
plt.close()
plt.figure()
ax = plt.subplot(111)
for g in np.unique(label_scatter):
ind_g = np.where(label_scatter == g)[0]
plt.scatter(number_of_flights[ind_g], average_delay[ind_g], c=color_scatter[ind_g], label=g)
plt.xlabel('Number of flights', fontsize=18)
plt.ylabel('Average delay', fontsize=18)
box = ax.get_position()
ax.set_position([box.x0, box.y0, box.width * 0.8, box.height])
# Put a legend to the right of the current axis
ax.legend(loc='upper left', bbox_to_anchor=(1, 1))
plt.savefig(os.path.join(dir_output, 'scatter_avg-delay_vs_number-of-flights.pdf'), bbox_inches='tight')
plt.close()
plt.figure()
ax = plt.subplot(111)
ax_sns = sns.lmplot(data=data_number_flights_vs_delay, x='Number_of_flights', y='Average_delay', hue='Season', palette=color_seasons)
plt.xlabel('Number of flights', fontsize=18)
plt.ylabel('Average delay', fontsize=18)
box = ax.get_position()
ax.set_position([box.x0, box.y0, box.width * 0.8, box.height])
# Put a legend to the right of the current axis
ax.legend(loc='upper left', bbox_to_anchor=(1, 1))
plt.savefig(os.path.join(dir_output, 'scatterlm_avg-delay_vs_number-of-flights.pdf'), bbox_inches='tight')
plt.close()
data_number_flights_vs_delay = data_number_flights_vs_delay.sort_values(by='Month')
plt.figure()
ax_sns = sns.boxplot(data=data_number_flights_vs_delay, x='Season', y='Average_delay', palette=color_seasons)
plt.xlabel('Season', fontsize=18)
plt.ylim([-15, 30])
plt.ylabel('Average delay', fontsize=18)
plt.savefig(os.path.join(dir_output, 'boxplot_avg-delay_seasons.pdf'), bbox_inches='tight')
plt.close()
# perform anova for seasons and delay
fvalue_seasons, pvalue_seasons = stats.f_oneway(data_number_flights_vs_delay[data_number_flights_vs_delay.Season == 'Winter']['Average_delay'], data_number_flights_vs_delay[data_number_flights_vs_delay.Season == 'Spring']['Average_delay'], data_number_flights_vs_delay[data_number_flights_vs_delay.Season == 'Summer']['Average_delay'], data_number_flights_vs_delay[data_number_flights_vs_delay.Season == 'Autumn']['Average_delay'])
res = stat()
res.tukey_hsd(df=data_number_flights_vs_delay, res_var='Average_delay', xfac_var='Season', anova_model='Average_delay ~ C(Season)')
res.tukey_summary
# only to LAX
# plot scatter of month vs. delay at arrival and color for seasons
# plot scatter of number of flights vs. delay at arrival and color for seasons
data_to_LAX = data[data.Dest == 'LAX'].sort_values(by=['Year', 'Month'])
average_delay = list()
number_of_flights = list()
month = list()
color_scatter = list()
label_scatter = list()
for y in data_to_LAX.Year.unique():
for m in np.sort(data_to_LAX.Month.unique()):
temp_data = data_to_LAX[data_to_LAX['Year'] == y]
temp_data = temp_data[temp_data['Month'] == m]
if temp_data.shape[0] > 0:
number_of_flights.append(temp_data.shape[0])
average_delay.append(temp_data[type_delay].mean())
if m in winter_month:
color_scatter.append(color_seasons['Winter'])
label_scatter.append('Winter')
elif m in spring_month:
color_scatter.append(color_seasons['Spring'])
label_scatter.append('Spring')
elif m in summer_month:
color_scatter.append(color_seasons['Summer'])
label_scatter.append('Summer')
elif m in autumn_month:
color_scatter.append(color_seasons['Autumn'])
label_scatter.append('Autumn')
# with added jitter
month.append(m + np.random.uniform(-0.4, 0.4, 1)[0])
average_delay = np.asarray(average_delay)
number_of_flights = np.asarray(number_of_flights)
month = np.asarray(month)
color_scatter = np.asarray(color_scatter)
label_scatter = np.asarray(label_scatter)
dict_number_flights_vs_delay = {}
dict_number_flights_vs_delay['Average_delay'] = average_delay
dict_number_flights_vs_delay['Number_of_flights'] = number_of_flights
dict_number_flights_vs_delay['Month'] = month
dict_number_flights_vs_delay['Season'] = label_scatter
data_number_flights_vs_delay = | pd.DataFrame(dict_number_flights_vs_delay) | pandas.DataFrame |
import pandas as pd
import numpy as np
import tensorflow as tf
import random as rd
import matplotlib.pyplot as plt
def generate_fake_data():
    # Placeholder generator (assumed shape): 100 rows of uniform random values
    # in three unnamed columns. Adjust the row count/distribution to the real use case.
    df = pd.DataFrame(np.random.rand(100, 3), columns=[0, 1, 2])
    return df
import json
import pandas as pd
with open("./text.txt", "r") as file:
json_dict = json.loads(file.read())
data = json_dict.get("data")
sample = data[0]
base_dict = {"id": []}
for attribute in data[0]["attributes"]:
base_dict[attribute] = []
for store in data:
base_dict["id"].append(store["id"])
for attribute in store["attributes"]:
base_dict[attribute].append(store["attributes"][attribute])
df = | pd.DataFrame.from_dict(base_dict) | pandas.DataFrame.from_dict |
import numpy as np
import struct
import pandas as pd
def read_ascii(filename, pandas=False):
"""
Takes a filename containing the text output (with headers) from xlook and
reads the columns into a rec array or dataframe object for easy data
processing and access.
"""
try:
f = open(filename, 'r')
except:
print(f'Error Opening {filename}')
return 0
col_width = 12 # Columns are 12 char wide in header
# First line of the file is the number of records
num_recs = f.readline()
num_recs = int(num_recs.strip('number of records = '))
print(f'\nNumber of records: {num_recs}')
# Second line is column numbers, we don't care so just count them
num_cols = f.readline()
num_cols = num_cols.split('col')
num_cols = len(num_cols)
print(f'Number of columns: {num_cols}')
# Third line is the column headings
col_headings_str = f.readline()
col_headings_str = col_headings_str[5:-1]
col_headings = ['row_num'] # Row number the the first (unlabeled) column
for i in range(int(len(col_headings_str)/12)):
heading = col_headings_str[12*i:12*i+12].strip()
col_headings.append(heading)
# Fourth line is column units
col_units_str = f.readline()
col_units_str = col_units_str[5:-1]
col_units = ['.']
for i in range(int(len(col_units_str)/12) + 1):
heading = col_units_str[12*i:12*i+12].strip()
col_units.append(heading)
col_units = [x for x in col_units if x != '\n'] # Remove newlines
# Fifth line is number of records per column
col_recs = f.readline()
col_recs = col_recs.split('recs')
col_recs = [int(x) for x in col_recs if x != '\n']
col_recs.insert(0, num_recs)
# Show column units and headings
print('\n\n-------------------------------------------------')
print('|%15s|%15s|%15s|' % ('Name', 'Unit', 'Records'))
print('-------------------------------------------------')
for column in zip(col_headings, col_units, col_recs):
print('|%15s|%15s|%15s|' % (column[0], column[1], column[2]))
print('-------------------------------------------------')
# Read the data into a numpy recarray
dtype = []
for name in col_headings:
dtype.append((name, 'float'))
dtype = np.dtype(dtype)
data = np.zeros([num_recs, num_cols])
i = 0
for row in f:
row_data = row.split()
for j in range(num_cols):
data[i, j] = row_data[j]
i += 1
f.close()
if pandas:
# If a pandas object is requested, make a data frame
# indexed on row number and return it
dfo = | pd.DataFrame(data, columns=col_headings) | pandas.DataFrame |
import pandas as pd
df = pd.DataFrame(columns=['p', 'Avg Trajectory Length', 'Avg T_FDG', 'Avg FDG_FG', 'Avg Cells Processed'])
record = pd.read_csv('Extra_Question_7.csv')
traj = 0
T_FDG = 0
FDG_FG = 0
cells = 0
x = 0
for index, row in record.iterrows():
x = x + 1
traj = traj + row['Trajectory Length']
T_FDG = T_FDG + (row['Trajectory Length'] / row['FDG Length'])
FDG_FG = FDG_FG + (row['FDG Length'] / row['FG Length'])
cells = cells + row['Cells Processed']
    if x == 30:
        df1 = pd.DataFrame([[row['p'], traj/30, T_FDG/30, FDG_FG/30, cells/30]], columns=['p', 'Avg Trajectory Length', 'Avg T_FDG', 'Avg FDG_FG', 'Avg Cells Processed'])
        df = pd.concat([df, df1])
        # Reset the accumulators so each block of 30 rows is averaged independently.
        traj = 0
        T_FDG = 0
        FDG_FG = 0
        cells = 0
        x = 0
#!/usr/bin/env python
"""
Copyright 2021, <NAME>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at:
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest
from app import create_app
import pandas as pd
import json
class APITestCase(unittest.TestCase):
def setUp(self):
self.app = create_app('testing')
self.app_context = self.app.app_context()
self.app_context.push()
self.client = self.app.test_client()
self.headers = {'content-type': 'application/json', 'Accept-Charset': 'UTF-8'}
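        # Reference input frames and the expected prediction output used for comparisons.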
self.df_in = pd.DataFrame(
data=[[0.3521, 55.1824, 0.8121, 2.3256]],
columns=['CNC', 'GR', 'HRD', 'ZDEN']
)
self.df_in_misordered = pd.DataFrame(
data=[[2.3256, 0.8121, 55.1824, 0.3521]],
columns=['ZDEN', 'HRD', 'GR', 'CNC']
)
self.df_in_extra_cols = pd.DataFrame(
data=[[8.5781, 0.3521, 55.1824, 0.8121, 0.78099, 6.8291, 2.3256]],
columns=['CAL', 'CNC', 'GR', 'HRD', 'HRM', 'PE', 'ZDEN']
)
self.df_in_bad_col_names = pd.DataFrame(
data=[[0.3521, 55.1824, 0.8121, 2.3256]],
columns=['Phi', 'Gamma', 'RD', 'RHOB']
)
self.df_out = pd.DataFrame(
data=[[102.225407, 196.408402]],
columns=['pred_DTC', 'pred_DTS']
)
def tearDown(self):
self.app_context.pop()
def test_get_predictions(self):
j_df = json.dumps(self.df_in.to_json(orient='split'))
response = self.client.post('api/get_predictions', data=j_df, headers=self.headers)
self.assertEqual(response.status_code, 200)
def test_get_predictions_swapped_input_cols(self):
j_df = json.dumps(self.df_in_misordered.to_json(orient='split'))
response = self.client.post('api/get_predictions', data=j_df, headers=self.headers)
df_pred = | pd.read_json(response.data, orient='split') | pandas.read_json |
import pandas as pd
cleaned_data_path = '../data/cleaned/'
prepared_data_path = '../data/prepared/'
training_set_path = '../data/training_set/'
# projects
def prepare_project_time_series(projects):
projects = split_projects_by_years(projects)
projects = split_projects_by_months(projects)
add_months_from_create_col(projects)
drop_invalid_rows(projects)
projects.to_pickle(prepared_data_path + 'projects_time_series.pkl')
return projects
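# Cross join each project with every candidate year via a dummy key, producing one
# row per (project, year); months are added the same way below.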
def split_projects_by_years(projects):
years = pd.Series(list(range(projects.created_at.min().year,
projects.created_at.max().year + 1)), name = 'year')
return projects \
.assign(key=1).merge(years.to_frame('year').assign(key=1), on='key') \
.drop('key', 1).copy()
def split_projects_by_months(projects):
months = pd.Series(list(range(1, 13)), name = 'month')
return projects \
.assign(key=1).merge(months.to_frame('month').assign(key=1), on='key') \
.drop('key', 1).copy()
def add_months_from_create_col(projects):
projects['months_from_create'] = \
(projects['year'] - projects['created_at'].dt.year) * 12 + \
(projects['month'] - projects['created_at'].dt.month)
def drop_invalid_rows(projects):
projects.drop(projects[projects['months_from_create'] < 0].index, inplace=True)
projects = prepare_project_time_series(pd.read_pickle(cleaned_data_path + 'projects.pkl'))
print(projects)
def add_year_and_month_to_df(df):
df['year'] = df['created_at'].dt.year
df['month'] = df['created_at'].dt.month
# commits
def prepare_commits(commits):
add_year_and_month_to_df(commits)
commits = add_new_commits_col(commits)
add_total_commits_col(commits)
commits.to_pickle(prepared_data_path + 'new_commits.pkl')
return commits
def add_new_commits_col(commits):
return commits.groupby(['project_id', 'year', 'month']) \
.count().reset_index() \
.rename(columns={'created_at': 'new_commits'}) \
.drop(columns={'commit_id', 'committer_id'}, axis=1).copy()
def add_total_commits_col(commits):
total_commits_list = []
for index, row in commits.iterrows():
total_commits_list.append(commits[(((row['year'] > commits['year']) |
((row['year'] == commits['year']) & (row['month'] >= commits['month']))) &
(row['project_id'] == commits['project_id']))]['new_commits'].sum())
commits['total_commits'] = pd.Series(total_commits_list)
commits = prepare_commits(pd.read_pickle(cleaned_data_path + 'commits.pkl'))
print(commits)
# commit comments
def prepare_commit_comments(commit_comments):
commit_comments = merge_projects_into_commit_comments(commit_comments)
add_year_and_month_to_df(commit_comments)
commit_comments = add_new_commit_comments_col(commit_comments)
add_total_commit_comments(commit_comments)
commit_comments.to_pickle(prepared_data_path + 'new_commit_comments.pkl')
return commit_comments
def merge_projects_into_commit_comments(commit_comments):
commits = pd.read_pickle(cleaned_data_path + 'commits.pkl')
return pd.merge(commits, commit_comments, on=['commit_id'], sort=False) \
.drop(columns={'commit_id', 'committer_id', 'created_at_x'}) \
.rename(columns={'created_at_y': 'created_at'}).copy()
def add_new_commit_comments_col(commit_comments):
return commit_comments.groupby(['project_id', 'year', 'month']).count().reset_index() \
.drop(columns={'commit_comment_id'}) \
.rename(columns={'created_at': 'new_commit_comments'}).copy()
def add_total_commit_comments(commit_comments):
total_commit_comments_list = []
for index, row in commit_comments.iterrows():
total_commit_comments_list.append(commit_comments[(((row['year'] > commit_comments['year']) |
((row['year'] == commit_comments['year']) & (row['month'] >= commit_comments['month']))) &
(row['project_id'] == commit_comments['project_id']))]['new_commit_comments'].sum())
commit_comments['total_commit_comments'] = pd.Series(total_commit_comments_list)
commit_comments = prepare_commit_comments(pd.read_pickle(cleaned_data_path + 'commit_comments.pkl'))
print(commit_comments)
# committers
def prepare_committers(commits):
add_year_and_month_to_df(commits)
committers = add_new_committers_col(commits)
add_total_committers_col(commits, committers)
committers.to_pickle(prepared_data_path + 'unique_committers.pkl')
return committers
def add_new_committers_col(commits):
return commits.groupby(by = ['project_id', 'year', 'month'], as_index=False) \
.agg({'committer_id': pd.Series.nunique}) \
.rename(columns={'committer_id': 'unique_committers'}).copy()
def add_total_committers_col(commits, committers):
total_unique_committers = []
for index, row in committers.iterrows():
total_unique_committers.append(commits[((commits['project_id'] == row['project_id']) &
((commits['year'] < row['year']) |
((commits['year'] == row['year']) & (commits['month'] <= row['month']))))]['committer_id'].nunique())
committers['total_unique_committers'] = pd.Series(total_unique_committers)
committers = prepare_committers(pd.read_pickle(cleaned_data_path + 'commits.pkl'))
print(committers)
# issues
def prepare_issues(issues):
add_year_and_month_to_df(issues)
issues = add_new_issues_col(issues)
add_total_issues_col(issues)
issues.to_pickle(prepared_data_path + 'new_issues.pkl')
return issues
def add_new_issues_col(issues):
return issues.groupby(['project_id', 'year', 'month']).count().reset_index() \
.rename(columns={'created_at': 'new_issues'}) \
.drop(columns={'issue_id'}).copy()
def add_total_issues_col(issues):
total_issues_list = []
for index, row in issues.iterrows():
total_issues_list.append(issues[(((row['year'] > issues['year']) |
((row['year'] == issues['year']) & (row['month'] >= issues['month']))) &
(row['project_id'] == issues['project_id']))]['new_issues'].sum())
issues['total_issues'] = pd.Series(total_issues_list)
issues = prepare_issues(pd.read_pickle(cleaned_data_path + 'issues.pkl'))
print(issues)
# issue comments
def prepare_issue_comments(issue_comments):
issue_comments = merge_projects_into_issue_comments(issue_comments)
add_year_and_month_to_df(issue_comments)
issue_comments = add_new_issue_comments_col(issue_comments)
add_total_issue_comments(issue_comments)
issue_comments.to_pickle(prepared_data_path + 'new_issue_comments.pkl')
return issue_comments
def merge_projects_into_issue_comments(issue_comments):
issues = pd.read_pickle(cleaned_data_path + 'issues.pkl')
return pd.merge(issues, issue_comments, on=['issue_id'], sort=False) \
.drop(columns={'created_at_x'}) \
.rename(columns={'created_at_y': 'created_at'}).copy()
def add_new_issue_comments_col(issue_comments):
return issue_comments.groupby(['project_id', 'year', 'month']).count().reset_index() \
.drop(columns={'issue_id', 'comment_id'}) \
.rename(columns={'created_at': 'new_issue_comments'}).copy()
def add_total_issue_comments(issue_comments):
total_issue_comments_list = []
for index, row in issue_comments.iterrows():
total_issue_comments_list.append(issue_comments[(((row['year'] > issue_comments['year']) |
((row['year'] == issue_comments['year']) & (row['month'] >= issue_comments['month']))) &
(row['project_id'] == issue_comments['project_id']))]['new_issue_comments'].sum())
issue_comments['total_issue_comments'] = pd.Series(total_issue_comments_list)
issue_comments = prepare_issue_comments(pd.read_pickle(cleaned_data_path + 'issue_comments.pkl'))
print(issue_comments)
# pull requests
def pre_prepare_pull_requests(pull_requests):
add_year_and_month_to_df(pull_requests)
pull_requests = grouped_pull_requests(pull_requests)
return pull_requests
def grouped_pull_requests(pull_requests):
pull_requests = pull_requests.groupby(['project_id', 'year', 'month', 'action', 'merged']) \
.count().reset_index().dropna() \
.rename(columns={'pull_request_id': 'new_pull_requests'}) \
.drop(columns={'pull_request_history_id', 'created_at'}).copy()
pull_requests['new_pull_requests'] = pull_requests['new_pull_requests'].astype('int64')
return pull_requests
def opened_pull_requests_to_merge(pull_requests):
df = pull_requests[(pull_requests['action'] == 'opened') & (pull_requests['merged'] == 1)] \
.rename(columns={'new_pull_requests': 'new_opened_pull_requests_to_merge'}) \
.drop(columns={'action', 'merged'}).copy()
df.to_pickle(prepared_data_path + 'new_opened_pull_requests_to_merge.pkl')
return df
def merged_pull_requests(pull_requests):
df = pull_requests[(pull_requests['action'] == 'merged') & (pull_requests['merged'] == 1)] \
.rename(columns={'new_pull_requests': 'new_merged_pull_requests'}) \
.drop(columns={'action', 'merged'}).copy()
df.to_pickle(prepared_data_path + 'new_merged_pull_requests.pkl')
return df
def closed_merged_pull_requests(pull_requests):
df = pull_requests[(pull_requests['action'] == 'closed') & (pull_requests['merged'] == 1)] \
.rename(columns={'new_pull_requests': 'new_closed_merged_pull_requests'}) \
.drop(columns={'action', 'merged'}).copy()
df.to_pickle(prepared_data_path + 'new_closed_merged_pull_requests.pkl')
return df
def opened_pull_requests_to_discard(pull_requests):
df = pull_requests[(pull_requests['action'] == 'opened') & (pull_requests['merged'] == 0)] \
.rename(columns={'new_pull_requests': 'new_opened_pull_requests_to_discard'}) \
.drop(columns={'action', 'merged'}).copy()
df.to_pickle(prepared_data_path + 'new_opened_pull_requests_to_discard.pkl')
return df
def closed_unmerged_pull_requests(pull_requests):
df = pull_requests[(pull_requests['action'] == 'closed') & (pull_requests['merged'] == 0)] \
.rename(columns={'new_pull_requests': 'new_closed_unmerged_pull_requests'}) \
.drop(columns={'action', 'merged'}).copy()
df.to_pickle(prepared_data_path + 'new_closed_unmerged_pull_requests.pkl')
return df
def total_merged_pull_requests(pull_requests):
df = pull_requests[(pull_requests['merged'] == 1) &
(pull_requests['action'] == 'merged')].drop(columns={'merged', 'action'}).copy()
total_merged_pull_requests = []
for index, row in df.iterrows():
total_merged_pull_requests.append(df[
(row['project_id'] == df['project_id']) &
((row['year'] > df['year']) | ((row['year'] == df['year']) &
(row['month'] >= df['month'])))]['new_pull_requests'].sum())
df['total_merged_pull_requests'] = total_merged_pull_requests
df.drop(columns={'new_pull_requests'}, inplace=True)
df.to_pickle(prepared_data_path + 'total_merged_pull_requests.pkl')
return df
def total_unmerged_pull_requests(pull_requests):
df = pull_requests[(pull_requests['merged'] == 0) &
(pull_requests['action'] == 'closed')].drop(columns={'merged', 'action'}).copy()
total_unmerged_pull_requests = []
for index, row in df.iterrows():
total_unmerged_pull_requests.append(df[
(row['project_id'] == df['project_id']) &
((row['year'] > df['year']) | ((row['year'] == df['year']) &
(row['month'] >= df['month'])))]['new_pull_requests'].sum())
df['total_unmerged_pull_requests'] = total_unmerged_pull_requests
df.drop(columns={'new_pull_requests'}, inplace=True)
df.to_pickle(prepared_data_path + 'total_unmerged_pull_requests.pkl')
return df
pull_requests = pre_prepare_pull_requests(pd.read_pickle(cleaned_data_path + 'pull_requests_with_history.pkl'))
print(pull_requests)
opened_pull_requests_to_merge = opened_pull_requests_to_merge(pull_requests)
print(opened_pull_requests_to_merge)
merged_pull_requests = merged_pull_requests(pull_requests)
print(merged_pull_requests)
closed_merged_pull_requests = closed_merged_pull_requests(pull_requests)
print(closed_merged_pull_requests)
opened_pull_requests_to_discard = opened_pull_requests_to_discard(pull_requests)
print(opened_pull_requests_to_discard)
closed_unmerged_pull_requests = closed_unmerged_pull_requests(pull_requests)
print(closed_unmerged_pull_requests)
total_merged_pull_requests = total_merged_pull_requests(pull_requests)
print(total_merged_pull_requests)
total_unmerged_pull_requests = total_unmerged_pull_requests(pull_requests)
print(total_unmerged_pull_requests)
# pull request comments
def prepare_pull_request_comments(pull_request_comments):
pull_request_comments = merge_projects_into_pull_request_comments(pull_request_comments)
add_year_and_month_to_df(pull_request_comments)
pull_request_comments = add_new_pull_request_comments_col(pull_request_comments)
add_total_pull_request_comments(pull_request_comments)
pull_request_comments.to_pickle(prepared_data_path + 'new_pull_request_comments.pkl')
return pull_request_comments
def merge_projects_into_pull_request_comments(pull_request_comments):
pull_requests = pd.read_pickle(cleaned_data_path + 'pull_requests.pkl')
return pd.merge(pull_requests, pull_request_comments, on=['pull_request_id'], sort=False) \
.drop(columns={'pull_request_id', 'merged', 'comment_id'}).copy()
def add_new_pull_request_comments_col(pull_request_comments):
return pull_request_comments.groupby(['project_id', 'year', 'month']).count().reset_index() \
.rename(columns={'created_at': 'new_pull_request_comments'}).copy()
def add_total_pull_request_comments(pull_request_comments):
total_pull_request_comments = []
for index, row in pull_request_comments.iterrows():
total_pull_request_comments \
.append(pull_request_comments[(((row['year'] > pull_request_comments['year']) |
((row['year'] == pull_request_comments['year']) & (row['month'] >= pull_request_comments['month']))) &
(row['project_id'] == pull_request_comments['project_id']))]['new_pull_request_comments'].sum())
pull_request_comments['total_pull_request_comments'] = pd.Series(total_pull_request_comments)
pull_request_comments = prepare_pull_request_comments(pd.read_pickle(cleaned_data_path + 'pull_request_comments.pkl'))
print(pull_request_comments)
# watchers
def prepare_watchers(watchers):
add_year_and_month_to_df(watchers)
watchers = add_new_watchers_col(watchers)
add_total_watchers_col(watchers)
watchers.to_pickle(prepared_data_path + 'new_watchers.pkl')
return watchers
def add_new_watchers_col(watchers):
return watchers.groupby(['project_id', 'year', 'month']).count().reset_index() \
.rename(columns={'created_at': 'new_watchers'}).copy()
def add_total_watchers_col(watchers):
total_watchers = []
for index, row in watchers.iterrows():
total_watchers.append(watchers[(row['project_id'] == watchers['project_id']) &
((row['year'] > watchers['year']) |
((row['year'] == watchers['year']) &
(row['month'] > watchers['month'])))]['new_watchers'].sum())
watchers['total_watchers'] = pd.Series(total_watchers)
watchers = prepare_watchers(pd.read_pickle(cleaned_data_path + 'watchers.pkl'))
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2019-05-22 17:45
# @Author : erwin
import datetime
import time
import pandas as pd
from component.demo_opentsdb.opentsdb_conn import OpenTSDBClient
from machine_learning.similarity.dtw.hierarchical_helper import HierarchicalHelper
from common.pickle_helper import store_model
pd.set_option('display.max_columns', 1000)
from Clean_Fixture_DF import Fixture_DF
import pandas as pd
# These standings are for the Premier League Teams
# This is STEP 2 of the process to create the new season file
fix = Fixture_DF()
class Standings:
def __init__(self):
self.standings = pd.DataFrame()
self.standings["Team"] = fix.team_list
self.standings["MP"] = None
self.standings["W"] = None
self.standings["D"] = None
self.standings["L"] = None
self.standings["Pts"] = None
self.standings["GF"] = None
self.standings["GA"] = None
self.standings["GD"] = None
self._win()
self._loss()
self._draw()
self._points()
self._matches_played()
self._goals_for()
self._goals_against()
self._goal_difference()
self.standings = (self.standings.sort_values(["Pts", "GD"], ascending=False)).reset_index(drop=True)
def _win(self):
n = 0
for i in fix.team_list:
t = (fix.fixture_list_df[(fix.fixture_list_df["Home"] == f"{i}") | (fix.fixture_list_df["Away"] == f"{i}")])
wins = len(t[t["Winner"] == f"{i}"])
self.standings.loc[n, "W"] = wins
n += 1
def _loss(self):
n = 0
for i in fix.team_list:
t = (fix.fixture_list_df[(fix.fixture_list_df["Home"] == f"{i}") | (fix.fixture_list_df["Away"] == f"{i}")])
wins = len(t[t["Loser"] == f"{i}"])
self.standings.loc[n, "L"] = wins
n += 1
def _draw(self):
n = 0
for i in fix.team_list:
t = (fix.fixture_list_df[(fix.fixture_list_df["Home"] == f"{i}") | (fix.fixture_list_df["Away"] == f"{i}")])
wins = len(t[t["Winner"] == "Tie"])
self.standings.loc[n, "D"] = wins
n += 1
def _points(self):
self.standings["Pts"] = (self.standings["W"] * 3) + (self.standings["D"])
def _matches_played(self):
self.standings["MP"] = self.standings["W"] + self.standings["D"] + self.standings["L"]
def _goals_for(self):
n = 0
for i in fix.team_list:
goals = 0
# Home Goals For
h = (fix.fixture_list_df[(fix.fixture_list_df["Home"] == f"{i}")])
h = h.reset_index(drop=True)
for row in range(len(h)):
s = h["Score"][row]
goals += int(s[0])
# Away Goals For
a = (fix.fixture_list_df[(fix.fixture_list_df["Away"] == f"{i}")])
a = a.reset_index(drop=True)
for r in range(len(a)):
s = a["Score"][r]
goals += int(s[2])
self.standings.loc[n, "GF"] = goals
n += 1
def _goals_against(self):
n = 0
for i in fix.team_list:
goals = 0
# Home Goals Against
h = (fix.fixture_list_df[(fix.fixture_list_df["Home"] == f"{i}")])
h = h.reset_index(drop=True)
for row in range(len(h)):
s = h["Score"][row]
goals += int(s[2])
# Away Goals Against
a = (fix.fixture_list_df[(fix.fixture_list_df["Away"] == f"{i}")])
a = a.reset_index(drop=True)
for r in range(len(a)):
s = a["Score"][r]
goals += int(s[0])
self.standings.loc[n, "GA"] = goals
n += 1
def _goal_difference(self):
self.standings["GD"] = self.standings["GF"] - self.standings["GA"]
class xStandings(Standings):
def __init__(self):
super().__init__()
        self.xStandings = pd.DataFrame()
"""
Pull my Garmin sleep data via json requests.
This script was adapted from: https://github.com/kristjanr/my-quantified-sleep
The aforementioned code required the user to manually define
headers and cookies. It also stored all of the data within Night objects.
My modifications include using selenium to drive a Chrome browser. This avoids
the hassle of getting headers and cookies manually (the cookies would have to be updated
everytime the Garmin session expired). It also segments data requests because
Garmin will respond with an error if more than 32 days are requested at once. Lastly,
data is stored as a pandas dataframe and then written to a user-defined directory
as a pickle file.
Data is this processed and merged with older data from my Microsft smartwatch.
The merged data is also saved as pandas dataframes in pickle files.
Lastly, sunrise and sunset data is downloaded for all days in the sleep dataset.
This data is also archived as a pandas dataframe and saved as a pickle file.
The data update process hs been broken into steps so that progress can be passed
to the Dash app.
"""
# import base packages
import datetime, json, os, re, sys
from itertools import chain
from os.path import isfile
# import installed packages
import pytz, requests, chardet, brotli
import numpy as np
import pandas as pd
from pandas.tseries.holiday import USFederalHolidayCalendar as calendar
from seleniumwire import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support import expected_conditions as ec
from selenium.webdriver.support.ui import WebDriverWait
# input variables
if os.name == "nt":
# running on my local Windows machine
ENV = "local"
else:
# running on heroku server
ENV = "heroku"
if ENV == "local":
proj_path = "C:/Users/adiad/Anaconda3/envs/SleepApp/sleep_app/" # read/write data dir
else:
proj_path = ""
GOOGLE_CHROME_PATH = '/app/.apt/usr/bin/google-chrome'
CHROMEDRIVER_PATH = '/app/.chromedriver/bin/chromedriver'
garmin_results_pkl_fn = "data/garmin_sleep_df.pkl" # name of pickle file to archive (combining new results with any previous Garmin) for easy updating and subsequent processing
garmin_results_json_fn = "data/new_garmin_sleep.json" # name of json file with only new raw results
garmin_results_csv_fn = "data/garmin_sleep_df.csv" # name of csv file to archive (combining new results with any previous)
all_descr_results_fn = "data/all_sleep_descr_df.pkl" # name of pickle file combining all Garmin & Microsift sleep session description data
all_event_results_fn = "data/all_sleep_event_df.pkl" # name of pickle file combining all Garmin & Microsoft event data
sun_pkl_fn = "data/sun_df.pkl" # name of pickel file to archive sunrise/sunset data
local_tz = "US/Eastern" # pytz local timezone for sunrise/sunset time conversion
sun_lat = 39.76838 # latitude where sunrise/sunset times are derived from
sun_lon = -86.15804 # longitude where sunrise/sunset times are derived from
run_browser_headless = False # will hide Chrome during execution if True
browser_action_timeout = 60 # max time (seconds) for browser wait operations
start_date = '2017-03-01' # first date to pull sleep data
end_date = str(datetime.date.today() - datetime.timedelta(days=1)) # last date to pull sleep data
user_name = "email address" # Garmin username
password = "password" # Garmin password
signin_url = "https://connect.garmin.com/signin/" # Garmin sign-in webpage
sleep_url_base = "https://connect.garmin.com/modern/sleep/" # Garmin sleep base URL (sans date)
sleep_url_json_req = "https://connect.garmin.com/modern/proxy/wellness-service/wellness/dailySleepsByDate"
def download(start_date, end_date, headers, session_id):
params = (
('startDate', start_date),
('endDate', end_date),
('_', session_id),
)
response = requests.get(sleep_url_json_req, headers=headers, params=params)
if response.status_code != 200:
print("RESPONSE ERROR RECEIVED:")
print('Status code: %d' % response.status_code)
response_dict = json.loads(response.content.decode('UTF-8'))
print('Content: %s' % response_dict["message"])
raise Exception
return response
def download_to_json(start_date, end_date, headers, session_id):
response = download(start_date, end_date, headers, session_id)
# most responses are in ascii (no encoding)
# sporadically a response will have brotli encoding
#print("The response is encoded with:", chardet.detect(response.content))
if chardet.detect(response.content)["encoding"] == 'ascii':
return json.loads(response.content)
else:
return brotli.decompress(response.content)
def converter(data, return_df=True):
# define functions which pass through None value because
# datetime functions don't accept value None
def sleep_timestamp(val):
if val is None:
return None
else:
return datetime.datetime.fromtimestamp(val / 1000, pytz.utc)
def sleep_timedelta(val):
if val is None:
return None
else:
return datetime.timedelta(seconds=val)
# initialize variables
if return_df:
nights = pd.DataFrame(columns=["Prev_Day", "Bed_Time", "Wake_Time",
"Awake_Dur", "Light_Dur", "Deep_Dur",
"Total_Dur", "Nap_Dur", "Window_Conf"])
i = 0
else:
nights = []
for d in data:
bed_time = sleep_timestamp(d['sleepStartTimestampGMT'])
wake_time = sleep_timestamp(d['sleepEndTimestampGMT'])
previous_day = datetime.date(*[int(datepart) for datepart in d['calendarDate'].split('-')]) - datetime.timedelta(days=1)
deep_duration = sleep_timedelta(d['deepSleepSeconds'])
light_duration = sleep_timedelta(d['lightSleepSeconds'])
total_duration = sleep_timedelta(d['sleepTimeSeconds'])
awake_duration = sleep_timedelta(d['awakeSleepSeconds'])
nap_duration = sleep_timedelta(d['napTimeSeconds'])
window_confirmed = d['sleepWindowConfirmed']
if return_df:
nights.loc[i] = [previous_day, bed_time, wake_time, awake_duration,
light_duration, deep_duration, total_duration,
nap_duration, window_confirmed]
i += 1
else:
night = Night(bed_time, wake_time, previous_day, deep_duration,
light_duration, total_duration, awake_duration)
            nights.append(night)
return nights
# this function returns a list of all dates in [date1, date2]
def daterange(date1, date2):
date_ls = [date1]
for n in range(int((date2 - date1).days)):
date_ls.append(date_ls[-1] + datetime.timedelta(days=1))
return date_ls
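# Example (illustrative): daterange(datetime.date(2020, 1, 1), datetime.date(2020, 1, 3))
# returns [date(2020, 1, 1), date(2020, 1, 2), date(2020, 1, 3)]; both endpoints are included.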
# steps to updating sleep data:
# Step 0: determine which dates are missing in the archived Garmin dataset,
# given the input start & end dates
# Step 1: Login to connect.garmin.com, get user setting credentials
# Step 2: Using credentials, download missing data from Garmin in json
# Step 3: process new Garmin data, merge it with archived data
# Step 4: download sunrise/sunset data for new dates and merge with archived data
def step0():
# make a list of all dates from first sleep date to last (fills any missing dates)
req_dates_ls = daterange(
datetime.datetime.strptime(start_date, "%Y-%m-%d").date(),
datetime.datetime.strptime(end_date, "%Y-%m-%d").date()
)
# Look for previous results
if isfile(proj_path + garmin_results_pkl_fn):
nights_df = pd.read_pickle(proj_path + garmin_results_pkl_fn)
else:
nights_df = pd.DataFrame()
# if previous results were found, reduce requested dates to those not yet obtained
if len(nights_df) > 0:
# get list of requested dates not yet obtained
archive_dates_ls = list(nights_df["Prev_Day"])
new_req_dates_ls = np.setdiff1d(req_dates_ls, archive_dates_ls)
else:
new_req_dates_ls = req_dates_ls
#print("Archive max: ", max(archive_dates_ls))
#print("Request max: ", max(req_dates_ls))
if len(new_req_dates_ls) == 0:
msg = "Archived data is up to date, no new data is available"
else:
msg = "Current data was checked and " + str(len(new_req_dates_ls)) + " night(s) are needed"
return [msg, nights_df, new_req_dates_ls]
def step1():
opts = webdriver.ChromeOptions()
opts.add_argument('--disable-gpu')
opts.add_argument('--no-sandbox')
opts.add_argument('--disable-dev-shm-usage')
if ENV == "local":
if run_browser_headless:
            opts.add_argument("--headless")
assert opts.headless # Operating in headless mode
else:
opts.binary_location = GOOGLE_CHROME_PATH
# open firefox and goto Garmin's sign-in page
print("Opening Chrome browser")
driver = webdriver.Chrome(chrome_options=opts)
driver.get(signin_url)
# wait until sign-in fields are visible
wait = WebDriverWait(driver, browser_action_timeout)
wait.until(ec.frame_to_be_available_and_switch_to_it(("id","gauth-widget-frame-gauth-widget")))
wait.until(ec.presence_of_element_located(("id","username")))
# write login info to fields, then submit
print("Signing in to connect.garmin.com")
element = driver.find_element_by_id("username")
driver.implicitly_wait(5)
element.send_keys(user_name)
element = driver.find_element_by_id("password")
element.send_keys(password)
element.send_keys(Keys.RETURN)
wait.until(ec.url_changes(signin_url)) # wait until landing page is requested
driver.switch_to.default_content() # get out of iframe
# get dummy webpage to obtain all request headers
print("Loading dummy page to obtain headers")
driver.get(sleep_url_base + start_date)
request = driver.wait_for_request(sleep_url_base + start_date,
timeout=browser_action_timeout)
if (request.response.status_code != 200) | (~ hasattr(request, "headers")):
print("RESPONSE ERROR RECEIVED:")
if (request.response.status_code != 200):
print("Status code: %d" % request.response.status_code)
#response_dict = json.loads(request.content.decode('UTF-8'))
print("Reason: ", request.response.reason)
if (~ hasattr(request, "headers")):
print("Request did not have 'headers' attribute")
print("Request attributes: ", dir(request))
print("Request headers: ", request.headers)
#raise Exception
    # close the Chrome browser
driver.close()
msg = "Logged in to connect.garmin.com"
return [msg, request]
def step2(request, new_req_dates_ls):
# transfer request headers
headers = {
"cookie": request.headers["Cookie"],
"referer": sleep_url_base + start_date,
"accept-encoding": request.headers["Accept-Encoding"],
"accept-language": "en-US", # request.headers["Accept-Language"],
"user-agent": request.headers["User-Agent"],
#"nk": "NT",
"accept": request.headers["Accept"],
"authority": request.headers["Host"],
#"x-app-ver": "4.25.3.0",
"upgrade-insecure-requests": request.headers["Upgrade-Insecure-Requests"]
}
# get the session id from the headers
    re_session_id = re.compile(r"(?<=\$ses_id:)(\d+)")
session_id = re_session_id.search(str(request.headers)).group(0)
# Garmin will throw error if request time span exceeds 32 days
# therefore, request 32 days at a time
max_period_delta = datetime.timedelta(days=31)
data = [] # list of jsons, one per time period
get_dates_ls = new_req_dates_ls
while len(get_dates_ls) > 0:
period_start = min(get_dates_ls)
if (max(get_dates_ls) - period_start) > (max_period_delta - datetime.timedelta(days=1)):
period_end = period_start + max_period_delta
else:
period_end = max(get_dates_ls)
# note, this may request some dates which were already obtained
# since a contiguous period is being requested rather than 32 new dates
# duplicated dates will be dropped later
print("Getting data for period: [%s, %s]" % (period_start, period_end))
data.append(download_to_json(period_start, period_end, headers, session_id))
# trim dates list
get_dates_ls = [d for d, s in zip(get_dates_ls, np.array(get_dates_ls) > period_end) if s]
# combine list of jsons into one large json
data = list(chain.from_iterable(data))
# save raw Garmin json to project folder
with open(proj_path + garmin_results_json_fn, 'w') as fp:
json.dump(data, fp)
msg = "Data has been downloaded from Garmin"
return [msg, data]
def step3(nights_df, data, new_req_dates_ls):
# clean the new garmin data
new_nights_df = converter(data)
new_nights_df["Prev_Day"] = pd.to_datetime(new_nights_df["Prev_Day"])
if pd.to_datetime(new_nights_df["Bed_Time"]).dt.tz is None:
new_nights_df["Bed_Time"] = pd.to_datetime(new_nights_df["Bed_Time"]). \
dt.tz_localize(local_tz)
else:
new_nights_df["Bed_Time"] = pd.to_datetime(new_nights_df["Bed_Time"]). \
dt.tz_convert(local_tz)
if pd.to_datetime(new_nights_df["Wake_Time"]).dt.tz is None:
new_nights_df["Wake_Time"] = pd.to_datetime(new_nights_df["Wake_Time"]). \
dt.tz_localize(local_tz)
else:
new_nights_df["Wake_Time"] = pd.to_datetime(new_nights_df["Wake_Time"]). \
dt.tz_convert(local_tz)
new_nights_df["Light_Dur"] = pd.to_timedelta(new_nights_df["Light_Dur"], "days")
new_nights_df["Deep_Dur"] = pd.to_timedelta(new_nights_df["Deep_Dur"], "days")
new_nights_df["Total_Dur"] = pd.to_timedelta(new_nights_df["Total_Dur"], "days")
new_nights_df["Nap_Dur"] = pd.to_timedelta(new_nights_df["Nap_Dur"], "days")
# fill df with missing dates so that subsequent updates won't keep
# requesting data which Garmin doesn't have
new_missing_dates_ls = np.setdiff1d(new_req_dates_ls, new_nights_df["Prev_Day"].dt.date)
new_missing_row = [pd.NaT, pd.NaT, pd.NaT, pd.NaT, pd.NaT, pd.NaT, pd.NaT, np.NAN]
for d in new_missing_dates_ls:
new_nights_df.loc[len(new_nights_df)] = [d] + new_missing_row
# drop any nights which were already in the archived pickle file,
# then merge it with archived data
if len(nights_df) > 0:
new_nights_df = new_nights_df[~new_nights_df["Prev_Day"].isin(nights_df["Prev_Day"])]
nights_df = nights_df.append(new_nights_df, sort=True).sort_values("Prev_Day", axis=0)
else:
nights_df = new_nights_df.sort_values("Prev_Day", axis=0)
# trim most recent nights which have NaT durations because they were likely caused
# by the smartwatch not yet having synced with Garmin for those dates
unknown_nights_ls = []
i = 1
while pd.isnull(nights_df.Total_Dur.iloc[-i]) & (len(nights_df) >= i):
unknown_nights_ls.append(nights_df.Prev_Day.iloc[-i])
i += 1
nights_df = nights_df[~nights_df["Prev_Day"].isin(unknown_nights_ls)]
# save merged results
#nights_df.to_csv(proj_path + garmin_results_csv_fn)
nights_df.to_pickle(proj_path + garmin_results_pkl_fn)
# clean garmin data for dashboard
garmin_df = nights_df.drop(["Nap_Dur", "Window_Conf"], axis=1)
# calculate time of day in decimal hours of each event (asleep & wake)
garmin_df["Bed_ToD"] = garmin_df["Bed_Time"].dt.hour + garmin_df["Bed_Time"].dt.minute/60
garmin_df["Bed_ToD"] -= 24*(garmin_df["Bed_ToD"] > 12) # make PM bed times negative
garmin_df["Wake_ToD"] = garmin_df["Wake_Time"].dt.hour + garmin_df["Wake_Time"].dt.minute/60
# read & wrangle old microsoft sleep data
ms2015_df = pd.read_csv(proj_path + "data/Activity_Summary_20150101_20151231.csv")
    ms2016_df = pd.read_csv(proj_path + "data/Activity_Summary_20160101_20161231.csv")
def columns_datatypes(myDataFrame):
import numpy as np
import pandas as pd
"""
This function goal is to generate a dictionnary which build some metadatas
about the column datatypes from the entry DataFrame.
RESULT : a dictionnary with 2 keys:
- 'ColumnTypes' gives a summary of each column datatype recognized within
the DataFrame : its values are the lists of the columns whose type match
with the dictionary key.
- 'CountTypes' gives the number of columns matching with each datatype.
PARAMS :
- 'MyDataFrame' : The entry DataFrame
EXAMPLE :
columns_datatypes(df)
>>
{'ColumnType': {
'float64': ['CustomerID'],
'int64': ['Quantity'],
'object': ['InvoiceNo','StockCode','Description',
'InvoiceDate','UnitPrice','Country']
},
'CountType': {
'float64': 1,
'int64': 1,
'object': 6
}
}
"""
#We list the datatypes recognized by a pandas DataFrame.
L_dtypes = ['float64','int64','object','datetime64[ns]','float32','bool','int8']
L_emptyKeys = []
dict_dtypes = {'float64' :[],
'int64' :[],
'object' :[],
'datetime64[ns]' :[],
'float32' :[],
'bool' :[],
'int8' :[],
'dtypeReject' : []}
present_types = {}
df_dtypes = pd.DataFrame(myDataFrame.dtypes,columns=['datatype'])
for columnName in df_dtypes.index :
datum = df_dtypes.loc[columnName,'datatype']
datum = str(datum)
if datum in L_dtypes:
dict_dtypes[datum].append(columnName)
else :
dict_dtypes['dtypeReject'].append(columnName)
for datatype in dict_dtypes:
if len(dict_dtypes[datatype])>0:
present_types[datatype]=len(dict_dtypes[datatype])
else:
L_emptyKeys.append(datatype)
for datatypekey in L_emptyKeys :
del dict_dtypes[datatypekey]
return({'ColumnType':dict_dtypes , 'CountType':present_types})
def content_analysis(myDataFrame):
import numpy as np
import pandas as pd
"""
This function goal is to generate a DataFrame which contains metadatas about
a DataFrame content.
IMPORTANT : This function uses the output of the 'columns_datatypes'
function so you need both functions for using this one.
RESULT : A DataFrame which contains metadatas about the entry DataFrame :
- 'nullCount' : number of missing values
- 'percent_missing' : percent of missing values compared to the DataFrame
lenght
- 'Unique' : number of unique values.
- 'Unique_percent' : percent of unique values compared to the DataFrame
lenght
- 'Datatype' : datatype recocgnized for each columns
- 'col_missing>10','col_missing>20','col_missing>30','col_missing>40' : 4 columns
which contains 'r' if the percent of missing values are respectively under 10%,
20%, 30% and 40% or 'g' on the other hand : this is useful for plotting
the missing values by columns with colors.
PARAMS :
- 'MyDataFrame' : The entry DataFrame
"""
Empty_List = []
Unique_values = []
myDataFrame_len = len(myDataFrame)
for column in list(myDataFrame):
Empty_List.append(len(myDataFrame[column].unique()))
#We build the Dataframe of unique values percents :
UniqueDataframe = pd.DataFrame(Empty_List,index=list(myDataFrame),columns=['Unique'])
UniqueDataframe['Unique_percent'] = (UniqueDataframe['Unique']/myDataFrame_len)*100
DataTypeDataFrame = pd.DataFrame([],index=list(myDataFrame),columns=['Datatype'])
for datatype in columns_datatypes(myDataFrame)['ColumnType'].keys():
columnList = columns_datatypes(myDataFrame)['ColumnType'][datatype]
for columnName in columnList:
            DataTypeDataFrame.loc[columnName, 'Datatype'] = datatype
#We build the summary DataFrame :
SummaryDataFrame = pd.DataFrame(myDataFrame.isnull().sum(),columns=['nullCount'])
SummaryDataFrame['percent_missing']=np.nan
SummaryDataFrame['percent_missing']=(SummaryDataFrame['nullCount']/myDataFrame_len)*100
L_null = SummaryDataFrame[SummaryDataFrame['nullCount'] == myDataFrame_len].index.tolist()
SummaryDataFrame = pd.concat([SummaryDataFrame,UniqueDataframe,DataTypeDataFrame],axis=1)
for criterium in range(10,41,10):
missing_col_criterium = "col_missing>%s"%criterium
SummaryDataFrame[missing_col_criterium] = np.where(SummaryDataFrame['percent_missing']>criterium, 'r', 'g')
return(SummaryDataFrame)
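# Illustrative usage sketch (not part of the original module); the example frame
# and its column names below are assumptions made purely for demonstration.
def _example_content_analysis():
    import numpy as np
    import pandas as pd
    df_example = pd.DataFrame({'price': [10.0, np.nan, 12.5],
                               'country': ['FR', 'US', None]})
    summary = content_analysis(df_example)
    # One row per column: null counts, percent missing, unique counts, dtype,
    # plus the 'col_missing>XX' color flags used for plotting.
    print(summary)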
def identify_my_quantiles(my_rfmd_DataFrame,my_quantile_column):
"""
This function goal is to build an identifier DataFrame (which will be used
in the "scatter_plot" function).
Based on RFMD quartiles, the function returns a "main_category" and a
"color" columns.
RESULT : A DataFrame which contains a "main_category" and a "color" columns
PARAMS :
- 'my_rfmd_DataFrame' refers to the dataframe we want to identify : it must
contain the column we want to flag as category and color
- 'my_quantile_column' refers to the column we want to flag
"""
import pandas as pd
L_quantiles =list(my_rfmd_DataFrame[my_quantile_column])
df_quantiles = pd.DataFrame(L_quantiles,columns=['color'])
df_quantiles['main_category']=df_quantiles['color'].astype(str)
return df_quantiles
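# Example (illustrative): if df_rfm contains an 'rfm_quartile' column with values
# in {1, 2, 3, 4}, identify_my_quantiles(df_rfm, 'rfm_quartile') returns a frame
# whose 'color' column holds those quartiles and whose 'main_category' column is
# their string form, ready to be consumed by the scatter-plot helper mentioned above.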
def RScore(x,param,dictionary):
"""
This function goal is to build a column of quartiles (1,2,3 or 4) based on a
continuous feature values.
The more the feature is high, the more the quartile returned is low
RESULT : A new DataFrame column which contains the quartiles applied to the
continuous feature.
PARAMS :
- 'x' refers to the feature
- 'param' refers to the key we want to use in our dictionnary of quartiles
- 'dictionary' refers to a dictionary of quartiles
example :
quantiles = {
'recency': {0.25: 16.0, 0.5: 60.0, 0.75: 149.0}
}
my_dataframe['r_quartile'] = my_dataframe['recency'].apply(RScore, args=('recency',quantiles,))
"""
if x <= dictionary[param][0.25]:
return 1
elif x <= dictionary[param][0.50]:
return 2
elif x <= dictionary[param][0.75]:
return 3
else:
return 4
def FMScore(x,param,dictionary):
"""
This function goal is to build a column of quartiles (1,2,3 or 4) based on a
continuous feature values.
The more the feature is high, the more the quartile returned is high
RESULT : A new DataFrame column which contains the quartiles applied to the
continuous feature.
PARAMS :
- 'x' refers to the feature
- 'param' refers to the key we want to use in our dictionnary of quartiles
- 'dictionary' refers to a dictionary of quartiles
example :
quantiles = {
'density': {0.25: 73.0, 0.5: 133.2, 0.75: 230.53125},
'monetary_value': {0.25: 265.9249999999999, 0.5: 580.05, 0.75: 1404.515},
'recency': {0.25: 16.0, 0.5: 60.0, 0.75: 149.0}
}
my_dataframe['d_quartile'] = my_dataframe['density'].apply(RScore, args=('density',quantiles,))
"""
if x <= dictionary[param][0.25]:
return 4
elif x <= dictionary[param][0.50]:
return 3
elif x <= dictionary[param][0.75]:
return 2
else:
return 1
def apply_quartiles(myDataFrame,myMappingColumnDict,myQuartileFunction,QuantilesDict,bool_convert_in_percent):
"""
This function goal is to compute quartiles from a DataFrame continuous
columns.
IMPORTANT : You need a function which compute quartiles (RScore or FMScore)
if you want to use this fuction.
RESULT : The entry DataFrame with quartiles computed
PARAMS :
- 'myDataFrame' refers to the entry DataFrame
- 'myMappingColumnDict' refers to a dictionary which maps the entry DataFrame
columns names with the new names we want in the function output
- 'myQuartileFunction' refers to the function we want to use in order to
compute the quariles (RScore or FMScore)
- 'QuantilesDict' refers to the quantiles dictionnary we want to use in order
to apply the transform.
- 'bool_convert_in_percent' is a boolean attributes that specify if we want or
not convert the quartiles in percents.
"""
myTempDataFrame = myDataFrame.copy()
for column in myMappingColumnDict.keys():
new_column_name = myMappingColumnDict[column]
myDataFrame[new_column_name] = myDataFrame[column].apply(myQuartileFunction, args=(column,QuantilesDict,))
if bool_convert_in_percent == True:
myDataFrame[new_column_name] = 100*(1/ myDataFrame[new_column_name])
            myDataFrame.loc[myTempDataFrame[column] == 0, new_column_name] = 0
del myTempDataFrame
return myDataFrame
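# Illustrative usage sketch (not part of the original module); the sample frame,
# the quantile values and the output column name are assumptions for demonstration.
def _example_apply_quartiles():
    import pandas as pd
    rfm = pd.DataFrame({'recency': [5, 40, 90, 300]})
    quantiles = {'recency': {0.25: 16.0, 0.5: 60.0, 0.75: 149.0}}
    rfm = apply_quartiles(rfm, {'recency': 'r_quartile'}, RScore, quantiles, False)
    print(rfm)  # r_quartile -> 1, 2, 3, 4 (lower recency gives a lower quartile)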
def values_to_col(myDataFrame,myColumnList,bool_with_old_col_name):
from sklearn.feature_extraction.text import CountVectorizer
import pandas as pd
"""
This function goal is to treat categorical features in a pandas DataFrame
list of columns:
From a categorical column 'CC' which contains 'N' attributes
[att1, att2, att3,..., attn ] we create N new vectors/features/columns :
- row to row, if the category was present at the intersection of 'CC'
and the row,then the value at the intersection of the row and the new
column is 1
- else, the value at the intersection of the row and the new column is 0
The relation between rows and columns is kept
RESULT : The entry DataFrame with the new categorical vectors :
2 new columns are also created :
- 'created_columns' : a column with a list of all the new created columns
- 'dict_mapping' : a column with a dictionary which maps the old columns
with the columns they generated
PARAMS :
- 'myDataFrame' refers to the DataFrame we interest in
- 'myColumnList' refers to the list of columns (the list can have only one
value but it must be a list) we want to vectorize
- 'bool_with_old_col_name' is a boolean attribute that specify if we want to
keep the old columns names or not :
--> example : with old names, new columns are :
CC_att1, CC_att2, CC_att3,..., CC_attn
--> example : without old names : att1, att2, att3,..., attn
"""
created_columns = []
dict_mapping = {}
for column in myColumnList:
#Missing values filling
        myDataFrame[column] = myDataFrame[column].fillna('none')
newFeatures = []
corpus = myDataFrame[column]
vectorizer = CountVectorizer(min_df=1,max_df=1.0)
#Construction of the row/words Matrix
X = vectorizer.fit_transform(corpus).toarray()
feature_names = vectorizer.get_feature_names()
for feature in feature_names:
if bool_with_old_col_name==True:
newFeatureName = '%s_%s'%(column,feature)
else:
newFeatureName = feature
newFeatures.append(newFeatureName)
created_columns.append(newFeatureName)
if column in dict_mapping :
dict_mapping[column].append(newFeatureName)
else:
dict_mapping[column] = [newFeatureName]
#Construction of the row/words DataFrame
myfeaturedf = pd.DataFrame(X,columns=newFeatures)
        myDataFrame = pd.concat([myDataFrame.reindex(myfeaturedf.index), myfeaturedf], axis=1)
myDataFrame['created_columns']=[created_columns]*len(myDataFrame)
myDataFrame['dict_mapping']=[dict_mapping]*len(myDataFrame)
return myDataFrame
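# Illustrative usage sketch (not part of the original module); the example frame
# and its categories are assumptions for demonstration.
def _example_values_to_col():
    import pandas as pd
    df_example = pd.DataFrame({'city': ['paris', 'rome', 'paris']})
    vectorized = values_to_col(df_example, ['city'], True)
    # New 0/1 columns 'city_paris' and 'city_rome' are appended, together with
    # the 'created_columns' and 'dict_mapping' bookkeeping columns.
    print(vectorized[['city', 'city_paris', 'city_rome']])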
def vectors_metadata(myVectorizedDataFrame,argument):
    import numpy as np
    import pandas as pd
"""
This function goal is to build some metadatas that summarizes the effects
of the 'values_to_col' myQuartileFunction
IMPORTANT : it is not possible to use it before the use of the
'values_to_col' function because we need the 'created_columns' and
'dict_mapping' columns
RESULT : A summary of the vectors creation resulting from the 'values_to_col'
function
PARAMS :
- 'myVectorizedDataFrame' refers to the output DataFrame of the
- 'values_to_col' function'argument' refers to the granularity of the
metatadas built from the entry DataFrame:
--> if 'argument'='summary' : the metadatas will concern the impact of the
'values_to_col' function for each old column in the list 'myColumnList'
--> if 'argument'='global' : the metadatas will concern the impact of the
'values_to_col' function for each old column in the list 'myColumnList'
and each new column created
"""
L_vectorized_features = myVectorizedDataFrame['created_columns'][0]
vectorsMapping = myVectorizedDataFrame['dict_mapping'][0]
df_mapping_vect = pd.DataFrame(columns=['key','value'])
for key in vectorsMapping.keys():
df_temp = pd.DataFrame(vectorsMapping[key],columns=['value'])
df_temp['key'] = key
frames = [df_mapping_vect,df_temp]
result = pd.concat(frames)
df_mapping_vect = result
df_VectorAnalysis = VectorAnalysis(myVectorizedDataFrame[L_vectorized_features])
df_VectorAnalysis['value'] = df_VectorAnalysis.index
df_VectorAnalysis = pd.merge(df_VectorAnalysis, df_mapping_vect, how='left', on=['value'])
L_metadata_cols = ['number_of_vectors','max_1_Count','min_1_Count','average_1_Count','%10_quantile_1_Count',
'%25_quantile_1_Count','%30_quantile_1_Count','median_quantile_1_Count']
for col in L_metadata_cols :
df_VectorAnalysis[col] = np.nan
for key in vectorsMapping.keys():
df_VectorAnalysis['number_of_vectors'] = np.where((df_VectorAnalysis['key']== key )
, len(df_VectorAnalysis['Vect_1_Count'][(df_VectorAnalysis['key']==key)]),
df_VectorAnalysis['number_of_vectors'])
df_VectorAnalysis['max_1_Count'] = np.where((df_VectorAnalysis['key']== key )
, max(df_VectorAnalysis['Vect_1_Count'][df_VectorAnalysis['key']==key]),
df_VectorAnalysis['max_1_Count'])
df_VectorAnalysis['min_1_Count'] = np.where((df_VectorAnalysis['key']== key )
, min(df_VectorAnalysis['Vect_1_Count'][df_VectorAnalysis['key']==key]),
df_VectorAnalysis['min_1_Count'])
df_VectorAnalysis['average_1_Count'] = np.where((df_VectorAnalysis['key']== key )
, np.mean(df_VectorAnalysis['Vect_1_Count'][df_VectorAnalysis['key']==key]),
df_VectorAnalysis['average_1_Count'])
df_VectorAnalysis['%10_quantile_1_Count'] = np.where((df_VectorAnalysis['key']== key )
, df_VectorAnalysis['Vect_1_Count'][df_VectorAnalysis['key']==key].quantile(0.1),
df_VectorAnalysis['%10_quantile_1_Count'])
df_VectorAnalysis['%25_quantile_1_Count'] = np.where((df_VectorAnalysis['key']== key )
, df_VectorAnalysis['Vect_1_Count'][df_VectorAnalysis['key']==key].quantile(0.25),
df_VectorAnalysis['%25_quantile_1_Count'])
df_VectorAnalysis['%30_quantile_1_Count'] = np.where((df_VectorAnalysis['key']== key )
, df_VectorAnalysis['Vect_1_Count'][df_VectorAnalysis['key']==key].quantile(0.30),
df_VectorAnalysis['%30_quantile_1_Count'])
df_VectorAnalysis['median_quantile_1_Count'] = np.where((df_VectorAnalysis['key']== key )
, df_VectorAnalysis['Vect_1_Count'][df_VectorAnalysis['key']==key].quantile(0.5),
df_VectorAnalysis['median_quantile_1_Count'])
df_SynthVectorAnalysis = pd.DataFrame(df_VectorAnalysis[['key','number_of_vectors','max_1_Count','min_1_Count','average_1_Count',
'%10_quantile_1_Count','%25_quantile_1_Count','%30_quantile_1_Count',
'median_quantile_1_Count']]).drop_duplicates(subset=None, keep='first', inplace=False)
df_SynthVectorAnalysis = df_SynthVectorAnalysis.sort_values(['number_of_vectors'], ascending=[0])
df_SynthVectorAnalysis = df_SynthVectorAnalysis.set_index(df_SynthVectorAnalysis['key'], drop=False)
if argument == 'summary':
return(df_SynthVectorAnalysis)
elif argument == 'global':
return(df_VectorAnalysis)
else:
print("bad argument : choose between 'summary' and 'global'")
def percent_of_total(myDataFrame,myColumnList):
"""
This function goal is to convert each continuous columns of a determined
list into a column were the values are the percentage of the sum of all
columns included in the list.
RESULT : The entry DataFrame with columns (included in 'myColumnList')
converted into percentage of their sum.
PARAMS :
- 'myDataFrame' refers to the entry myDataFrame.
- 'myColumnList' refers to the list of columns with which we want to focus
the analysis
"""
myDataFrame['total'] = myDataFrame[myColumnList].sum(1)
for column in myColumnList:
myDataFrame[column] = 100*(myDataFrame[column]/ myDataFrame['total'])
myDataFrame.drop('total',inplace=True,axis=1)
return myDataFrame
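# Illustrative usage sketch (not part of the original module); column names are
# assumptions for demonstration.
def _example_percent_of_total():
    import pandas as pd
    df_example = pd.DataFrame({'food': [20.0, 50.0], 'travel': [80.0, 50.0]})
    shares = percent_of_total(df_example, ['food', 'travel'])
    print(shares)  # each row now sums to 100 across the listed columns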
def convert_in_list(myDataFrame,myColumn):
from ast import literal_eval
"""
This function goal is to convert a pandas column into a "list" datatype column
IMPORTANT : The column values must match with the python lists pattern in order to be read and converted correctly.
RESULT : The same column, with each value converted into an array : that's also possible to loop over the array values
PARAMS :
- myDataFrame : the entry DataFrame
- myColumn : String, the column to convert
"""
myDataFrame[myColumn] = myDataFrame[myColumn].apply(literal_eval)
return myDataFrame
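# Illustrative usage sketch (not part of the original module); the example column
# content is an assumption for demonstration.
def _example_convert_in_list():
    import pandas as pd
    df_example = pd.DataFrame({'cities': ["['Paris', 'Lyon']", "['Rome']"]})
    df_example = convert_in_list(df_example, 'cities')
    print(df_example['cities'].iloc[0][1])  # -> 'Lyon' (the cell is a real Python list now)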
def develop(myDataFrame,myArrayColumn):
import pandas as pd
"""
This function goal is to develop the values contained in a pandas column which has list of values :
IMPORTANT: The column 'myArrayColumn' must be read like a colum containing lists of values; if not, you should transform
your column into a "list datatype column" thanks to the 'convert_in_list' function.
RESULT : each value contained in a given list from a given row generates a new row : the other columns values are repeated
PARAMS :
- 'myDataFrame' : the entry DataFrame
- 'myArrayColumn' : the list datatype column you want to generate one row per couple value/value_in_the_list
"""
new_df = pd.DataFrame(columns = myDataFrame.columns)
not_focus_cols = [x for x in myDataFrame.columns if x!=myArrayColumn]
for idx in myDataFrame.index:
focus_array = myDataFrame.loc[idx,myArrayColumn]
old_values = myDataFrame.loc[idx,]
for value in focus_array:
dict_one_line = {}
dict_one_line[myArrayColumn] = value
idx_df = len(new_df)
for col in not_focus_cols:
dict_one_line[col] = old_values[col]
new_df = new_df.append(dict_one_line,ignore_index=True)
return new_df
def group_by_frequency(myDataFrame,myColumn):
import numpy as np
"""
This function goal is to build an aggregated DataFrame which contains the occurences of the catagorical terms contained in
'myColumn' args.
RESULT : an aggregated DataFrame with the occurences of each values.
- The DataFrame is sorted by descending occurences.
- It also contains :
- rank of each category in terms of occurences.
- cumsum of occurences from the first value to the last one.
- percent of total occurences covered by the upper categories at a given row.
PARAMS :
- 'myDataFrame' : the entry DataFrame
- 'myColumn' : the column concerned by the frequencies count
"""
grouped = myDataFrame.copy()
grouped['occurences'] = 1
grouped = grouped[[myColumn,'occurences']].groupby(myColumn).sum()
grouped.sort_values(by='occurences', ascending=False, inplace=True)
grouped['rank'] = range(1,len(grouped)+1)
grouped['cumsum'] = np.cumsum(grouped['occurences'])
grouped['percent_of_total'] = grouped['cumsum']/grouped['occurences'].sum()
return grouped
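# Illustrative usage sketch (not part of the original module); the example frame
# is an assumption for demonstration.
def _example_group_by_frequency():
    import pandas as pd
    df_example = pd.DataFrame({'country': ['FR', 'FR', 'US', 'FR', 'US', 'DE']})
    freq = group_by_frequency(df_example, 'country')
    # 'FR' (3 occurrences) is ranked first; 'percent_of_total' reaches 1.0 on the last row.
    print(freq)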
def aggregate_col_in_list(myDataFrame,myColumnToAggregate,GroupByColumn,bool_replace_col):
import pandas as pd
"""
This function goal is to aggregate a given column values in order to get a list of it's values groupped by an other colum
RESULT : The entry DataFrame :
- with the same number of columns if the 'bool_replace_col' argument is 'True'
- with one more column if the 'bool_replace_col' argument is 'False' : in this case the new colum has the name
of 'myColumnToAggregate' with the suffix '_list'
PARAMS :
- 'MyDataFrame' |pandas.core.frame.DataFrame : The entry DataFrame
- 'myColumnToAggregate'|str : The column you want to convert into a list
- 'GroupByColumn' |str : The column you want to use in order to aggregate your 'myColumnToAggregate' column
- 'bool_replace_col' |bool : A boolean argument with the value :
- True if you want to replace the old column by the new one
- False in the other case : the new colum has the name of 'myColumnToAggregate' with the suffix '_list'
"""
temp_dataframe = pd.DataFrame(myDataFrame.groupby(GroupByColumn)[myColumnToAggregate].apply(list))
temp_dataframe[GroupByColumn] = temp_dataframe.index
myColumnToAggregate_new_name = myColumnToAggregate+"_list"
if bool_replace_col == True:
finalDataFrame = myDataFrame.drop(myColumnToAggregate,axis=1)
finalDataFrame = pd.merge(finalDataFrame,temp_dataframe,how='left',on=GroupByColumn)
else:
temp_dataframe.rename(index=str, columns={myColumnToAggregate:myColumnToAggregate_new_name},inplace=True)
finalDataFrame = pd.merge(myDataFrame,temp_dataframe,how='left',on=GroupByColumn)
return finalDataFrame
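# Illustrative usage sketch (not part of the original module); the example frame
# and columns are assumptions for demonstration.
def _example_aggregate_col_in_list():
    import pandas as pd
    df_example = pd.DataFrame({'customer': ['A', 'A', 'B'],
                               'product': ['p1', 'p2', 'p3']})
    # Keep the original 'product' column and add a 'product_list' column per customer.
    out = aggregate_col_in_list(df_example, 'product', 'customer', False)
    print(out)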
def filter_list_of_list_values(myList,myFilterList):
"""
This function goal is to filter values contained into a list of lists
RESULT : A filtered list of lists
PARAMS :
- 'myList' : The list of lists you want to filter
- 'myFilterList' : The list of values you want to delete into 'myList'
EXAMPLE :
L = [['Paris', 'Monaco', 'Washington', 'Lyon', 'Venise', 'Marseille'],
['New-York', 'NapleWashington', 'Turin', 'Chicago', 'Las Vegas'],
['Lyon', 'Rome', 'Chicago', 'Venise', 'Naple', 'Turin']]
filter_list_of_list_values(L,['Lyon','Turin','Chicago'])
>> [['Paris', 'Monaco', 'Washington', 'Venise', 'Marseille'],
>> ['New-York', 'NapleWashington', 'Las Vegas'],
>> ['Rome', 'Venise', 'Naple']]
"""
for index in range(len(myList)):
sub_array = myList[index]
for stopword in myFilterList :
sub_array = list(filter(lambda a: a != stopword, sub_array))
sub_array = [w for w in sub_array if not w in myFilterList]
myList[index] = sub_array
return myList
def dataframe_multiprocessing(myDataFrame,myFunction,myFunctionArg,NUMBER_OF_PROCS):
import multiprocessing
import pandas as pd
import numpy as np
"""
This function goal is to apply existing pandas transformation with multiprocessing.
IMPORTANT : the transformations you use must be rows to rows transformation : this function is not adapted for aggregations
so you need to use independant (in terms of rows) transformations
RESULT : The result of your transformation ('myFunction' arg)
PARAMS :
- 'myDataFrame' : the entry DataFrame
- 'myFunction' : the transformation/function you want to use with 'dataframe_multiprocessing'
- 'myFunctionArg' : the argument of 'myFunction'
- 'NUMBER_OF_PROCS' : the number of processors to use : the entry DataFrame will be divided into
as many parts than processors used
"""
working_DataFrame = myDataFrame.copy()
output_schema = list(working_DataFrame.columns)
working_DataFrame['old_index'] = working_DataFrame.index
#We are going to execute a multiprocessing and split the DataFrame in as many parts than processors used :
#DataFrame splitting :
L_sub_dfs = np.array_split(working_DataFrame, NUMBER_OF_PROCS)
    resultDataFrame = pd.DataFrame(columns=output_schema)
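    # --- Assumed continuation (the original function is truncated at this point) ---
    # A plausible completion, not the original implementation: apply 'myFunction'
    # to each chunk in a process pool (assuming the signature myFunction(chunk,
    # myFunctionArg)), then concatenate the partial results and restore row order.
    with multiprocessing.Pool(processes=NUMBER_OF_PROCS) as pool:
        partial_results = pool.starmap(myFunction, [(chunk, myFunctionArg) for chunk in L_sub_dfs])
    resultDataFrame = pd.concat([resultDataFrame] + partial_results)
    resultDataFrame = resultDataFrame.sort_values('old_index').drop(columns='old_index')
    return resultDataFrame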
import sys
from sqlalchemy import create_engine
import pandas as pd
import pickle
import nltk
from nltk.tokenize import word_tokenize
from nltk.stem import WordNetLemmatizer
from sklearn.pipeline import Pipeline, FeatureUnion
from sklearn.multioutput import MultiOutputClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer
from sklearn.model_selection import train_test_split, GridSearchCV
from sklearn.metrics import classification_report
from sklearn.base import BaseEstimator, TransformerMixin
def load_data(database_filepath):
'''
Load dataframe from a database
'''
engine = create_engine('sqlite:///'+database_filepath)
df = pd.read_sql('SELECT * FROM message', engine)
X = df.message
y = df.iloc[:, 4:]
category_names = list(y.columns)
return X, y, category_names
def tokenize(text):
'''
Tokenize and lemmatize the text
'''
tokens = word_tokenize(text)
lemmatizer = WordNetLemmatizer()
# tokenize and lemmatize every text, and save processed tokens into a list
clean_tokens = []
for tok in tokens:
clean_tok = lemmatizer.lemmatize(tok).lower().strip()
clean_tokens.append(clean_tok)
return clean_tokens
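# Example (illustrative): tokenize("houses were flooded after the storms") returns
# roughly ['house', 'were', 'flooded', 'after', 'the', 'storm'] - tokens are
# lemmatized (noun plurals reduced), lower-cased and stripped.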
class TextLengthExtractor(BaseEstimator, TransformerMixin):
'''
A class to get the length of each tokenized text, and apply the function to all cells
'''
def textlength(self, text):
return len(tokenize(text))
def fit(self, x, y=None):
return self
def transform(self, X):
        # Apply the length function to every input document
        X_tagged = pd.Series(X).apply(self.textlength)
        return pd.DataFrame(X_tagged)
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = "<NAME>"
__email__ = "<EMAIL>"
from ddf_library.bases.metadata import OPTGroup
from ddf_library.bases.context_base import ContextBase
from pycompss.api.parameter import FILE_IN
from pycompss.api.task import task
from pycompss.functions.reduce import merge_reduce
from pycompss.api.api import compss_wait_on, compss_delete_object
from ddf_library.ddf import DDF
from ddf_library.bases.ddf_model import ModelDDF
from ddf_library.utils import generate_info, read_stage_file
import numpy as np
import pandas as pd
class PCA(ModelDDF):
# noinspection PyUnresolvedReferences
"""
Principal component analysis (PCA) is a statistical method to find
a rotation such that the first coordinate has the largest variance
possible, and each succeeding coordinate in turn has the largest
variance possible. The columns of the rotation matrix are called
principal components. PCA is used widely in dimensionality reduction.
:Example:
>>> pca = PCA(n_components=2).fit(ddf1, input_col='features')
>>> ddf2 = pca.transform(ddf1, output_col='features_pca')
"""
def __init__(self, n_components):
"""
:param n_components: Number of output components;
"""
super(PCA, self).__init__()
self.n_components = n_components
self.var_exp = self.cum_var_exp = \
self.eig_values = self.eig_vectors = self.matrix = 0
def fit(self, data, input_col):
"""
:param data: DDF
:param input_col: Input columns;
:return: trained model
"""
df, nfrag, tmp = self._ddf_initial_setup(data)
if not isinstance(input_col, list):
input_col = [input_col]
self.input_col = input_col
partial_count = [pca_count(df[f], input_col) for f in range(nfrag)]
merged_count = merge_reduce(pca_merge_count, partial_count)
for f in range(nfrag):
partial_count[f] = partial_multiply(df[f], input_col, merged_count)
merged_cov = merge_reduce(pca_cov_merger, partial_count)
info = pca_eigen_decomposition(merged_cov, self.n_components)
compss_delete_object(partial_count)
compss_delete_object(merged_count)
compss_delete_object(merged_cov)
self.var_exp, self.eig_values, self.eig_vectors, self.matrix = info
self.cum_var_exp = np.cumsum(self.var_exp)
self.model = dict()
self.model['algorithm'] = self.name
# cumulative explained variance
self.model['cum_var_exp'] = self.cum_var_exp
self.model['eig_values'] = self.eig_values
self.model['eig_vectors'] = self.eig_vectors
self.model['model'] = self.matrix
return self
def fit_transform(self, data, input_col, output_col='_pca', remove=False):
"""
Fit the model and transform.
:param data: DDF
:param input_col: Input columns;
:param output_col: A list of output feature column or a suffix name.
:param remove: Remove input columns after execution (default, False).
:return: DDF
"""
self.fit(data, input_col)
ddf = self.transform(data, output_col=output_col, remove=remove)
return ddf
def transform(self, data, input_col=None, output_col='_pca', remove=False):
"""
:param data: DDF
:param input_col: Input columns;
:param output_col: A list of output feature column or a suffix name.
:param remove: Remove input columns after execution (default, False).
:return: DDF
"""
self.check_fitted_model()
if not input_col:
input_col = self.input_col
if not isinstance(output_col, list):
output_col = ['{}{}'.format(col, output_col) for col in input_col]
self.output_col = output_col
self.remove = remove
self.settings = self.__dict__.copy()
uuid_key = ContextBase \
.ddf_add_task(operation=self, parent=[data.last_uuid])
return DDF(last_uuid=uuid_key)
@staticmethod
def function(df, params):
params = params.copy()
params['model'] = params['model']['model']
return _pca_transform(df, params)
@task(returns=1, data_input=FILE_IN)
def pca_count(data_input, col):
"""Partial count."""
data = read_stage_file(data_input, col)
partial_size = len(data)
partial_sum = 0
if partial_size > 0:
partial_sum = data[col].values.sum(axis=0)
return [partial_size, partial_sum]
@task(returns=1)
def pca_merge_count(count1, count2):
"""Merge partial counts."""
partial_size = count1[0] + count2[0]
partial_sum = np.add(count1[1], count2[1])
return [partial_size, partial_sum]
@task(returns=1, data_input=FILE_IN)
def partial_multiply(data_input, col, info):
"""Perform partial calculation."""
cov_mat = 0
total_size = info[0]
data = read_stage_file(data_input, col)
if len(data) > 0:
mean_vec = np.array(info[1]) / total_size
x_std = data[col].values
first_part = x_std - mean_vec
cov_mat = first_part.T.dot(first_part)
return [cov_mat, total_size]
@task(returns=1)
def pca_cov_merger(info1, info2):
"""Merge covariance."""
cov1, total_size = info1
cov2, _ = info2
return [np.add(cov1, cov2), total_size]
def pca_eigen_decomposition(info, n_components):
"""Generate an eigen decomposition."""
info = compss_wait_on(info)
cov_mat, total_size = info
dim = len(cov_mat)
n_components = min([n_components, dim])
cov_mat = cov_mat / (total_size-1)
eig_values, eig_vectors = np.linalg.eig(cov_mat)
eig_values = np.abs(eig_values)
total_values = sum(eig_values)
var_exp = [i*100/total_values for i in eig_values]
# Sort the eigenvalue and vectors tuples from high to low
idx = eig_values.argsort()[::-1]
eig_values = eig_values[idx]
eig_vectors = eig_vectors[:, idx]  # eigenvectors are the columns returned by np.linalg.eig
matrix_w = eig_vectors[:, :n_components]
return var_exp, eig_values, eig_vectors, matrix_w
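# Standalone sketch (plain numpy, outside the COMPSs tasks; an illustration rather than
# part of the library) of the same algebra on a toy 2-feature matrix: centre the data,
# build the covariance matrix, sort eigenpairs by absolute eigenvalue and report the
# percentage of variance each component explains.
def _pca_toy_example():
    rng = np.random.default_rng(0)
    x = rng.normal(size=(100, 2))
    x[:, 1] = 0.5 * x[:, 0] + 0.1 * x[:, 1]         # make the two features correlated
    centered = x - x.mean(axis=0)
    cov = centered.T.dot(centered) / (len(x) - 1)   # same covariance the merged tasks build
    values, vectors = np.linalg.eig(cov)
    order = np.abs(values).argsort()[::-1]          # largest eigenvalue first
    values, vectors = np.abs(values)[order], vectors[:, order]
    var_exp = values * 100 / values.sum()           # explained variance per component
    return var_exp, vectors[:, :1]                  # keep the first principal component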
def _pca_transform(data, settings):
"""Reduce the dimensionality based in the created model."""
features = settings['input_col']
pred_col = settings['output_col']
matrix_w = settings['model']
frag = settings['id_frag']
remove = settings['remove']
n_components = min([len(pred_col), len(matrix_w)])
pred_col = pred_col[0: n_components]
if len(data) > 0:
array = data[features].values
if not remove:
to_remove = [c for c in pred_col if c in data.columns]
else:
to_remove = features
data.drop(to_remove, axis=1, inplace=True)
res = array.dot(matrix_w)
data = pd.concat([data, pd.DataFrame(res, columns=pred_col)], axis=1)
import sys
import os
sys.path.append(os.path.dirname(os.path.abspath(os.path.dirname(__file__))))
import pandas as pd
import numpy as np
# sklearn algorithm : classification, regression, clustring, reduction
from sklearn.ensemble import RandomForestClassifier # rforest
from sklearn.tree import DecisionTreeClassifier # dtree
from sklearn.naive_bayes import GaussianNB # nb
from sklearn.neighbors import KNeighborsClassifier # knn
from sklearn.svm import SVC # svm
from sklearn.model_selection import train_test_split
from sklearn.model_selection import KFold # here k means the number of folds, i.e. a count
from sklearn.model_selection import cross_val_score
# dtree, rforest, nb, knn, svm,
"""
context: /Users/bitai/emp_ai
fname:
PassengerId: passenger ID,
Survived: survival status --> the answer the machine-learning model has to predict,
Pclass: ticket class, 1 = 1st class, 2 = 2nd class, 3 = 3rd class,
Name,
Sex,
Age,
SibSp: number of siblings/spouses aboard,
Parch: number of parents/children aboard,
Ticket: ticket number,
Fare: fare paid,
Cabin: cabin number,
Embarked: port of embarkation, C = Cherbourg, Q = Queenstown, S = Southampton
"""
from dataclasses import dataclass
@dataclass
class FileReader:
context: str = ''
fname: str = ''
train: object = None
test: object = None
id: str = ''
label: str = ''
class TitanicModel:
def __init__(self):
self.fileReader = FileReader()
self.data = './data'
def new_model(self, payload) -> object:
this = self.fileReader
this.data = self.data
this.fname = payload
return pd.read_csv(os.path.join(this.data, this.fname)) # p.139 df = tensor
@staticmethod
def create_train(this) -> object:
return this.train.drop('Survived', axis = 1) # train is the dataset with the answer column removed.
@staticmethod
def create_label(this) -> object:
return this.train['Survived'] # the label is the answer itself.
@staticmethod
def drop_feature(this, feature) -> object:
this.train = this.train.drop([feature], axis = 1)
this.test = this.test.drop([feature], axis = 1) # see p.149: the data is split into train and test sets
return this
@staticmethod
def pclass_ordinal(this) -> object:
return this
@staticmethod
def sex_nominal(this) -> object:
combine = [this.train, this.test] # bundle train and test together.
sex_mapping = {'male':0, 'female':1}
for dataset in combine:
dataset['Sex'] = dataset['Sex'].map(sex_mapping)
this.train = this.train # overriding
this.test = this.test
return this
@staticmethod
def age_ordinal(this) -> object:
train = this.train
test = this.test
train['Age'] = train['Age'].fillna(-0.5)
test['Age'] = test['Age'].fillna(-0.5)
# Filling the missing ages with the mean feels arbitrary, and filling them with the most common value has no real justification either...
# Age in particular carries a lot of weight when predicting survival, so it needs a careful approach.
# Passengers whose age is unknown should stay "unknown" to limit distorting the values,
# which is why the sentinel value -0.5 is used.
bins = [-1, 0, 5, 12, 18, 24, 35, 60, np.inf] # these are the bin boundaries
# -1 up to 0, ... , 60 and above, and so on
# since this sits inside [], it is a plain list literal; spotting that means you understood it.
labels = ['Unknown', 'Baby', 'Child', 'Teenager', 'Student', 'Young Adult', 'Adult', 'Senior']
# [] declares the list of group labels
train['AgeGroup'] = pd.cut(train['Age'], bins, labels=labels)
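# The pd.cut above yields categorical AgeGroup labels; to make the feature truly ordinal
# the labels still have to be mapped to integers. The continuation below is an assumption
# about how this method typically finishes (the original code is not shown here), and
# age_mapping is a hypothetical name.
test['AgeGroup'] = pd.cut(test['Age'], bins, labels=labels)
age_mapping = {'Unknown': 0, 'Baby': 1, 'Child': 2, 'Teenager': 3,
'Student': 4, 'Young Adult': 5, 'Adult': 6, 'Senior': 7}
train['AgeGroup'] = train['AgeGroup'].map(age_mapping)
test['AgeGroup'] = test['AgeGroup'].map(age_mapping)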
# ---
# jupyter:
# jupytext:
# formats: ipynb,py:light
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.9.1+dev
# kernelspec:
# display_name: Python [conda env:annorxiver]
# language: python
# name: conda-env-annorxiver-py
# ---
# # Find published articles missing from bioRxiv
# +
from pathlib import Path
import pickle
import numpy as np
import pandas as pd
import plotnine as p9
from scipy.spatial.distance import cdist
from sklearn.linear_model import LogisticRegressionCV
import tqdm
# -
# # Load Embeddings
# ## bioRxiv
biorxiv_journal_df = (
pd.read_csv(
"../journal_tracker/output/mapped_published_doi_before_update.tsv", sep="\t"
)
.rename(index=str, columns={"doi": "preprint_doi"})
.groupby("preprint_doi")
.agg(
{
"document": "last",
"category": "first",
"preprint_doi": "last",
"published_doi": "first",
"pmcid": "first",
"pmcoa": "first",
}
)
.reset_index(drop=True)
)
biorxiv_journal_df.head()
biorxiv_embed_df = pd.read_csv(
Path("../word_vector_experiment/output/")
/ "word2vec_output/"
/ "biorxiv_all_articles_300_fixed.tsv.xz",
sep="\t",
)
biorxiv_embed_df = biorxiv_embed_df.dropna()
biorxiv_embed_df.head()
biorxiv_journal_mapped_df = biorxiv_journal_df[
["document", "published_doi", "pmcid", "pmcoa"]
].merge(biorxiv_embed_df, on="document")
biorxiv_journal_mapped_df.head()
# ## Pubmed Central
pmc_articles_df = pd.read_csv(
Path("../../pmc/exploratory_data_analysis/")
/ "output/pubmed_central_journal_paper_map.tsv.xz",
sep="\t",
).query("article_type=='research-article'")
pmc_articles_df.head()
pmc_embed_df = pd.read_csv(
Path("../../pmc/word_vector_experiment/output")
/ Path("pmc_document_vectors_300_replace.tsv.xz"),
sep="\t",
)
pmc_embed_df.head()
pmc_journal_mapped_df = (
pmc_articles_df[["doi", "pmcid"]]
.merge(pmc_embed_df, left_on="pmcid", right_on="document")
.drop("pmcid", axis=1)
)
pmc_journal_mapped_df.head()
# # Calculate Distances
# ## biorxiv -> published versions
biorxiv_published = (
biorxiv_journal_mapped_df.query("pmcid.notnull()")
.query("pmcoa == True")
.sort_values("pmcid", ascending=True)
.drop_duplicates("pmcid")
.set_index("pmcid")
)
biorxiv_published.head()
PMC_published = (
pmc_journal_mapped_df.query(f"document in {biorxiv_published.index.tolist()}")
.sort_values("document", ascending=True)
.set_index("document")
)
PMC_published.head()
article_distances = cdist(
biorxiv_published.loc[PMC_published.index.tolist()].drop(
["document", "published_doi", "pmcoa"], axis=1
),
PMC_published.drop(["doi", "journal"], axis=1),
"euclidean",
)
article_distances.shape
articles_distance_df = (
biorxiv_published.loc[PMC_published.index.tolist()]
.reset_index()[["document", "pmcid"]]
.assign(
distance=np.diag(article_distances, k=0), journal=PMC_published.journal.tolist()
)
)
articles_distance_df.head()
# ## biorxiv -> random paper same journal
PMC_off_published = (
pmc_journal_mapped_df.drop("doi", axis=1)
.query(f"document not in {biorxiv_published.index.tolist()}")
.query(f"journal in {articles_distance_df.journal.unique().tolist()}")
.groupby("journal", group_keys=False)
.apply(lambda x: x.sample(1, random_state=100))
)
PMC_off_published.head()
journal_mapper = {
journal: col for col, journal in enumerate(PMC_off_published.journal.tolist())
}
list(journal_mapper.items())[0:10]
off_article_dist = cdist(
biorxiv_published.loc[PMC_published.index.tolist()]
.drop(["document", "published_doi", "pmcoa"], axis=1)
.values,
PMC_off_published.drop(["document", "journal"], axis=1).values,
"euclidean",
)
off_article_dist.shape
data = []
for idx, row in tqdm.tqdm(articles_distance_df.iterrows()):
if row["journal"] in journal_mapper:
data.append(
{
"document": row["document"],
"pmcid": (
PMC_off_published.query(f"journal=='{row['journal']}'")
.reset_index()
.document.values[0]
),
"journal": row["journal"],
"distance": off_article_dist[idx, journal_mapper[row["journal"]]],
}
)
final_df = articles_distance_df.assign(label="pre_vs_published").append(
pd.DataFrame.from_records(data).assign(label="pre_vs_random")
)
final_df.head()
final_df = biorxiv_journal_df[["document", "preprint_doi"]].merge(final_df)
final_df.to_csv("output/annotated_links/article_distances.tsv", sep="\t", index=False)
final_df.head()
# # Distribution plot
g = (
p9.ggplot(
final_df.replace(
{
"pre_vs_published": "preprint-published",
"pre_vs_random": "preprint-random",
}
)
)
+ p9.aes(x="label", y="distance")
+ p9.geom_violin(fill="#a6cee3")
+ p9.labs(x="Document Pair Groups", y="Euclidean Distance")
+ p9.theme_seaborn(context="paper", style="ticks", font="Arial", font_scale=2)
)
g.save("output/figures/biorxiv_article_distance.svg")
g.save("output/figures/biorxiv_article_distance.png")
print(g)
# # Logistic Regression bioRxiv preprints -> published PMC articles
model = LogisticRegressionCV(
Cs=5,
cv=10,
random_state=100,
penalty="elasticnet",
solver="saga",
l1_ratios=[0.1, 0.5, 0.8],
verbose=1,
)
retained_ids = list(
set(PMC_published.index.tolist()) & set(biorxiv_published.index.tolist())
)
training_dataset = (
biorxiv_published.dropna()
.drop(["document", "published_doi", "pmcoa"], axis=1)
.loc[retained_ids]
- PMC_published.loc[retained_ids].dropna().drop(["journal", "doi"], axis=1)
).assign(
biorxiv_document=biorxiv_published.loc[retained_ids].document.values, true_link=1
)
training_dataset.head()
journals = (
PMC_published.loc[retained_ids]
.query(f"journal in {PMC_off_published.journal.tolist()}")
.journal.values.tolist()
)
off_documents = (
PMC_published.loc[retained_ids]
.query(f"journal in {PMC_off_published.journal.tolist()}")
.index.tolist()
)
training_dataset = (
training_dataset.append(
pd.DataFrame(
biorxiv_published.loc[off_documents]
.drop(["document", "published_doi", "pmcoa"], axis=1)
.values
- PMC_off_published.iloc[list(map(lambda x: journal_mapper[x], journals))]
.set_index("journal")
.drop("document", axis=1)
.values,
columns=[f"feat_{idx}" for idx in range(300)],
).assign(true_link=-1)
)
.reset_index(drop=True)
.drop("biorxiv_document", axis=1)
.dropna()
)
training_dataset.head()
fit_model = model.fit(
training_dataset.sample(frac=1, random_state=100).drop("true_link", axis=1),
training_dataset.sample(frac=1, random_state=100).true_link,
)
fit_model.scores_
pickle.dump(fit_model, open("output/optimized_model.pkl", "wb"))
# # Find bioRxiv unpublished -> published PMC articles
biorxiv_unpublished = biorxiv_journal_mapped_df.query("published_doi.isnull()").drop(
["published_doi", "pmcid", "pmcoa"], axis=1
)
print(biorxiv_unpublished.shape)
biorxiv_unpublished.head()
PMC_unlinked = pmc_journal_mapped_df.query(
f"""
document not in {
biorxiv_published
.reset_index()
.pmcid
.tolist()
}
"""
)
print(PMC_unlinked.shape)
PMC_unlinked.head()
cutoff_score = final_df.query("label=='pre_vs_random'").distance.min()
cutoff_score
chunksize = 100
chunk_iterator = range(0, biorxiv_unpublished.shape[0], chunksize)
for idx, chunk in tqdm.tqdm(enumerate(chunk_iterator)):
# Chunk the documents so memory doesn't break
biorxiv_subset = biorxiv_unpublished.iloc[chunk : chunk + chunksize]
# Calculate distances
paper_distances = cdist(
biorxiv_subset.drop(["document"], axis=1),
PMC_unlinked.drop(["journal", "document", "doi"], axis=1),
"euclidean",
)
# Get elements less than threshold
sig_indicies = np.where(paper_distances < cutoff_score)
results = zip(
sig_indicies[0],
sig_indicies[1],
paper_distances[paper_distances < cutoff_score],
)
# Map the results into records for pandas to parse
results = list(
map(
lambda x: dict(
document=biorxiv_subset.iloc[x[0]].document,
pmcid=PMC_unlinked.iloc[x[1]].document,
distance=x[2],
),
results,
)
)
# There may be cases where there are no matches
if len(results) > 0:
# Generate pandas dataframe
potential_matches_df = (
biorxiv_journal_df[["document", "preprint_doi"]]
.merge(pd.DataFrame.from_records(results))
)
#!/usr/bin/python3
import pandas as pd
import subprocess
import os
import matplotlib.pyplot as plt
import numpy as np
import time
import glob
pd.set_option('display.max_columns', None)
pd.set_option('display.max_rows', None)
# Set up a bunch of settings to test, more than will be plotted to ensure that I can change things around to plot different values
dimensions = [(660, 120), (1000, 200), (2000, 400), (4000, 800), (8000, 1600)]
sim_times = [0.5, 0.2, 0.05, 0.01, 0.004]
omp_num_threads_tested = [1, 2, 3, 4, 5, 6]
sbatch_nodes_tested = [1, 2, 3, 4, 8]
# List of items to actually plot as different lines
omp_num_threads_plot = [1, 2, 4, 6]
sbatch_nodes_plot = [1, 2, 4, 8]
dimensions_plot = [(660, 120), (2000, 400), (8000, 1600)]
# Extract timing data from line
def get_time_from_timing_line(line):
string_time = line.split(" ")[3]
return float(string_time)
class CFDRunner:
"""
Class to handle running a configuration via slurm and process the output
"""
def __init__(self, id):
"""
Set the default parameters
Takes an id to keep config files separate
"""
self.x = 660
self.y = 120
self.t = 0.2
self.sbatch_nodes = 1
self.sbatch_tasks = 0.5
self.sbatch_time = "00:07:00"
self.omp_threads = 6
self.in_file = os.path.join("test", f"initial-{id}.bin")
self.out_file = os.path.join("test", f"completed-{id}.bin")
self.sbatch_file = os.path.join("test", f"submit-{id}.sbatch")
self.single_thread = False
def run(self):
"""
Run the slurm batch file and extract the slurm job id to read the file later
"""
process_output = subprocess.run(["sbatch", self.sbatch_file], stdout=subprocess.PIPE)
output_lines = process_output.stdout.decode().split("\n")
self.sbatch_id = output_lines[0].split(" ")[3]
def is_still_running(self):
"""
Check if the job still appears in the queue -> probably still running
"""
process_output = subprocess.run(["squeue"], stdout=subprocess.PIPE)
output_lines = process_output.stdout.decode().split("\n")
return any([self.sbatch_id in line for line in output_lines])
def parse_output(self):
"""
Parse the output into a dataframe of timing data
"""
with open(f"slurm-{self.sbatch_id}.out", "r") as fh:
lines = fh.readlines()
i = 0
# while i < len(lines) and "I am process" not in lines[i]:
# i += 1
# shape_output = lines[i]
timing_results = []
# Basically go line by line and extract the timing data
# If a timestep label is seen it knows a new set of measurements is starting
# Add the current of the data to the dataframe
# Note: Uses this weird method because it wasn't known which order the measurements would be output in
current_time = None
timestep_time_taken = None
compute_velocity_time_taken = None
rhs_time_taken = None
possion_time_taken = None
update_velocity_time_taken = None
boundary_time_taken = None
sync_time_taken = None
possion_p_loop_time_taken = None
possion_res_loop_time_taken = None
for line in lines[i:]:
try:
if "--- Timestep" in line:
if current_time is not None:
timing_results.append([
current_time,
timestep_time_taken,
compute_velocity_time_taken,
rhs_time_taken,
possion_time_taken,
update_velocity_time_taken,
boundary_time_taken,
sync_time_taken,
possion_p_loop_time_taken,
possion_res_loop_time_taken,
])
current_time = float(line.split(" ")[3])
elif "timestep_time_taken" in line:
timestep_time_taken = float(line.split(" ")[1])
elif "compute_velocity_time_taken" in line:
compute_velocity_time_taken = float(line.split(" ")[1])
elif "rhs_time_taken" in line:
rhs_time_taken = float(line.split(" ")[1])
elif "possion_time_taken" in line:
possion_time_taken = float(line.split(" ")[1])
elif "update_velocity_time_taken" in line:
update_velocity_time_taken = float(line.split(" ")[1])
elif "boundary_time_taken" in line:
boundary_time_taken = float(line.split(" ")[1])
elif "sync_time_taken" in line:
sync_time_taken = float(line.split(" ")[1])
elif "possion_p_loop_time_taken" in line:
possion_p_loop_time_taken = float(line.split(" ")[1])
elif "possion_res_loop_time_taken" in line:
possion_res_loop_time_taken = float(line.split(" ")[1])
except Exception as e:
print("Exception", e)
# Label the dataframe columns and return
df = pd.DataFrame(timing_results, columns=("Timestep", "timestep_time_taken", "compute_velocity_time_taken", "rhs_time_taken", "possion_time_taken", "update_velocity_time_taken", "boundary_time_taken", "sync_time_taken", "possion_p_loop_time_taken", "possion_res_loop_time_taken"))
return df
def save_sbatch(self):
"""
Export the configuration as a file to be run by sbatch
Bind to socket to avoid performing openmp across two sockets to avoid memory latency
"""
# Default to using the parallel implementation
command = f"time mpirun -n {self.sbatch_nodes} -npernode 1 --bind-to socket ./karman-par -x {self.x} -y {self.y} --infile {self.in_file} -o {self.out_file} -t {self.t}\n"
omp_line = f"export OMP_NUM_THREADS={self.omp_threads}\n"
# If singlethread use the other executable
if self.single_thread:
command = f"time ./karman -x {self.x} -y {self.y} --infile {self.in_file} -o {self.out_file} -t {self.t}\n"
omp_line = "\n"
# Write out the file
with open(self.sbatch_file, "w") as fh:
fh.writelines([
"#!/bin/bash\n",
"#SBATCH --job-name=cfd-graphs\n",
"#SBATCH --partition=cs402\n",
"#SBATCH --nice=9000\n",
"#SBATCH --ntasks-per-socket=1\n", # avoid going from socket to socket with openmp
f"#SBATCH --nodes={self.sbatch_nodes}\n",
f"#SBATCH --ntasks-per-node=1\n",
f"#SBATCH --cpus-per-task=12\n" # required for 6x scaling running on slurm scaled correctly with openmp up to 6 threads but after that failed to improve. I think it is only allocating one socket.
f"#SBATCH --time={self.sbatch_time}\n",
". /etc/profile.d/modules.sh\n",
"module purge\n",
"module load cs402-mpi\n",
omp_line,
command,
"#gprof ./karman\n",
"./bin2ppm < karman.bin > karman.ppm\n",
"./diffbin karman.vanilla.bin karman.bin\n",
])
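# For reference, with the defaults set in __init__ (1 node, 6 OpenMP threads, a 660x120
# grid, t = 0.2, time limit 00:07:00) the method above would emit a submit file roughly
# like the following; this rendering is illustrative only, and <id> stands for the id
# passed to the constructor:
#
#   #!/bin/bash
#   #SBATCH --job-name=cfd-graphs
#   #SBATCH --partition=cs402
#   #SBATCH --nice=9000
#   #SBATCH --ntasks-per-socket=1
#   #SBATCH --nodes=1
#   #SBATCH --ntasks-per-node=1
#   #SBATCH --cpus-per-task=12
#   #SBATCH --time=00:07:00
#   . /etc/profile.d/modules.sh
#   module purge
#   module load cs402-mpi
#   export OMP_NUM_THREADS=6
#   time mpirun -n 1 -npernode 1 --bind-to socket ./karman-par -x 660 -y 120 \
#       --infile test/initial-<id>.bin -o test/completed-<id>.bin -t 0.2
#   ./bin2ppm < karman.bin > karman.ppm
#   ./diffbin karman.vanilla.bin karman.bin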
def collect_data():
"""
Run all configurations
"""
all_df = pd.DataFrame({
"x": pd.Series(dtype='int32'),
"y": | pd.Series(dtype='int32') | pandas.Series |
import logging
import re
import pandas as pd
import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib.font_manager as fm
import matplotlib.dates as mdates
import matplotlib.patheffects as PathEffects
from fontTools.ttLib import TTFont
import numpy as np
from pathlib import Path
import io
import json
from datetime import datetime, timedelta
from scipy.optimize import curve_fit
logging.basicConfig(level=logging.INFO)
def str_remove_duplicated_whitespaces(s):
return re.sub(r'\s{2,}', ' ', s)
def is_float(s):
if type(s) == pd.Series:
return s.apply(lambda x: is_float(x))
try:
float(s)
except ValueError:
return False
else:
return True
def ser_is_identical(ser, value = None):
u = ser.unique()
if u.size != 1:
return False
if value:
if u[0] != value:
return False
return True
def df_rename_duplicated_columns(df):
cols=pd.Series(df.columns)
for dup in cols[cols.duplicated()].unique():
cols[cols == dup] = [dup + '.' + str(i) if i != 0 else dup for i in range(sum(cols == dup))]
df.columns = cols
return df
def df_rename_columns(df, cols:dict):
ren = {v2: k for k, v in cols.items() for v2 in v}
return df.rename(columns = ren)
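# Small usage sketch of the two renaming helpers above (illustrative only, not part of
# the original module): duplicated column names receive numeric suffixes, and
# df_rename_columns maps several old names onto one new name via a {new: [old, ...]} dict.
def _rename_helpers_demo():
    df = pd.DataFrame([[1, 2, 3]], columns=['a', 'a', 'b'])
    df = df_rename_duplicated_columns(df)                        # columns -> ['a', 'a.1', 'b']
    df = df_rename_columns(df, {'alpha': ['a'], 'beta': ['b']})  # -> ['alpha', 'a.1', 'beta']
    return df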
def df_sort_multilevel_values(df, *args, **kwargs):
axis = kwargs.get('axis', 0)
df_sorted = pd.DataFrame()
if axis == 0:
levels = df.index.levels
else:
levels = df.columns.levels
for l in levels[-2]:
df_sorted = pd.concat([df_sorted, df[[l]].sort_values(*args, **kwargs)], axis = 1)
return df_sorted
def df_split_multilevel(df, axis = 0):
dfs = {}
if axis == 0:
keys = pd.MultiIndex.from_product(df.index.levels[:-1])
for k in keys:
if k in df:
dfs[k] = df.loc[k]
else:
keys = pd.MultiIndex.from_product(df.columns.levels[:-1])
for k in keys:
if k in df:
dfs[k] = df[k]
return dfs
def load_meta(meta_fp:str):
with io.open(meta_fp, 'r', encoding='utf-8') as f:
meta = json.load(f)
return meta
def load(data_fp:str = '.', merge = True):
"""
Loads raw data from all stored .csv files
"""
meta_fn = 'meta.json'
if type(data_fp) == str:
data_fp = [data_fp]
df_array = []
for fp in data_fp:
fp = Path(fp).resolve()
if fp.is_dir():
fp_array = (fp.glob('*.csv'))
meta_fn = str(fp / meta_fn)
else:
fp_array = [fp]
meta_fn = str(fp.parent / meta_fn)
meta = load_meta(meta_fn)
df_freq_groups = {}
for fp in fp_array:
logging.info(f"Reading '{fp}'...")
if 'header' in meta:
header = meta['header']
else:
with open(fp, encoding='utf8') as f:
lines = f.readlines()
for i, l in enumerate(lines):
first_item = l.split(',')[0]
try:
pd.Period(first_item)
except:
continue
else:
break
header = list(range(i))
index_col = 0
df = pd.read_csv(fp, index_col = index_col, header = header, comment = "#")
df.index = pd.PeriodIndex([pd.Period(i) for i in df.index])
if merge:
freqstr = df.index.freqstr
df_freq_groups.setdefault(freqstr, pd.DataFrame())
df_freq_groups[freqstr] = pd.concat([df_freq_groups[freqstr], df])
else:
df_array.append(df)
if merge:
df_array.extend([df.sort_index(axis = 0) for df in df_freq_groups.values()])
if len(df_array) == 0:
df_array = None
elif len(df_array) == 1:
df_array = df_array[0]
return df_array
#Get available system fonts
#import matplotlib.font_manager
#flist = matplotlib.font_manager.findSystemFonts(fontpaths=None, fontext='ttf')
#names = [matplotlib.font_manager.FontProperties(fname=fname).get_name() for fname in flist]
#Set font to support Chinese
mpl.rc('font', family = 'SimHei', size = 12)
#mpl.rc('font', family = 'Microsoft New Tai Lue')
def read_raw(raw_filepath, unit = 1E8):
fp = Path(raw_filepath)
if fp.is_dir():
fp_array = (fp.glob('*.csv'))
else:
fp_array = [fp]
for fp in fp_array:
with open(fp, encoding='utf8') as f:
items = f.readline().split(',')
for i, v in enumerate(items):
try:
pd.Period(v)
except:
continue
else:
break
index_col = list(range(i))
d = pd.read_csv(fp, index_col = index_col, header=[0])
d = d.T * unit
d.to_csv(fp.name)
def plot_bar(df, **kwargs):
if isinstance(df, pd.core.series.Series):
df = pd.DataFrame(df)
ax = df.plot.bar(**kwargs)
for container in ax.containers:
ax.bar_label(container)
return ax
def plot(df, **kwargs):
if isinstance(df, pd.core.series.Series):
df = pd.DataFrame(df)
#!/usr/bin/env python3
import os, sys
from pathlib import Path
import argparse
learnDir=Path(__file__).parent.absolute()
parentDir=learnDir.parent
sys.path.insert(0, str(parentDir))
from HDDModelDecoder.ml import WDDmatSpec
datasetDir=parentDir / "dataset"
defaultModelsDirName = "WD_models"
defaultModelsDir=Path(WDDmatSpec.__file__).parent/defaultModelsDirName
#defaultModelsDir=learnDir / defaultModelsDirName
def loadDataset():
import json
with (datasetDir / "WD_series.json").open("rt", encoding="utf-8") as f:
return json.load(f)
def flattenDictGen(d):
for k,v in d.items():
if isinstance(v, dict):
yield from ( (k+"_"+kk if k != kk else k, vv) for kk, vv in flattenDictGen(v) )
else:
yield (k, v)
def flattenDict(d):
return dict(flattenDictGen(d))
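# Quick self-check (illustrative, not part of the original script) of flattenDict's key
# joining: nested keys are joined with '_' unless the inner key equals the outer key.
assert flattenDict({"rpm": {"value": 7200}, "cache": {"cache": 64}}) == {"rpm_value": 7200, "cache": 64}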
def prepareDataset():
import re
import pandas
from HDDModelDecoder.WD import WDDecoderInternal
from decodeWDModelSeriesModifiers import decodeWDModelSeriesModifiers, series, seriesRx, modifiersSplitRx
from dataset import attrGrouppedToRecords
ds=loadDataset()
ds=attrGrouppedToRecords(ds)
#modifiers=set()
for r in ds:
tr=flattenDict(WDDecoderInternal(r["name"], learn=True)[1])
if "series" in tr:
del(tr["series"])
r.update(tr)
if "series" in r:
r.update(decodeWDModelSeriesModifiers(r["series"]))
ds = pandas.DataFrame.from_records(ds)