prompt (stringlengths 19-1.03M) | completion (stringlengths 4-2.12k) | api (stringlengths 8-90) |
---|---|---|
# -*- coding: utf-8 -*-
"""System unserved energy plots.
This module creates unserved energy timeseries line plots and total bar
plots and is called from marmot_plot_main.py
@author: <NAME>
"""
import logging
import pandas as pd
import marmot.config.mconfig as mconfig
from marmot.plottingmodules.plotutils.plot_library import PlotLibrary
from marmot.plottingmodules.plotutils.plot_data_helper import PlotDataHelper
from marmot.plottingmodules.plotutils.plot_exceptions import (MissingInputData, MissingZoneData)
class MPlot(PlotDataHelper):
"""unserved_energy MPlot Class.
All the plotting modules use this same class name.
This class contains plotting methods that are grouped based on the
current module name.
The unserved_energy.py module contains methods that are
related to unserved energy in the power system.
MPlot inherits from the PlotDataHelper class to assist in creating figures.
"""
def __init__(self, argument_dict: dict):
"""
Args:
argument_dict (dict): Dictionary containing all
arguments passed from MarmotPlot.
"""
# iterate over items in argument_dict and set as properties of class
# see key_list in Marmot_plot_main for list of properties
for prop in argument_dict:
self.__setattr__(prop, argument_dict[prop])
# Instantiation of MPlotHelperFunctions
super().__init__(self.Marmot_Solutions_folder, self.AGG_BY, self.ordered_gen,
self.PLEXOS_color_dict, self.Scenarios, self.ylabels,
self.xlabels, self.gen_names_dict, self.TECH_SUBSET,
Region_Mapping=self.Region_Mapping)
self.logger = logging.getLogger('marmot_plot.'+__name__)
def unserved_energy_timeseries(self, timezone: str = "",
start_date_range: str = None,
end_date_range: str = None, **_):
"""Creates a timeseries line plot of total unserved energy.
Each scenario is plotted as a separate line.
Args:
timezone (str, optional): The timezone to display on the x-axes.
Defaults to "".
start_date_range (str, optional): Defines a start date from which to represent data.
Defaults to None.
end_date_range (str, optional): Defines an end date up to which to represent data.
Defaults to None.
Returns:
dict: Dictionary containing the created plot and its data table.
"""
outputs = {}
if self.AGG_BY == 'zone':
agg = 'zone'
else:
agg = 'region'
# List of properties needed by the plot; each property is a tuple with 3 parts:
# required True/False, the property name, and the scenarios required (scenarios must be a list).
properties = [(True, f"{agg}_Unserved_Energy", self.Scenarios)]
# Runs get_formatted_data within PlotDataHelper to populate PlotDataHelper dictionary
# with all required properties, returns a 1 if required data is missing
check_input_data = self.get_formatted_data(properties)
if 1 in check_input_data:
return MissingInputData()
for zone_input in self.Zones:
self.logger.info(f'Zone = {zone_input}')
Unserved_Energy_Timeseries_Out = pd.DataFrame()
for scenario in self.Scenarios:
self.logger.info(f'Scenario = {scenario}')
unserved_eng_timeseries = self[f"{agg}_Unserved_Energy"].get(scenario)
unserved_eng_timeseries = unserved_eng_timeseries.xs(zone_input,level=self.AGG_BY)
unserved_eng_timeseries = unserved_eng_timeseries.groupby(["timestamp"]).sum()
if pd.notna(start_date_range):
self.logger.info(f"Plotting specific date range: \
{str(start_date_range)} to {str(end_date_range)}")
unserved_eng_timeseries = unserved_eng_timeseries[start_date_range : end_date_range]
unserved_eng_timeseries = unserved_eng_timeseries.squeeze() #Convert to Series
unserved_eng_timeseries.rename(scenario, inplace=True)
Unserved_Energy_Timeseries_Out = pd.concat([Unserved_Energy_Timeseries_Out, unserved_eng_timeseries],
axis=1, sort=False).fillna(0)
Unserved_Energy_Timeseries_Out.columns = Unserved_Energy_Timeseries_Out.columns.str.replace('_',' ')
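# Keep only scenario columns that reach at least 1 (in the data's native unit) of unserved energy at some timestep; scenarios that never do are dropped.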
Unserved_Energy_Timeseries_Out = Unserved_Energy_Timeseries_Out.loc[:, (Unserved_Energy_Timeseries_Out >= 1)
.any(axis=0)]
if Unserved_Energy_Timeseries_Out.empty:
self.logger.info(f'No Unserved Energy in {zone_input}')
out = MissingZoneData()
outputs[zone_input] = out
continue
# Determine automatic unit conversion
unitconversion = self.capacity_energy_unitconversion(Unserved_Energy_Timeseries_Out)
Unserved_Energy_Timeseries_Out = Unserved_Energy_Timeseries_Out/unitconversion['divisor']
# Data table of values to return to main program
Data_Table_Out = Unserved_Energy_Timeseries_Out.add_suffix(f" ({unitconversion['units']})")
mplt = PlotLibrary()
fig, ax = mplt.get_figure()
# Converts color_list into an iterable list for use in a loop
iter_colour = iter(self.color_list)
for column in Unserved_Energy_Timeseries_Out:
mplt.lineplot(Unserved_Energy_Timeseries_Out[column],
color=next(iter_colour), linewidth=3,
label=column)
mplt.add_legend()
ax.set_ylabel(f"Unserved Energy ({unitconversion['units']})",
color='black', rotation='vertical')
ax.set_ylim(bottom=0)
ax.margins(x=0.01)
mplt.set_subplot_timeseries_format()
if mconfig.parser("plot_title_as_region"):
mplt.add_main_title(zone_input)
outputs[zone_input] = {'fig': fig, 'data_table': Data_Table_Out}
return outputs
def tot_unserved_energy(self, start_date_range: str = None,
end_date_range: str = None, **_):
"""Creates a bar plot of total unserved energy.
Each scenario is plotted as a separate bar.
Args:
start_date_range (str, optional): Defines a start date from which to represent data.
Defaults to None.
end_date_range (str, optional): Defines an end date up to which to represent data.
Defaults to None.
Returns:
dict: Dictionary containing the created plot and its data table.
"""
outputs = {}
if self.AGG_BY == 'zone':
agg = 'zone'
else:
agg = 'region'
# List of properties needed by the plot; each property is a tuple with 3 parts:
# required True/False, the property name, and the scenarios required (scenarios must be a list).
properties = [(True, f"{agg}_Unserved_Energy", self.Scenarios)]
# Runs get_formatted_data within PlotDataHelper to populate PlotDataHelper dictionary
# with all required properties, returns a 1 if required data is missing
check_input_data = self.get_formatted_data(properties)
if 1 in check_input_data:
return MissingInputData()
for zone_input in self.Zones:
Unserved_Energy_Timeseries_Out = | pd.DataFrame() | pandas.DataFrame |
# coding=utf-8
import os
import sys
# === import project path ===
curPath = os.path.abspath(os.path.dirname(__file__))
rootPath = os.path.split(curPath)[0]
sys.path.append(rootPath)
# ===========================
import base.default_excutable_argument as dea
import pandas as pd
from PIL import Image
import os
import copy
import queue
import threading
import multiprocessing
from multiprocessing import Manager
from multiprocessing import Pool
from multiprocessing import cpu_count
multiprocessing.freeze_support()
ILSVRC_train_root = '/data/ILSVRC2012/train'
save_to = '/data/process_data/caojihua/ILSVRC/'
# collect information into pandas DataFrames
statistic_sample = 'statistic_sample.csv'
statistic_label = 'statistic_label.csv'
run_time_message = 'statistic_info.txt'
process_amount = cpu_count()
argument = dea.Argument()
parser = argument.parser
parser.add_argument(
'--train_root',
action='store',
dest='TrainRoot',
type=str,
default='',
help='if this flag is set, the program just test train in perform'
)
args = parser.parse_args()
def ifp_listening(ifp, queue):
while True:
msg = queue.get()
if msg == 'end':
ifp.write('killed')
break
ifp.write(msg)
ifp.flush()
pass
ifp.close()
pass
def ifp_write(queue, msg):
queue.put(msg)
pass
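# The two helpers above implement a simple multiprocessing logging pattern: worker
# processes cannot share an open file handle, so they push messages onto a Manager()
# queue via ifp_write(), while a single pool task runs ifp_listening() to drain the
# queue and write everything to the info file until it receives the 'end' message.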
def read_image_information(class_dir, sample_list, image_info_queue, ifp_queue):
for sample_element in sample_list:
sample_dir = os.path.join(class_dir, sample_element)
try:
im = Image.open(sample_dir)
width, height = im.size
channel = im.layers
image_info_queue.put(
[False, {'image_name': sample_element, 'height': height, 'width': width, 'channel': channel}])
del im
except Exception as ex:
ifp_write(ifp_queue, '{0} failed {1}\n'.format(sample_dir, ex.args))
pass
pass
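# Sentinel entry: tells the consumer loop in deal_with_class that every image in this class has been read.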
image_info_queue.put([True, {}])
pass
def deal_with_class(classes, ifp_queue):
df_for_label = pd.DataFrame(columns=['class_dir', 'reflect_name'])
df_for_sample = pd.DataFrame(columns=['class', 'image_name', 'height', 'width', 'channel'])
l = 0
while l < len(classes):
class_element = classes[l]
try:
print('deal with {0}'.format(class_element))
df_for_label = df_for_label.append({'class_dir': class_element, 'reflect_name': class_element},
ignore_index=True)
class_dir = os.path.join(ILSVRC_train_root, class_element)
sample_list = os.listdir(class_dir)
# add to queue
image_info_queue = queue.Queue()
read_thread = threading.Thread(target=read_image_information,
args=(class_dir, sample_list, image_info_queue, ifp_queue))
read_thread.start()
base_dict = {'class': class_element}
sample_ = list()
while True:
element = image_info_queue.get()
if element[0] is False:
base_dict.update(element[1])
sample_.append(copy.deepcopy(base_dict))
pass
else:
break
pass
pass
read_thread.join()
df_for_sample = df_for_sample.append(sample_, ignore_index=True)
del sample_
pass
except Exception as ex:
ifp_write(ifp_queue, '{0}\n'.format(ex.args))
pass
l += 1
print('pid: {0}, dealt with: {1}, remaining: {2}'.format(os.getpid(), l, len(classes) - l))
pass
print('done:{0}'.format(classes))
return df_for_sample, df_for_label
def deal_with_ilsvrc(info_save_to, sample_save_to, label_save_to):
global process_amount
class_list = os.listdir(ILSVRC_train_root)
# split class_list into process_amount parts
seperate_class_list = []
if process_amount > len(class_list):
process_amount = len(class_list)
else:
pass
base_len = len(class_list) // process_amount
end_len = len(class_list) % process_amount + base_len
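# Each of the first (process_amount - 1) chunks gets base_len classes; the last chunk also
# absorbs the remainder (end_len = base_len + remainder), so every class is covered exactly
# once (verified by the assert below).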
start = 0
length = base_len
for i in range(0, process_amount):
seperate_class_list.append(class_list[start: length])
start = start + base_len
if i != process_amount - 2:
length = start + base_len
pass
else:
length = start + end_len
assert(sum([len(i) for i in seperate_class_list]) == len(class_list))
ifp_queue = Manager().Queue()
process_list = []
pool = Pool(processes=process_amount)
with open(info_save_to, 'w') as ifp:
pool.apply_async(ifp_listening, args=(ifp, ifp_queue))
for scl in seperate_class_list:
# process = pool.apply_async(test, args=(1,))
process = pool.apply_async(deal_with_class, args=(scl, ifp_queue))
# process.start()
process_list.append(process)
pass
pool.close()
pool.join()
pass
sample_pd_collection = []
label_pd_collection = []
for pl in process_list:
s, l = pl.get()
sample_pd_collection.append(s)
label_pd_collection.append(l)
pass
label_pd = pd.concat(label_pd_collection, ignore_index=True)
sample_pd = | pd.concat(sample_pd_collection, ignore_index=True) | pandas.concat |
import numpy as np
import pandas as pd
import argparse
import os
# Takes in list from decode.py
# Generates data required for next round of translation
parser = argparse.ArgumentParser()
parser.add_argument('--input', type=str, required=True) # New pairs from decode.py
parser.add_argument('--output', type=str, required=True) # New train_pairs.txt for the next round
parser.add_argument('--mol_list', type=str, required=True) # New molecule list for vocab.py
parser.add_argument('--folder', type=str, required=True) # Folder where extra files generated should be stored
parser.add_argument('--old_pairs', type=str, required=True) # Previous pairlist to append new pairlist to
parser.add_argument('--tox', type=bool,
default=False) # Include toxicity constraint
parser.add_argument('--predictor', type=str, # Chemprop model to use for prediction
default='/data/rsg/chemistry/cbilod/chemprop_old/checkpoints/solubility/solubility-1594231605.496638/fold_0')
parser.add_argument('--tox_predictor', type=str, # Chemprop model to use for prediction
default='/data/rsg/chemistry/cbilod/chemprop_old/checkpoints/tox21/tox21-1596485275.340425/fold_0')
parser.add_argument('--threshold',type=float, default=0.79) # Minimum improvement to include
args = parser.parse_args()
results = pd.read_csv(args.input,sep=' ',header=None)
# Write out columns for prediction:
results[0].to_csv(os.path.join(args.folder,'col1.csv'),index=False)
results[1].to_csv(os.path.join(args.folder,'col2.csv'),index=False)
# Use Chemprop to predict solubilities:
os.system('python /data/rsg/chemistry/cbilod/chemprop/predict.py --test_path '+os.path.join(args.folder,'col1.csv')+' --checkpoint_dir '+args.predictor+' --preds_path '+os.path.join(args.folder,'preds_col1.csv'))
os.system('python /data/rsg/chemistry/cbilod/chemprop/predict.py --test_path '+os.path.join(args.folder,'col2.csv')+' --checkpoint_dir '+args.predictor+' --preds_path '+os.path.join(args.folder,'preds_col2.csv'))
# Read in and organize predictions:
preds1 = pd.read_csv(os.path.join(args.folder,'preds_col1.csv'))
preds1 = preds1.rename(columns={"0":"Mol1","Solubility":"Sol1"})
preds2 = pd.read_csv(os.path.join(args.folder,'preds_col2.csv'))
preds2 = preds2.rename(columns={"1":"Mol2","Solubility":"Sol2"})
preds_tot = pd.concat((preds1,preds2),axis=1)
# New pairs are molecules that have been improved beyond their threshold:
new_pairs = preds_tot[preds_tot['Sol2']-preds_tot['Sol1']>args.threshold][['Mol1','Mol2']]
# Toxicity constraint
if args.tox == True:
new_pairs = apply_constraints(new_pairs,args.tox_predictor,args.folder)
# Combine new pairs with old pairs
old_pairs = | pd.read_csv(args.old_pairs,delimiter=' ',header=None) | pandas.read_csv |
#python script to convert a croesus rebalancing output to a national bank independent network
#mutual fund trade list.
import pandas as pd
import numpy as np
import argparse
import sys
import datetime as dt
import os
from enum import Enum
from pathlib import Path, PureWindowsPath
pd.set_option('display.min_rows', 100)
pd.set_option('display.max_columns', None)
pd.set_option('display.width', None)
pd.set_option('display.max_colwidth', None)
class OrderType(Enum):
SELL=6
BUY=5
SWITCH=8
class AmountTypeCode(Enum):
DOLLAR_AMOUNT='D'
SHARES='S'
ALL_SHARES='A'
DSC_FREE='F'
MATURED_ONY='M'
FREE_ONLY='T' #ten percent free
class DividendOption(Enum):
CASH=4,
REINVEST=1
#!this will reset when the script starts
default_trade_amount_type=AmountTypeCode.DOLLAR_AMOUNT
source_id="ABCD"
amount_type_code="D"
blank_gross=""
client_paid_commission="0"
dividend_option=""
blank_from_id=""
additional_commission="0"
mvsc="Market Value Security Currency"
def getTemplateFileName():
return Path(__file__).parent/"Mutual Fund File Template.xlsx"
def process_file(infile,projectedfile,outfile,etpfile):
#read the croesus projected portfolio to determine quantity variations.
#this will be used to edit the trade list orders.
#.set_index(["Symbol", "Account No."])
projected_df=pd.read_excel(projectedfile,skiprows=1)[["Account No.","Symbol","Qty Variation", "Quantity"]]
projected_df["Sell All"] = False
#find all the orders to sell all by looking for sell orders with 0 remaining quantity
projected_df.loc[ (projected_df["Qty Variation"] < 0) & (projected_df["Quantity"] == 0),"Sell All"] = True
projected_df.sort_values(["Account No.","Symbol"],inplace=True)
projected_df = projected_df.set_index(["Account No.","Symbol"])
#we only want the "Sell All" column from the projected portfolio. Some of the other columns have a different
#meaning than the columns with the same name in the generated orders.
projected_df=projected_df["Sell All"]
df=pd.read_excel(infile,dtype = {"Security" : str} )
#remove whitespace from the security column.
df["Security"]=df["Security"].str.strip()
df.sort_values(["Account No.","Symbol","Type"],inplace=True)
df = df.set_index(["Account No.","Symbol"])
df = df.join(projected_df)
df.reset_index(inplace=True)
if False:
#this is for reading csv, currently unused
#pandas doesn't handle "," in strings when converting to numeric
#rip the commas out
def fn(str):
return str.replace(",","")
df[mvsc]=df[mvsc].apply(fn)
# print(df[mvsc])
#now convert to numeric
df[mvsc]= | pd.to_numeric(df[mvsc]) | pandas.to_numeric |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon May 28 14:35:23 2018
@author: kazuki.onodera
"""
import os
import pandas as pd
import gc
from multiprocessing import Pool
from glob import glob
import utils
utils.start(__file__)
#==============================================================================
KEY = 'SK_ID_CURR'
PREF = 'cre'
NTHREAD = 3
col_num = ['AMT_BALANCE', 'AMT_CREDIT_LIMIT_ACTUAL', 'AMT_DRAWINGS_ATM_CURRENT',
'AMT_DRAWINGS_CURRENT', 'AMT_DRAWINGS_OTHER_CURRENT',
'AMT_DRAWINGS_POS_CURRENT', 'AMT_INST_MIN_REGULARITY',
'AMT_PAYMENT_CURRENT', 'AMT_PAYMENT_TOTAL_CURRENT',
'AMT_RECEIVABLE_PRINCIPAL', 'AMT_RECIVABLE', 'AMT_TOTAL_RECEIVABLE',
'CNT_DRAWINGS_ATM_CURRENT', 'CNT_DRAWINGS_CURRENT',
'CNT_DRAWINGS_OTHER_CURRENT', 'CNT_DRAWINGS_POS_CURRENT',
'CNT_INSTALMENT_MATURE_CUM', 'SK_DPD', 'SK_DPD_DEF']
col_cat = ['CNT_DRAWINGS_OTHER_CURRENT', 'NAME_CONTRACT_STATUS']
col_group = ['SK_ID_PREV', 'CNT_DRAWINGS_OTHER_CURRENT', 'NAME_CONTRACT_STATUS']
# =============================================================================
# feature
# =============================================================================
cre = utils.read_pickles('../data/credit_card_balance')
base = cre[[KEY]].drop_duplicates().set_index(KEY)
def nunique(x):
return len(set(x))
# =============================================================================
# cat
# =============================================================================
for c1 in col_cat:
gc.collect()
print(c1)
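# For each categorical column, build per-SK_ID_CURR count features (crosstab sums) and
# row-normalised share features (crosstab with normalize='index'), then join both onto the base frame.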
df_sum = pd.crosstab(cre[KEY], cre[c1])
df_sum.columns = [f'{PREF}_{c1}_{str(c2).replace(" ", "-")}_sum' for c2 in df_sum.columns]
df_norm = pd.crosstab(cre[KEY], cre[c1], normalize='index')
df_norm.columns = [f'{PREF}_{c1}_{str(c2).replace(" ", "-")}_norm' for c2 in df_norm.columns]
df = pd.concat([df_sum, df_norm], axis=1)
col = df.columns.tolist()
base = pd.concat([base, df], axis=1)
base[col] = base[col].fillna(-1)
# =============================================================================
# merge
# =============================================================================
base.reset_index(inplace=True)
train = utils.load_train([KEY])
train = | pd.merge(train, base, on=KEY, how='left') | pandas.merge |
"""This module is the **core** of `FinQuant`. It provides
- a public class ``Stock`` that holds and calculates quantities of a single stock,
- a public class ``Portfolio`` that holds and calculates quantities of a financial
portfolio, which is a collection of Stock instances.
- a public function ``build_portfolio()`` that automatically constructs and returns
an instance of ``Portfolio`` and instances of ``Stock``. The relevant stock
data is either retrieved through `quandl`/`yfinance` or provided by the user as a
``pandas.DataFrame`` (after loading it manually from disk/reading from file).
For an example on how to use it, please read the corresponding docstring,
or have a look at the examples in the sub-directory ``example``.
The classes ``Stock`` and ``Portfolio`` are designed to easily manage your
financial portfolio, and make the most common quantitative calculations, such as:
- cumulative returns of the portfolio's stocks
- daily returns of the portfolio's stocks (daily percentage change),
- daily log returns of the portfolio's stocks,
- Expected (annualised) Return,
- Volatility,
- Sharpe Ratio,
- skewness of the portfolio's stocks,
- Kurtosis of the portfolio's stocks,
- the portfolio's covariance matrix.
Furthermore, the constructed portfolio can be optimised for
- minimum Volatility,
- maximum Sharpe Ratio
- minimum Volatility for a given Expected Return
- maximum Sharpe Ratio for a given target Volatility
by either performing a numerical computation to solve a minimisation problem, or by performing a Monte Carlo simulation of `n` trials.
The former should be the preferred method for reasons of computational effort
and accuracy. The latter is only included for the sake of completeness.
Finally, functions are implemented to generate the following plots:
- Monte Carlo run to find optimal portfolio(s)
- Efficient Frontier
- Portfolio with the minimum Volatility based a numerical optimisation
- Portfolio with the maximum Sharpe Ratio based on a numerical optimisation
- Portfolio with the minimum Volatility for a given Expected Return based
on a numerical optimisation
- Portfolio with the maximum Sharpe Ratio for a given target Volatility
based on a numerical optimisation
- Individual stocks of the portfolio (Expected Return over Volatility)
"""
import numpy as np
import pandas as pd
import matplotlib.pylab as plt
from finquant.quants import weighted_mean, weighted_std, sharpe_ratio
from finquant.returns import historical_mean_return
from finquant.returns import daily_returns, cumulative_returns
from finquant.returns import daily_log_returns
from finquant.efficient_frontier import EfficientFrontier
from finquant.monte_carlo import MonteCarloOpt
class Stock(object):
"""Object that contains information about a stock/fund.
To initialise the object, it requires a name, information about
the stock/fund given as one of the following data structures:
- ``pandas.Series``
- ``pandas.DataFrame``
The investment information can contain as little information as its name,
and the amount invested in it, the column labels must be ``Name`` and ``Allocation``
respectively, but it can also contain more information, such as
- Year
- Strategy
- CCY
- etc.
It also requires either data, e.g. daily closing prices as a
``pandas.DataFrame`` or ``pandas.Series``.
``data`` must be given as a ``pandas.DataFrame``, and at least one data column
is required to contain the closing price, hence it is required to
contain one column label ``<stock_name> - Adj. Close`` which is used to
compute the return of investment. However, ``data`` can contain more
data in additional columns.
"""
def __init__(self, investmentinfo, data):
"""
:Input:
:investmentinfo: ``pandas.DataFrame`` of investment information
:data: ``pandas.DataFrame`` of stock price
"""
self.name = investmentinfo.Name
self.investmentinfo = investmentinfo
self.data = data
# compute expected return and volatility of stock
self.expected_return = self.comp_expected_return()
self.volatility = self.comp_volatility()
self.skew = self._comp_skew()
self.kurtosis = self._comp_kurtosis()
# functions to compute quantities
def comp_daily_returns(self):
"""Computes the daily returns (percentage change).
See ``finquant.returns.daily_returns``.
"""
return daily_returns(self.data)
def comp_expected_return(self, freq=252):
"""Computes the Expected Return of the stock.
:Input:
:freq: ``int`` (default: ``252``), number of trading days, default
value corresponds to trading days in a year
:Output:
:expected_return: Expected Return of stock.
"""
return historical_mean_return(self.data, freq=freq)
def comp_volatility(self, freq=252):
"""Computes the Volatility of the stock.
:Input:
:freq: ``int`` (default: ``252``), number of trading days, default
value corresponds to trading days in a year
:Output:
:volatility: Volatility of stock.
"""
return self.comp_daily_returns().std() * np.sqrt(freq)
def _comp_skew(self):
"""Computes and returns the skewness of the stock."""
return self.data.skew().values[0]
def _comp_kurtosis(self):
"""Computes and returns the Kurtosis of the stock."""
return self.data.kurt().values[0]
def properties(self):
"""Nicely prints out the properties of the stock: Expected Return,
Volatility, Skewness, Kurtosis as well as the ``Allocation`` (and other
information provided in investmentinfo.)
"""
# nicely printing out information and quantities of the stock
string = "-" * 50
string += "\nStock: {}".format(self.name)
string += "\nExpected Return:{:0.3f}".format(self.expected_return.values[0])
string += "\nVolatility: {:0.3f}".format(self.volatility.values[0])
string += "\nSkewness: {:0.5f}".format(self.skew)
string += "\nKurtosis: {:0.5f}".format(self.kurtosis)
string += "\nInformation:"
string += "\n" + str(self.investmentinfo.to_frame().transpose())
string += "\n"
string += "-" * 50
print(string)
def __str__(self):
# print short description
string = "Contains information about " + str(self.name) + "."
return string
class Portfolio(object):
"""Object that contains information about a investment portfolio.
To initialise the object, it does not require any input.
To fill the portfolio with investment information, the
function ``add_stock(stock)`` should be used, in which ``stock`` is
an object of ``Stock``.
"""
def __init__(self):
"""Initiates ``Portfolio``."""
# initialising instance variables
self.portfolio = | pd.DataFrame() | pandas.DataFrame |
# Copyright (C) 2019-2020 Zilliz. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pyspark.sql.types import *
from pyspark.sql.functions import pandas_udf, PandasUDFType
import pyarrow as pa
import pandas as pd
def render_point_map(df, vega):
schema = StructType([StructField('buffer', StringType(), True)])
@pandas_udf(schema, PandasUDFType.MAP_ITER)
def point_map_UDF(batch_iter, conf = vega):
for pdf in batch_iter:
pdf = pdf.drop_duplicates()
arr_x = pa.array(pdf.x, type='uint32')
arr_y = pa.array(pdf.y, type='uint32')
from arctern_gis import point_map
res = point_map(arr_x, arr_y, conf.encode('utf-8'))
buffer = res.buffers()[1].hex()
buf_df = pd.DataFrame([(buffer,)],["buffer"])
yield buf_df
hex_data = df.mapInPandas(point_map_UDF).collect()[0][0]
return hex_data
def render_heat_map(df, vega):
agg_schema = StructType([StructField('x', IntegerType(), True),
StructField('y', IntegerType(), True),
StructField('c', IntegerType(), True)])
@pandas_udf(agg_schema, PandasUDFType.MAP_ITER)
def render_agg_UDF(batch_iter):
for pdf in batch_iter:
res = pdf.groupby(['x','y'])
res = res['c'].agg(['sum']).reset_index()
res.columns = ['x', 'y', 'c']
yield res
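# Note: render_agg_UDF above pre-aggregates point counts per (x, y) pixel; heat_map_UDF
# below repeats the same groupby/sum before handing the arrays to arctern's heat_map renderer.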
schema = StructType([StructField('buffer', StringType(), True)])
@pandas_udf(schema, PandasUDFType.MAP_ITER)
def heat_map_UDF(batch_iter, conf = vega):
for pdf in batch_iter:
arrs = pdf.groupby(['x','y'])['c'].agg(['sum']).reset_index()
arrs.columns = ['x', 'y', 'c']
arr_x = pa.array(arrs.x, type='uint32')
arr_y = pa.array(arrs.y, type='uint32')
arr_c = pa.array(arrs.c, type='uint32')
from arctern_gis import heat_map
res = heat_map(arr_x, arr_y, arr_c, conf.encode('utf-8'))
buffer = res.buffers()[1].hex()
buf_df = | pd.DataFrame([(buffer,)],["buffer"]) | pandas.DataFrame |
import pandas as pd
import geopandas as gpd
def query_df(df, att, val):
val = '\''+val+'\'' if isinstance(val, str) else val
return df.query( f" {att} == {val} " )
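# e.g. query_df(df, "name", "Bob") builds and runs df.query(" name == 'Bob' ");
# numeric values are passed through unquoted.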
def gdf_concat(lst):
return gpd.GeoDataFrame( | pd.concat(lst) | pandas.concat |
#!/usr/bin/env python
"""
Copyright 2018 by <NAME> (alohawild) and <NAME>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================
This program reads the city file for the Kaggle contest and a previous path answer, and
applies Monte Carlo and greedy improvement over a number of epochs. It then writes
the results out to a requested file name and log table.
Known flaw: the greedy step does not take the "prime carrot" rule into account, and the
travel-time estimate appears to be about half a percent off.
"""
__author__ = 'michaelwild'
__copyright__ = "Copyright (C) 2018 <NAME> and <NAME>"
__license__ = "Apache License, Version 2.0"
__version__ = "0.0.1"
__credits__ = ["<NAME>", "<NAME>"]
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "Initial"
import numpy as np
import pandas as pd
from scipy import spatial
from time import process_time
# ######################## shared ##############################
def isprime(x):
"""
This is the old way of doing this...But easy
"""
if x < 2:
return False # Numbers of 1 or less are not prime
if x == 2: # Number is 2
return True
if x % 2 == 0: # Number is even
return False
if x > 2: # The rest
for i in range(3, int(np.sqrt(x))+1, 2):
if x % i == 0:
return False
return True
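# Quick sanity checks: isprime(0) -> False, isprime(2) -> True, isprime(9) -> False, isprime(13) -> True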
def calced(start_point, end_point):
"""
Calculate and return as a single number the distance between two points
I used this instead of writing it out to avoid a typo that would be impossible to find
:param start_point: [[x, y]] array of points
:param end_point: [[x, y]] array of points
:return: real number of distance
"""
euclidean_distance = spatial.distance.cdist(start_point, end_point, "euclidean")
euclidean_distance = euclidean_distance[0] # Force to list of one number
euclidean_distance = euclidean_distance[0] # Force to number
return euclidean_distance
def run_time(start):
"""
Just takes in previous time and returns elapsed time
:param start: start time
:return: elapsed time
"""
return process_time() - start
def dist_city(start_city, end_city, dict):
"""
Measures distance between two cities
:param start_city: number of city (int)
:param end_city: number of other city
:param dict: The dictionary with the cities in it.
:return: the Euclidean distance between the two cities
"""
if (start_city < len(dict)) & (end_city < len(dict)):
start_row = dict[start_city]
end_row = dict[end_city]
start_point = [[start_row['X'], start_row['Y']]]
end_point = [[end_row['X'], end_row['Y']]]
ed = calced(start_point, end_point)
else:
raise Exception('Bad city number')
return ed
# ######################## Classes ##############################
class AlignData:
"""
This is a helper class. It is used to align the data.
"""
# def __init__(self):
def alignnow(self, df, verbose=False):
"""
Adds prime flag to initial data set
:param df: data frame from initial load
:param verbose: just for tracing
:return: aligned data frame with primes identified
"""
# Is city a prime?
df['Prime'] = False
# Adjust
df['Prime'] = df[['CityId', 'Prime']].apply(lambda x:
True if isprime(x['CityId']) else False, axis=1)
if verbose:
print(df)
return df
def travel_time(self, dict_path, dict_cities, verbose=False):
"""
My almost working travel time estimation. Seems to be off a bit from Kaggle.
:param dict_path: dictionary version of path
:param dict_cities: dictionary of cities aligned from above
:param verbose: Print trace
:return: total Euclidean distance, with carrot logic adding 10% (off by about 0.5% from Kaggle)
"""
length = len(dict_path) -1
previous_city = 0 # Always North Pole, zero
count = 1 # Skip pole and first entry as it is not a step
previous = False # Used to track that previous step caused no carrot add-on
ed_total = 0.0 # Total distance
step = 1 # Step we are on
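# Carrot rule, as approximated here: after every 10th step, if the city just reached is not
# prime, the next leg is charged at 110% of its Euclidean length (this is the part the header
# notes is about 0.5% off from Kaggle's official scoring).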
while True:
if count > length:
break
row = dict_path[count]
city = row['Path']
euclidean_distance = dist_city(previous_city, city, dict_cities)
if previous:
ed = euclidean_distance * 1.1
previous = False
else:
ed = euclidean_distance
ed_total = ed_total + ed
if step == 10:
step = 0
if not dict_cities[city]['Prime']:
previous = True
previous_city = city
count = count + 1
step = step + 1
if verbose:
print("Path =", ed_total)
return ed_total
def improve_random(self, dict_path, dict_cities, epochs, verbose=True):
"""
The Monte Carlo greedy improvement. Each epoch walks every entry in the path and randomly
tries to swap it with another entry; if the swap shortens the path, it is kept.
Improvements keep coming until roughly 75 million attempted swaps, after which the gains diminish.
:param dict_path: The dictionary of path to improve
:param dict_cities: Aligned dictionary of cities from above
:param epochs: How many times to pass-thru the whole list
:param verbose: Trace
:return: improved path in dictionary form, list of logs from epoch
"""
epoch_count = 1
logs = []
if verbose:
print("Starting Epochs:", epochs)
while epoch_count <= epochs:
if verbose:
print("Epoch #", epoch_count)
count = 1 # Skip pole and first entry as it is not a step
length = len(dict_path) - 3
better = 0.0
start_time = process_time()
while True:
if count > length:
break
picked = np.random.randint(length) + 1 # 1..(end -1)
# Get current impact to distance from current placement
cur = dict_path[count]['Path']
chg = dict_path[picked]['Path']
p_cur = dict_path[count - 1]['Path']
n_cur = dict_path[count + 1]['Path']
p_chg = dict_path[picked - 1]['Path']
n_chg = dict_path[picked + 1]['Path']
# Get current impact to distance from current placement
curr_dist = dist_city(p_cur, cur, dict_cities) + \
dist_city(cur, n_cur, dict_cities) + \
dist_city(p_chg, chg, dict_cities) + \
dist_city(chg, n_chg, dict_cities)
# Get new impact to distance from change
new_dist = dist_city(p_cur, chg, dict_cities) + \
dist_city(chg, n_cur, dict_cities) + \
dist_city(p_chg, cur, dict_cities) + \
dist_city(cur, n_chg, dict_cities)
check_dist = curr_dist - new_dist
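# check_dist > 0 means the four affected edges get shorter overall, so the swap of positions
# `count` and `picked` is applied greedily.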
if check_dist > 0.0:
better = better + check_dist
dict_path[count] = {'Path': chg}
dict_path[picked] = {'Path': cur}
count = count + 1
exc_time = run_time(start_time)
if verbose:
print("Better =", better)
print("Epoch", epoch_count," Improvement:", better," Execution time:", exc_time)
logs.append([epoch_count, better, exc_time])
epoch_count = epoch_count + 1
return dict_path, logs
# ######################## Start up ##############################
print("Improve Raindeer")
print(__version__, " ", __copyright__, " ", __license__)
begin_time = process_time()
name = input("What file to process (default=path.csv) ")
if name == "":
name = "path.csv"
df_path = pd.read_csv(name)
dict_path = df_path.to_dict('records')
name_out = input("What file to output (default=deerimp.csv) ")
if name_out == "":
name_out = "deerimp.csv"
name_log = input("What file to output log (default=deerimplog.txt) ")
if name_log == "":
name_log = "deerimplog.txt"
name = input("How many runs (default=5) ")
if name == "":
name = "5"
runs = int(name)
print(" Get City Data and align....")
start_time = process_time()
# get data and add prime column in a fine DF for easy of use
align = AlignData()
df_cities = align.alignnow( | pd.read_csv('cities.csv') | pandas.read_csv |
from collections import Counter
import pandas as pd
from aa.unit import Army
from .battle import Battle, LandBattle
from .utils import battle_factory
new_battle = battle_factory(army_cls=Army, battle_cls=Battle)
new_land_battle = battle_factory(army_cls=Army, battle_cls=LandBattle)
def simulate_battles(battle_config, count, factory):
wins = []
stats = []
for _ in range(count):
b = factory(config=battle_config)
b.simulate()
wins.append(b.winner)
stats.append(b.stats())
win_series = pd.Series(wins)
win_summary = win_series.value_counts().to_dict()
win_summary['total_played'] = int(win_series.count())
stats_df = | pd.DataFrame(stats) | pandas.DataFrame |
# -*- coding: utf-8 -*-
import re
import pandas as pd
import scrapy
from scrapy import Request
from scrapy import Selector
from scrapy import signals
from fooltrader.api.quote import get_security_list
from fooltrader.consts import DEFAULT_KDATA_HEADER
from fooltrader.contract.files_contract import get_finance_report_event_path
from fooltrader.utils.utils import index_df_with_time
class StockFinanceReportEventSpider(scrapy.Spider):
name = "stock_finance_report_event"
custom_settings = {
'DOWNLOAD_DELAY': 2,
'CONCURRENT_REQUESTS_PER_DOMAIN': 8,
'SPIDER_MIDDLEWARES': {
'fooltrader.middlewares.FoolErrorMiddleware': 1000,
}
}
def start_requests(self):
security_item = self.settings.get("security_item")
if security_item is not None:
for request in self.yield_request(security_item):
yield request
else:
for _, item in get_security_list().iterrows():
for request in self.yield_request(item):
yield request
def yield_request(self, item):
# Q1 report, interim (half-year) report, Q3 report, annual report
for period_type in ['yjdbg', 'zqbg', 'sjdbg', 'ndbg']:
url = self.get_finance_report_event_url(item['code'], period_type)
yield Request(url=url, headers=DEFAULT_KDATA_HEADER,
meta={'item': item,
'period_type': period_type},
callback=self.download_fi_report_event_data)
@staticmethod
def report_period_from_title(title, period_type, report_event_date):
try:
year = re.match('.*(\d{4}).*', title).group(1)
report_event_year = | pd.Timestamp(report_event_date) | pandas.Timestamp |
#---------------------------------------------------------------------
# File Name : LogisticRegression2.py
# Author : <NAME>.
# Description : Implementing Logistic Regression
# Date: : 12 Nov. 2020
# Version : V1.0
# Ref No : DS_Code_P_K07
#---------------------------------------------------------------------
import pandas as pd
import numpy as np
import seaborn as sb
import matplotlib.pyplot as plt
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split # train and test (sklearn.cross_validation was removed in newer scikit-learn)
from sklearn import metrics
from sklearn import preprocessing
from sklearn.metrics import classification_report
# loading claimants data
claimants = pd.read_csv("claimants.csv")
claimants.head(10)
# Droping first column
claimants.drop(["CASENUM"],inplace=True,axis = 1)
#cat_cols = ["ATTORNEY","CLMSEX","SEATBELT","CLMINSUR"]
#cont_cols = ["CLMAGE","LOSS"]
# Getting the barplot for the categorical columns
sb.countplot(x="ATTORNEY",data=claimants,palette="hls")
pd.crosstab(claimants.ATTORNEY,claimants.CLMINSUR).plot(kind="bar")
sb.countplot(x="CLMSEX",data=claimants,palette="hls")
pd.crosstab(claimants.CLMSEX,claimants.CLMINSUR).plot(kind="bar")
sb.countplot(x="SEATBELT",data=claimants,palette="hls")
pd.crosstab(claimants.SEATBELT,claimants.CLMINSUR).plot(kind="bar")
sb.countplot(x="CLMINSUR",data=claimants,palette="hls")
# Data Distribution - Boxplot of continuous variables wrt to each category of categorical columns
sb.boxplot(x="ATTORNEY",y="CLMAGE",data=claimants,palette="hls")
sb.boxplot(x="ATTORNEY",y="LOSS",data=claimants,palette="hls")
sb.boxplot(x="CLMSEX",y="CLMAGE",data=claimants,palette="hls")
sb.boxplot(x="CLMSEX",y="LOSS",data=claimants,palette="hls")
sb.boxplot(x="SEATBELT",y="CLMAGE",data=claimants,palette="hls")
sb.boxplot(x="SEATBELT",y="LOSS",data=claimants,palette="hls")
sb.boxplot(x="CLMINSUR",y="CLMAGE",data=claimants,palette="hls")
sb.boxplot(x="CLMINSUR",y="LOSS",data=claimants,palette="hls")
# To get the count of null values in the data
claimants.isnull().sum()
claimants.shape # 1340 6 => Before dropping null values
# To drop null values ( dropping rows)
claimants.dropna().shape # 1096 6 => After dropping null values
# Fill nan values with mode of the categorical column
claimants["CLMSEX"].fillna(1,inplace=True) # claimants.CLMSEX.mode() = 1
claimants["CLMINSUR"].fillna(1,inplace=True) # claimants.CLMINSUR.mode() = 1
claimants["SEATBELT"].fillna(0,inplace=True) # claimants.SEATBELT.mode() = 0
claimants["CLMSEX"].fillna(1,inplace=True) # claimants.CLMSEX.mode() = 1
claimants.CLMAGE.fillna(28.4144,inplace=True) # claimants.CLMAGE.mean() = 28.4
# Model building
from sklearn.linear_model import LogisticRegression
claimants.shape
X = claimants.iloc[:,[1,2,3,4,5]]
Y = claimants.iloc[:,0]
classifier = LogisticRegression()
classifier.fit(X,Y)
classifier.coef_ # coefficients of features
classifier.predict_proba (X) # Probability values
y_pred = classifier.predict(X)
claimants["y_pred"] = y_pred
y_prob = pd.DataFrame(classifier.predict_proba(X.iloc[:,:]))
new_df = pd.concat([claimants,y_prob],axis=1)
from sklearn.metrics import confusion_matrix
confusion_matrix = confusion_matrix(Y,y_pred)
print (confusion_matrix)
type(y_pred)
accuracy = sum(Y==y_pred)/claimants.shape[0]
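# Training accuracy: fraction of rows where the predicted ATTORNEY label matches the actual one.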
| pd.crosstab(y_pred,Y) | pandas.crosstab |
# -*- coding: utf-8 -*-
"""
Created on Sun Jan 16 10:27:53 2022
@author: dariu
"""
import numpy as np
import pandas as pd
import os
from tqdm import tqdm
import pacmap
import matplotlib.pyplot as plt
from sklearn.manifold import TSNE
import umap
from sklearn.cluster import KMeans
from sklearn.cluster import DBSCAN
#import sklearn.cluster
from sklearn.decomposition import PCA
from sklearn import metrics
from sklearn.cluster import OPTICS, cluster_optics_dbscan
import matplotlib.gridspec as gridspec
from sklearn.cluster import SpectralCoclustering
from sklearn.metrics import consensus_score
from sklearn.cluster import SpectralBiclustering
from sklearn import svm
from sklearn.model_selection import train_test_split
from imblearn.under_sampling import NearMiss
from imblearn.pipeline import make_pipeline
from imblearn.metrics import classification_report_imbalanced
from sklearn.metrics import RocCurveDisplay
path = "C:\\Users\dariu\\Documents\\Master Wirtschaftsinformatik\\Data Challenges\Data\\"
directorys = [
['training_setA/training/', 'p0'],
['training_setB/training_setB/', 'p1']
]
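# Each entry pairs a training-set directory with a patient-file prefix ('p0' for set A,
# 'p1' for set B); note that file_head is not actually used in the loading loop below.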
#%%
dfs = []
for z, (directory, file_head) in enumerate(directorys):
for i, filename in enumerate(tqdm(os.listdir(path + directory))):
df_temp = pd.read_csv(path + directory + filename, skiprows=0, sep='|')
# patient_gender = df_temp["Gender"][1]
# if df_temp["Age"][1] >= 40:
dfs.append(df_temp)
df = | pd.concat(dfs) | pandas.concat |
import pytest
from pandas._libs.tslibs.frequencies import INVALID_FREQ_ERR_MSG, _period_code_map
from pandas.errors import OutOfBoundsDatetime
from pandas import Period, Timestamp, offsets
class TestFreqConversion:
"""Test frequency conversion of date objects"""
@pytest.mark.parametrize("freq", ["A", "Q", "M", "W", "B", "D"])
def test_asfreq_near_zero(self, freq):
# GH#19643, GH#19650
per = Period("0001-01-01", freq=freq)
tup1 = (per.year, per.hour, per.day)
prev = per - 1
assert prev.ordinal == per.ordinal - 1
tup2 = (prev.year, prev.month, prev.day)
assert tup2 < tup1
def test_asfreq_near_zero_weekly(self):
# GH#19834
per1 = Period("0001-01-01", "D") + 6
per2 = Period("0001-01-01", "D") - 6
week1 = per1.asfreq("W")
week2 = per2.asfreq("W")
assert week1 != week2
assert week1.asfreq("D", "E") >= per1
assert week2.asfreq("D", "S") <= per2
def test_to_timestamp_out_of_bounds(self):
# GH#19643, used to incorrectly give Timestamp in 1754
per = Period("0001-01-01", freq="B")
msg = "Out of bounds nanosecond timestamp"
with pytest.raises(OutOfBoundsDatetime, match=msg):
per.to_timestamp()
def test_asfreq_corner(self):
val = Period(freq="A", year=2007)
result1 = val.asfreq("5t")
result2 = val.asfreq("t")
expected = Period("2007-12-31 23:59", freq="t")
assert result1.ordinal == expected.ordinal
assert result1.freqstr == "5T"
assert result2.ordinal == expected.ordinal
assert result2.freqstr == "T"
def test_conv_annual(self):
# frequency conversion tests: from Annual Frequency
ival_A = Period(freq="A", year=2007)
ival_AJAN = Period(freq="A-JAN", year=2007)
ival_AJUN = Period(freq="A-JUN", year=2007)
ival_ANOV = Period(freq="A-NOV", year=2007)
ival_A_to_Q_start = Period(freq="Q", year=2007, quarter=1)
ival_A_to_Q_end = Period(freq="Q", year=2007, quarter=4)
ival_A_to_M_start = Period(freq="M", year=2007, month=1)
ival_A_to_M_end = Period(freq="M", year=2007, month=12)
ival_A_to_W_start = Period(freq="W", year=2007, month=1, day=1)
ival_A_to_W_end = Period(freq="W", year=2007, month=12, day=31)
ival_A_to_B_start = Period(freq="B", year=2007, month=1, day=1)
ival_A_to_B_end = Period(freq="B", year=2007, month=12, day=31)
ival_A_to_D_start = Period(freq="D", year=2007, month=1, day=1)
ival_A_to_D_end = Period(freq="D", year=2007, month=12, day=31)
ival_A_to_H_start = Period(freq="H", year=2007, month=1, day=1, hour=0)
ival_A_to_H_end = Period(freq="H", year=2007, month=12, day=31, hour=23)
ival_A_to_T_start = Period(
freq="Min", year=2007, month=1, day=1, hour=0, minute=0
)
ival_A_to_T_end = Period(
freq="Min", year=2007, month=12, day=31, hour=23, minute=59
)
ival_A_to_S_start = Period(
freq="S", year=2007, month=1, day=1, hour=0, minute=0, second=0
)
ival_A_to_S_end = Period(
freq="S", year=2007, month=12, day=31, hour=23, minute=59, second=59
)
ival_AJAN_to_D_end = Period(freq="D", year=2007, month=1, day=31)
ival_AJAN_to_D_start = Period(freq="D", year=2006, month=2, day=1)
ival_AJUN_to_D_end = Period(freq="D", year=2007, month=6, day=30)
ival_AJUN_to_D_start = Period(freq="D", year=2006, month=7, day=1)
ival_ANOV_to_D_end = Period(freq="D", year=2007, month=11, day=30)
ival_ANOV_to_D_start = Period(freq="D", year=2006, month=12, day=1)
assert ival_A.asfreq("Q", "S") == ival_A_to_Q_start
assert ival_A.asfreq("Q", "e") == ival_A_to_Q_end
assert ival_A.asfreq("M", "s") == ival_A_to_M_start
assert ival_A.asfreq("M", "E") == ival_A_to_M_end
assert ival_A.asfreq("W", "S") == ival_A_to_W_start
assert ival_A.asfreq("W", "E") == ival_A_to_W_end
assert ival_A.asfreq("B", "S") == ival_A_to_B_start
assert ival_A.asfreq("B", "E") == ival_A_to_B_end
assert ival_A.asfreq("D", "S") == ival_A_to_D_start
assert ival_A.asfreq("D", "E") == ival_A_to_D_end
assert ival_A.asfreq("H", "S") == ival_A_to_H_start
assert ival_A.asfreq("H", "E") == ival_A_to_H_end
assert ival_A.asfreq("min", "S") == ival_A_to_T_start
assert ival_A.asfreq("min", "E") == ival_A_to_T_end
assert ival_A.asfreq("T", "S") == ival_A_to_T_start
assert ival_A.asfreq("T", "E") == ival_A_to_T_end
assert ival_A.asfreq("S", "S") == ival_A_to_S_start
assert ival_A.asfreq("S", "E") == ival_A_to_S_end
assert ival_AJAN.asfreq("D", "S") == ival_AJAN_to_D_start
assert ival_AJAN.asfreq("D", "E") == ival_AJAN_to_D_end
assert ival_AJUN.asfreq("D", "S") == ival_AJUN_to_D_start
assert ival_AJUN.asfreq("D", "E") == ival_AJUN_to_D_end
assert ival_ANOV.asfreq("D", "S") == ival_ANOV_to_D_start
assert ival_ANOV.asfreq("D", "E") == ival_ANOV_to_D_end
assert ival_A.asfreq("A") == ival_A
def test_conv_quarterly(self):
# frequency conversion tests: from Quarterly Frequency
ival_Q = Period(freq="Q", year=2007, quarter=1)
ival_Q_end_of_year = Period(freq="Q", year=2007, quarter=4)
ival_QEJAN = Period(freq="Q-JAN", year=2007, quarter=1)
ival_QEJUN = Period(freq="Q-JUN", year=2007, quarter=1)
ival_Q_to_A = Period(freq="A", year=2007)
ival_Q_to_M_start = Period(freq="M", year=2007, month=1)
ival_Q_to_M_end = Period(freq="M", year=2007, month=3)
ival_Q_to_W_start = Period(freq="W", year=2007, month=1, day=1)
ival_Q_to_W_end = Period(freq="W", year=2007, month=3, day=31)
ival_Q_to_B_start = Period(freq="B", year=2007, month=1, day=1)
ival_Q_to_B_end = Period(freq="B", year=2007, month=3, day=30)
ival_Q_to_D_start = Period(freq="D", year=2007, month=1, day=1)
ival_Q_to_D_end = Period(freq="D", year=2007, month=3, day=31)
ival_Q_to_H_start = Period(freq="H", year=2007, month=1, day=1, hour=0)
ival_Q_to_H_end = Period(freq="H", year=2007, month=3, day=31, hour=23)
ival_Q_to_T_start = Period(
freq="Min", year=2007, month=1, day=1, hour=0, minute=0
)
ival_Q_to_T_end = Period(
freq="Min", year=2007, month=3, day=31, hour=23, minute=59
)
ival_Q_to_S_start = Period(
freq="S", year=2007, month=1, day=1, hour=0, minute=0, second=0
)
ival_Q_to_S_end = Period(
freq="S", year=2007, month=3, day=31, hour=23, minute=59, second=59
)
ival_QEJAN_to_D_start = Period(freq="D", year=2006, month=2, day=1)
ival_QEJAN_to_D_end = Period(freq="D", year=2006, month=4, day=30)
ival_QEJUN_to_D_start = Period(freq="D", year=2006, month=7, day=1)
ival_QEJUN_to_D_end = Period(freq="D", year=2006, month=9, day=30)
assert ival_Q.asfreq("A") == ival_Q_to_A
assert ival_Q_end_of_year.asfreq("A") == ival_Q_to_A
assert ival_Q.asfreq("M", "S") == ival_Q_to_M_start
assert ival_Q.asfreq("M", "E") == ival_Q_to_M_end
assert ival_Q.asfreq("W", "S") == ival_Q_to_W_start
assert ival_Q.asfreq("W", "E") == ival_Q_to_W_end
assert ival_Q.asfreq("B", "S") == ival_Q_to_B_start
assert ival_Q.asfreq("B", "E") == ival_Q_to_B_end
assert ival_Q.asfreq("D", "S") == ival_Q_to_D_start
assert ival_Q.asfreq("D", "E") == ival_Q_to_D_end
assert ival_Q.asfreq("H", "S") == ival_Q_to_H_start
assert ival_Q.asfreq("H", "E") == ival_Q_to_H_end
assert ival_Q.asfreq("Min", "S") == ival_Q_to_T_start
assert ival_Q.asfreq("Min", "E") == ival_Q_to_T_end
assert ival_Q.asfreq("S", "S") == ival_Q_to_S_start
assert ival_Q.asfreq("S", "E") == ival_Q_to_S_end
assert ival_QEJAN.asfreq("D", "S") == ival_QEJAN_to_D_start
assert ival_QEJAN.asfreq("D", "E") == ival_QEJAN_to_D_end
assert ival_QEJUN.asfreq("D", "S") == ival_QEJUN_to_D_start
assert ival_QEJUN.asfreq("D", "E") == ival_QEJUN_to_D_end
assert ival_Q.asfreq("Q") == ival_Q
def test_conv_monthly(self):
# frequency conversion tests: from Monthly Frequency
ival_M = Period(freq="M", year=2007, month=1)
ival_M_end_of_year = Period(freq="M", year=2007, month=12)
ival_M_end_of_quarter = Period(freq="M", year=2007, month=3)
ival_M_to_A = Period(freq="A", year=2007)
ival_M_to_Q = Period(freq="Q", year=2007, quarter=1)
ival_M_to_W_start = Period(freq="W", year=2007, month=1, day=1)
ival_M_to_W_end = Period(freq="W", year=2007, month=1, day=31)
ival_M_to_B_start = Period(freq="B", year=2007, month=1, day=1)
ival_M_to_B_end = Period(freq="B", year=2007, month=1, day=31)
ival_M_to_D_start = Period(freq="D", year=2007, month=1, day=1)
ival_M_to_D_end = Period(freq="D", year=2007, month=1, day=31)
ival_M_to_H_start = Period(freq="H", year=2007, month=1, day=1, hour=0)
ival_M_to_H_end = Period(freq="H", year=2007, month=1, day=31, hour=23)
ival_M_to_T_start = Period(
freq="Min", year=2007, month=1, day=1, hour=0, minute=0
)
ival_M_to_T_end = Period(
freq="Min", year=2007, month=1, day=31, hour=23, minute=59
)
ival_M_to_S_start = Period(
freq="S", year=2007, month=1, day=1, hour=0, minute=0, second=0
)
ival_M_to_S_end = Period(
freq="S", year=2007, month=1, day=31, hour=23, minute=59, second=59
)
assert ival_M.asfreq("A") == ival_M_to_A
assert ival_M_end_of_year.asfreq("A") == ival_M_to_A
assert ival_M.asfreq("Q") == ival_M_to_Q
assert ival_M_end_of_quarter.asfreq("Q") == ival_M_to_Q
assert ival_M.asfreq("W", "S") == ival_M_to_W_start
assert ival_M.asfreq("W", "E") == ival_M_to_W_end
assert ival_M.asfreq("B", "S") == ival_M_to_B_start
assert ival_M.asfreq("B", "E") == ival_M_to_B_end
assert ival_M.asfreq("D", "S") == ival_M_to_D_start
assert ival_M.asfreq("D", "E") == ival_M_to_D_end
assert ival_M.asfreq("H", "S") == ival_M_to_H_start
assert ival_M.asfreq("H", "E") == ival_M_to_H_end
assert ival_M.asfreq("Min", "S") == ival_M_to_T_start
assert ival_M.asfreq("Min", "E") == ival_M_to_T_end
assert ival_M.asfreq("S", "S") == ival_M_to_S_start
assert ival_M.asfreq("S", "E") == ival_M_to_S_end
assert ival_M.asfreq("M") == ival_M
def test_conv_weekly(self):
# frequency conversion tests: from Weekly Frequency
ival_W = Period(freq="W", year=2007, month=1, day=1)
ival_WSUN = Period(freq="W", year=2007, month=1, day=7)
ival_WSAT = Period(freq="W-SAT", year=2007, month=1, day=6)
ival_WFRI = Period(freq="W-FRI", year=2007, month=1, day=5)
ival_WTHU = Period(freq="W-THU", year=2007, month=1, day=4)
ival_WWED = Period(freq="W-WED", year=2007, month=1, day=3)
ival_WTUE = Period(freq="W-TUE", year=2007, month=1, day=2)
ival_WMON = Period(freq="W-MON", year=2007, month=1, day=1)
ival_WSUN_to_D_start = Period(freq="D", year=2007, month=1, day=1)
ival_WSUN_to_D_end = Period(freq="D", year=2007, month=1, day=7)
ival_WSAT_to_D_start = Period(freq="D", year=2006, month=12, day=31)
ival_WSAT_to_D_end = Period(freq="D", year=2007, month=1, day=6)
ival_WFRI_to_D_start = Period(freq="D", year=2006, month=12, day=30)
ival_WFRI_to_D_end = Period(freq="D", year=2007, month=1, day=5)
ival_WTHU_to_D_start = Period(freq="D", year=2006, month=12, day=29)
ival_WTHU_to_D_end = Period(freq="D", year=2007, month=1, day=4)
ival_WWED_to_D_start = Period(freq="D", year=2006, month=12, day=28)
ival_WWED_to_D_end = Period(freq="D", year=2007, month=1, day=3)
ival_WTUE_to_D_start = Period(freq="D", year=2006, month=12, day=27)
ival_WTUE_to_D_end = Period(freq="D", year=2007, month=1, day=2)
ival_WMON_to_D_start = Period(freq="D", year=2006, month=12, day=26)
ival_WMON_to_D_end = Period(freq="D", year=2007, month=1, day=1)
ival_W_end_of_year = Period(freq="W", year=2007, month=12, day=31)
ival_W_end_of_quarter = Period(freq="W", year=2007, month=3, day=31)
ival_W_end_of_month = Period(freq="W", year=2007, month=1, day=31)
ival_W_to_A = Period(freq="A", year=2007)
ival_W_to_Q = Period(freq="Q", year=2007, quarter=1)
ival_W_to_M = Period(freq="M", year=2007, month=1)
if Period(freq="D", year=2007, month=12, day=31).weekday == 6:
ival_W_to_A_end_of_year = Period(freq="A", year=2007)
else:
ival_W_to_A_end_of_year = Period(freq="A", year=2008)
if Period(freq="D", year=2007, month=3, day=31).weekday == 6:
ival_W_to_Q_end_of_quarter = Period(freq="Q", year=2007, quarter=1)
else:
ival_W_to_Q_end_of_quarter = Period(freq="Q", year=2007, quarter=2)
if Period(freq="D", year=2007, month=1, day=31).weekday == 6:
ival_W_to_M_end_of_month = Period(freq="M", year=2007, month=1)
else:
ival_W_to_M_end_of_month = Period(freq="M", year=2007, month=2)
ival_W_to_B_start = Period(freq="B", year=2007, month=1, day=1)
ival_W_to_B_end = Period(freq="B", year=2007, month=1, day=5)
ival_W_to_D_start = Period(freq="D", year=2007, month=1, day=1)
ival_W_to_D_end = Period(freq="D", year=2007, month=1, day=7)
ival_W_to_H_start = Period(freq="H", year=2007, month=1, day=1, hour=0)
ival_W_to_H_end = Period(freq="H", year=2007, month=1, day=7, hour=23)
ival_W_to_T_start = Period(
freq="Min", year=2007, month=1, day=1, hour=0, minute=0
)
ival_W_to_T_end = Period(
freq="Min", year=2007, month=1, day=7, hour=23, minute=59
)
ival_W_to_S_start = Period(
freq="S", year=2007, month=1, day=1, hour=0, minute=0, second=0
)
ival_W_to_S_end = Period(
freq="S", year=2007, month=1, day=7, hour=23, minute=59, second=59
)
assert ival_W.asfreq("A") == ival_W_to_A
assert ival_W_end_of_year.asfreq("A") == ival_W_to_A_end_of_year
assert ival_W.asfreq("Q") == ival_W_to_Q
assert ival_W_end_of_quarter.asfreq("Q") == ival_W_to_Q_end_of_quarter
assert ival_W.asfreq("M") == ival_W_to_M
assert ival_W_end_of_month.asfreq("M") == ival_W_to_M_end_of_month
assert ival_W.asfreq("B", "S") == ival_W_to_B_start
assert ival_W.asfreq("B", "E") == ival_W_to_B_end
assert ival_W.asfreq("D", "S") == ival_W_to_D_start
assert ival_W.asfreq("D", "E") == ival_W_to_D_end
assert ival_WSUN.asfreq("D", "S") == ival_WSUN_to_D_start
assert ival_WSUN.asfreq("D", "E") == ival_WSUN_to_D_end
assert ival_WSAT.asfreq("D", "S") == ival_WSAT_to_D_start
assert ival_WSAT.asfreq("D", "E") == ival_WSAT_to_D_end
assert ival_WFRI.asfreq("D", "S") == ival_WFRI_to_D_start
assert ival_WFRI.asfreq("D", "E") == ival_WFRI_to_D_end
assert ival_WTHU.asfreq("D", "S") == ival_WTHU_to_D_start
assert ival_WTHU.asfreq("D", "E") == ival_WTHU_to_D_end
assert ival_WWED.asfreq("D", "S") == ival_WWED_to_D_start
assert ival_WWED.asfreq("D", "E") == ival_WWED_to_D_end
assert ival_WTUE.asfreq("D", "S") == ival_WTUE_to_D_start
assert ival_WTUE.asfreq("D", "E") == ival_WTUE_to_D_end
assert ival_WMON.asfreq("D", "S") == ival_WMON_to_D_start
assert ival_WMON.asfreq("D", "E") == ival_WMON_to_D_end
assert ival_W.asfreq("H", "S") == ival_W_to_H_start
assert ival_W.asfreq("H", "E") == ival_W_to_H_end
assert ival_W.asfreq("Min", "S") == ival_W_to_T_start
assert ival_W.asfreq("Min", "E") == ival_W_to_T_end
assert ival_W.asfreq("S", "S") == ival_W_to_S_start
assert ival_W.asfreq("S", "E") == ival_W_to_S_end
assert ival_W.asfreq("W") == ival_W
msg = INVALID_FREQ_ERR_MSG
with pytest.raises(ValueError, match=msg):
ival_W.asfreq("WK")
def test_conv_weekly_legacy(self):
# frequency conversion tests: from Weekly Frequency
msg = INVALID_FREQ_ERR_MSG
with pytest.raises(ValueError, match=msg):
Period(freq="WK", year=2007, month=1, day=1)
with pytest.raises(ValueError, match=msg):
Period(freq="WK-SAT", year=2007, month=1, day=6)
with pytest.raises(ValueError, match=msg):
Period(freq="WK-FRI", year=2007, month=1, day=5)
with pytest.raises(ValueError, match=msg):
Period(freq="WK-THU", year=2007, month=1, day=4)
with pytest.raises(ValueError, match=msg):
Period(freq="WK-WED", year=2007, month=1, day=3)
with pytest.raises(ValueError, match=msg):
Period(freq="WK-TUE", year=2007, month=1, day=2)
with pytest.raises(ValueError, match=msg):
Period(freq="WK-MON", year=2007, month=1, day=1)
def test_conv_business(self):
# frequency conversion tests: from Business Frequency"
ival_B = Period(freq="B", year=2007, month=1, day=1)
ival_B_end_of_year = Period(freq="B", year=2007, month=12, day=31)
ival_B_end_of_quarter = Period(freq="B", year=2007, month=3, day=30)
ival_B_end_of_month = Period(freq="B", year=2007, month=1, day=31)
ival_B_end_of_week = Period(freq="B", year=2007, month=1, day=5)
ival_B_to_A = Period(freq="A", year=2007)
ival_B_to_Q = Period(freq="Q", year=2007, quarter=1)
ival_B_to_M = Period(freq="M", year=2007, month=1)
ival_B_to_W = Period(freq="W", year=2007, month=1, day=7)
ival_B_to_D = Period(freq="D", year=2007, month=1, day=1)
ival_B_to_H_start = Period(freq="H", year=2007, month=1, day=1, hour=0)
ival_B_to_H_end = Period(freq="H", year=2007, month=1, day=1, hour=23)
ival_B_to_T_start = Period(
freq="Min", year=2007, month=1, day=1, hour=0, minute=0
)
ival_B_to_T_end = Period(
freq="Min", year=2007, month=1, day=1, hour=23, minute=59
)
ival_B_to_S_start = Period(
freq="S", year=2007, month=1, day=1, hour=0, minute=0, second=0
)
ival_B_to_S_end = Period(
freq="S", year=2007, month=1, day=1, hour=23, minute=59, second=59
)
assert ival_B.asfreq("A") == ival_B_to_A
assert ival_B_end_of_year.asfreq("A") == ival_B_to_A
assert ival_B.asfreq("Q") == ival_B_to_Q
assert ival_B_end_of_quarter.asfreq("Q") == ival_B_to_Q
assert ival_B.asfreq("M") == ival_B_to_M
assert ival_B_end_of_month.asfreq("M") == ival_B_to_M
assert ival_B.asfreq("W") == ival_B_to_W
assert ival_B_end_of_week.asfreq("W") == ival_B_to_W
assert ival_B.asfreq("D") == ival_B_to_D
assert ival_B.asfreq("H", "S") == ival_B_to_H_start
assert ival_B.asfreq("H", "E") == ival_B_to_H_end
assert ival_B.asfreq("Min", "S") == ival_B_to_T_start
assert ival_B.asfreq("Min", "E") == ival_B_to_T_end
assert ival_B.asfreq("S", "S") == ival_B_to_S_start
assert ival_B.asfreq("S", "E") == ival_B_to_S_end
assert ival_B.asfreq("B") == ival_B
def test_conv_daily(self):
        # frequency conversion tests: from Daily Frequency
ival_D = Period(freq="D", year=2007, month=1, day=1)
ival_D_end_of_year = Period(freq="D", year=2007, month=12, day=31)
ival_D_end_of_quarter = Period(freq="D", year=2007, month=3, day=31)
ival_D_end_of_month = Period(freq="D", year=2007, month=1, day=31)
ival_D_end_of_week = Period(freq="D", year=2007, month=1, day=7)
ival_D_friday = Period(freq="D", year=2007, month=1, day=5)
ival_D_saturday = Period(freq="D", year=2007, month=1, day=6)
ival_D_sunday = Period(freq="D", year=2007, month=1, day=7)
# TODO: unused?
# ival_D_monday = Period(freq='D', year=2007, month=1, day=8)
ival_B_friday = Period(freq="B", year=2007, month=1, day=5)
ival_B_monday = Period(freq="B", year=2007, month=1, day=8)
ival_D_to_A = Period(freq="A", year=2007)
ival_Deoq_to_AJAN = Period(freq="A-JAN", year=2008)
ival_Deoq_to_AJUN = Period(freq="A-JUN", year=2007)
ival_Deoq_to_ADEC = Period(freq="A-DEC", year=2007)
ival_D_to_QEJAN = Period(freq="Q-JAN", year=2007, quarter=4)
ival_D_to_QEJUN = Period(freq="Q-JUN", year=2007, quarter=3)
ival_D_to_QEDEC = Period(freq="Q-DEC", year=2007, quarter=1)
ival_D_to_M = Period(freq="M", year=2007, month=1)
ival_D_to_W = Period(freq="W", year=2007, month=1, day=7)
ival_D_to_H_start = Period(freq="H", year=2007, month=1, day=1, hour=0)
ival_D_to_H_end = Period(freq="H", year=2007, month=1, day=1, hour=23)
ival_D_to_T_start = Period(
freq="Min", year=2007, month=1, day=1, hour=0, minute=0
)
ival_D_to_T_end = Period(
freq="Min", year=2007, month=1, day=1, hour=23, minute=59
)
ival_D_to_S_start = Period(
freq="S", year=2007, month=1, day=1, hour=0, minute=0, second=0
)
ival_D_to_S_end = Period(
freq="S", year=2007, month=1, day=1, hour=23, minute=59, second=59
)
assert ival_D.asfreq("A") == ival_D_to_A
assert ival_D_end_of_quarter.asfreq("A-JAN") == ival_Deoq_to_AJAN
assert ival_D_end_of_quarter.asfreq("A-JUN") == ival_Deoq_to_AJUN
assert ival_D_end_of_quarter.asfreq("A-DEC") == ival_Deoq_to_ADEC
assert ival_D_end_of_year.asfreq("A") == ival_D_to_A
assert ival_D_end_of_quarter.asfreq("Q") == ival_D_to_QEDEC
assert ival_D.asfreq("Q-JAN") == ival_D_to_QEJAN
assert ival_D.asfreq("Q-JUN") == ival_D_to_QEJUN
assert ival_D.asfreq("Q-DEC") == ival_D_to_QEDEC
assert ival_D.asfreq("M") == ival_D_to_M
assert ival_D_end_of_month.asfreq("M") == ival_D_to_M
assert ival_D.asfreq("W") == ival_D_to_W
assert ival_D_end_of_week.asfreq("W") == ival_D_to_W
assert ival_D_friday.asfreq("B") == ival_B_friday
assert ival_D_saturday.asfreq("B", "S") == ival_B_friday
assert ival_D_saturday.asfreq("B", "E") == ival_B_monday
assert ival_D_sunday.asfreq("B", "S") == ival_B_friday
assert ival_D_sunday.asfreq("B", "E") == ival_B_monday
assert ival_D.asfreq("H", "S") == ival_D_to_H_start
assert ival_D.asfreq("H", "E") == ival_D_to_H_end
assert ival_D.asfreq("Min", "S") == ival_D_to_T_start
assert ival_D.asfreq("Min", "E") == ival_D_to_T_end
assert ival_D.asfreq("S", "S") == ival_D_to_S_start
assert ival_D.asfreq("S", "E") == ival_D_to_S_end
assert ival_D.asfreq("D") == ival_D
def test_conv_hourly(self):
        # frequency conversion tests: from Hourly Frequency
ival_H = Period(freq="H", year=2007, month=1, day=1, hour=0)
ival_H_end_of_year = Period(freq="H", year=2007, month=12, day=31, hour=23)
ival_H_end_of_quarter = Period(freq="H", year=2007, month=3, day=31, hour=23)
ival_H_end_of_month = Period(freq="H", year=2007, month=1, day=31, hour=23)
ival_H_end_of_week = Period(freq="H", year=2007, month=1, day=7, hour=23)
ival_H_end_of_day = Period(freq="H", year=2007, month=1, day=1, hour=23)
ival_H_end_of_bus = Period(freq="H", year=2007, month=1, day=1, hour=23)
ival_H_to_A = Period(freq="A", year=2007)
ival_H_to_Q = Period(freq="Q", year=2007, quarter=1)
ival_H_to_M = Period(freq="M", year=2007, month=1)
ival_H_to_W = Period(freq="W", year=2007, month=1, day=7)
ival_H_to_D = Period(freq="D", year=2007, month=1, day=1)
ival_H_to_B = Period(freq="B", year=2007, month=1, day=1)
ival_H_to_T_start = Period(
freq="Min", year=2007, month=1, day=1, hour=0, minute=0
)
ival_H_to_T_end = Period(
freq="Min", year=2007, month=1, day=1, hour=0, minute=59
)
ival_H_to_S_start = Period(
freq="S", year=2007, month=1, day=1, hour=0, minute=0, second=0
)
ival_H_to_S_end = Period(
freq="S", year=2007, month=1, day=1, hour=0, minute=59, second=59
)
assert ival_H.asfreq("A") == ival_H_to_A
assert ival_H_end_of_year.asfreq("A") == ival_H_to_A
assert ival_H.asfreq("Q") == ival_H_to_Q
assert ival_H_end_of_quarter.asfreq("Q") == ival_H_to_Q
assert ival_H.asfreq("M") == ival_H_to_M
assert ival_H_end_of_month.asfreq("M") == ival_H_to_M
assert ival_H.asfreq("W") == ival_H_to_W
assert ival_H_end_of_week.asfreq("W") == ival_H_to_W
assert ival_H.asfreq("D") == ival_H_to_D
assert ival_H_end_of_day.asfreq("D") == ival_H_to_D
assert ival_H.asfreq("B") == ival_H_to_B
assert ival_H_end_of_bus.asfreq("B") == ival_H_to_B
assert ival_H.asfreq("Min", "S") == ival_H_to_T_start
assert ival_H.asfreq("Min", "E") == ival_H_to_T_end
assert ival_H.asfreq("S", "S") == ival_H_to_S_start
assert ival_H.asfreq("S", "E") == ival_H_to_S_end
assert ival_H.asfreq("H") == ival_H
def test_conv_minutely(self):
        # frequency conversion tests: from Minutely Frequency
ival_T = | Period(freq="Min", year=2007, month=1, day=1, hour=0, minute=0) | pandas.Period |
# FLOWDO
# FlowDo is an application for managing business activities such as Inventory Maintenance, Billing, Sales analysis and other business functions.
# Developed by:
# <NAME> (@Moulishankar10)
# <NAME> (@ToastCoder)
# REQUIRED MODULES
import numpy as np
import pandas as pd
from datetime import datetime
import matplotlib.pyplot as plt
# VISUALIZER FUNCTIONS:
# Used to display keymaps for main menu and every submenu
# LIST OF KEYMAPS TO BE DISPLAYED IN MAIN MENU
def mainOptionsVisualizer():
print("""
\n******************************************** MAIN MENU ***********************************************\n
Press 1 to take a New Order
Press 2 to explore the Revenue options
Press 3 to explore the Inventory properties
Press 9 for exit
\n******************************************************************************************************\n
""")
# LIST OF KEYMAPS TO BE DISPLAYED IN ORDER OPTIONS
def orderOptionsVisualizer():
print("""
\n******************************************** ORDER MENU **********************************************\n
Press 1 to add a new product
Press 2 to remove a product
Press 3 to view the bill
Press 4 to modify your order
Press 5 to proceed your order
Press 9 for exit
\n******************************************************************************************************\n
""")
# LIST OF KEYMAPS TO BE DISPLAYED IN REVENUE OPTIONS
def revenueOptionsVisualizer():
print("""
\n******************************************* REVENUE MENU *********************************************\n
Press 1 to view the Revenue Database
Press 2 to view a Month's Total Revenue
Press 3 to view the product which generated the Maximum Profit in any month
Press 4 to view the product which generated the Minimum Profit in any month
Press 5 to view the Revenue Trend Graph for any year
Press 9 for exit
\n******************************************************************************************************\n
""")
# LIST OF KEYMAPS TO BE DISPLAYED IN INVENTORY OPTIONS
def inventoryOptionsVisualizer():
print("""
\n****************************************** INVENTORY MENU *********************************************\n
Press 1 to view the Stock Inventory
Press 2 to add a new product to your inventory
Press 3 to remove a product from your inventory
Press 4 to modify the properties of existing products
Press 9 for exit
\n*******************************************************************************************************\n
""")
# USED TO CHECK IF THE COLUMN FOR THE MONTH IS CREATED OR NOT
def revMonthChecker():
today = datetime.now()
frmt = today.strftime('%m-%Y')
rev_data = pd.read_csv('data/revenue.csv')
header = list(rev_data.columns)
if frmt not in header:
x = [0]*len(rev_data)
rev_data[frmt] = x
rev_data.to_csv("data/revenue.csv", index = False)
# CLASS FOR BILLING OPERATIONS
class Biller:
def __init__(self,l):
self.prod_name=[]
self.quantity=[]
self.price=[]
self.total_price=[]
self.limit=l
self.ordered = False
self.item_selected = False
def isFull(self):
return len(self.prod_name) == self.limit
def isEmpty(self):
return len(self.prod_name) == 0
# FUNCTION TO ADD A NEW PRODUCT TO THE BILL
def enqueue(self,ele,qn):
if self.isFull():
print("\nMaximum limit reached !")
elif ele.upper() in self.prod_name:
print(f"\n!! '{ele.upper()}' is already in the ordered list !!")
print("\n--- Please refer the 'ORDER MENU' to modify the ordered items ---\n")
else:
inv_data = pd.read_csv('data/inventory.csv')
flag = 0
for i in range(len(inv_data)):
flag = 0
if inv_data["Product_Name"][i] == ele.upper():
if qn.isnumeric() == True:
if int(qn) <= inv_data["Available_Stock"][i]:
self.prod_name.append(ele.upper())
self.quantity.append(int(qn))
self.price.append(inv_data["Selling_Price"][i])
self.item_selected = True
print("\n>>>>>>>> Product is Added to the Order <<<<<<<<\n")
break
else:
print("\n!! Sorry for the inconvenience... Your required product is Out of Stock !!")
break
else:
print("\n!! Invalid Amount of Quantity !!")
break
else:
flag = 1
if flag == 1:
print("\n!! Unavailable Product or Invalid Product !!")
# FUNCTION TO REMOVE A PRODUCT FROM THE BILL
def remove(self):
if self.isEmpty():
print("\n!!! You haven't ordered any product(s) yet to remove !!!\n")
else:
ele = input("\nEnter the product name : ").upper()
if ele in self.prod_name:
ind = self.prod_name.index(ele)
del self.prod_name[ind]
del self.quantity[ind]
del self.price[ind]
del self.total_price[ind]
print("\n>>>>>>>> Product is Removed from the Order <<<<<<<<\n")
else:
print("\n!!! The Specified Product is not in the Order !!!\n")
# FUNCTION TO DISPLAY CONTENTS OF THE BILL
def display(self):
if self.isEmpty():
print("\n!!! You haven't ordered any product(s) yet to generate bill !!!\n")
else:
self.total_price = list(np.array(self.quantity)*np.array(self.price))
form = {'Product Name':self.prod_name,'Quantity':self.quantity,'Cost(1)':self.price,'Total Cost':self.total_price}
res = pd.DataFrame(form)
res.index=list(range(1,len(self.prod_name)+1))
print(res)
print("\n=============================================================\n")
print(f"Total Items : {len(self.prod_name)}")
print(f"Total Quantities : {sum(self.quantity)}")
print(f"Grand Total : Rs.{sum(self.total_price)}")
print("\n=============================================================\n")
# FUNCTION TO MODIFY A PRODUCT NAME OR QUANTITY IN THE BILL
def modify(self):
if self.isEmpty():
print("\n!!! You haven't ordered any product(s) yet to modify !!!\n")
else:
ele = input("\nEnter the product name : ").upper()
if ele in self.prod_name:
ind = self.prod_name.index(ele.upper())
key = int(input("\n Press 1 to modify the product name ..... \n\n Press 2 to modify the quantity .....\n\nYour Option : "))
if key == 1:
self.prod_name[ind] = input("\nEnter the new product name : ").upper()
elif key == 2:
self.quantity[ind] = int(input("\nEnter the new amount of quantity : "))
print("\n>>>>>>>> Updated the Order <<<<<<<<\n")
else:
print("\n!!! The Specified Product is not in the Order !!!\n")
# FUNCTION TO PERFORM THE POST PROCESSING ACTIVITIES ONCE THE BILL IS CONFIRMED
def postProcessor(self):
today = datetime.now()
frmt = today.strftime('%m-%Y')
inv_data = pd.read_csv('data/inventory.csv')
rev_data = pd.read_csv("data/revenue.csv")
for i in range(len(inv_data)):
for j in range(len(self.prod_name)):
if inv_data["Product_Name"][i] == self.prod_name[j]:
inv_data["Available_Stock"][i] -= self.quantity[j]
inv_data.to_csv('data/inventory.csv', index=False)
for i in range(len(rev_data)):
for j in range(len(self.prod_name)):
if rev_data["Product_Name"][i] == self.prod_name[j]:
rev_data[str(frmt)][i] += self.total_price[j]
rev_data.to_csv('data/revenue.csv', index=False)
self.ordered = True
print("\n\n\n -------- Updated the Inventory Data ! -------- \n")
#INDIVIDUAL FUNCTIONS USED IN REVENUE SUB MENU
month = ["January","February","March","April","May","June","July","August","September","October","November","December"]
# FUNCTION TO VIEW THE REVENUE DATABASE
def viewRevenue():
rev_data = pd.read_csv('data/revenue.csv')
print("\n------------------------------------------ REVENUE DATABASE --------------------------------------------\n\n",rev_data.to_string(index=False))
# FUNCTION TO DISPLAY THE REVENUE GENERATED BY THIS MONTH
def viewMonthRevenue():
rev_data = | pd.read_csv('data/revenue.csv') | pandas.read_csv |
from bs4 import BeautifulSoup, element as bs4_element
import numpy as np
import pandas as pd
import re
import requests
from typing import Optional
from .readers import parse_oakland_excel
from ..caada_typing import stringlike
from ..caada_errors import HTMLParsingError, HTMLRequestError
from ..caada_logging import logger
##############
# PORT OF LA #
##############
def _convert_la_numbers(val):
# If there happens to be a ',' two characters from the end, it should probably be a decimal point.
    val = re.sub(r',(\d\d)$', r'.\1', val.strip())
# Then just remove the remaining commas plus any percent signs
val = re.sub(r'[,%]', '', val)
try:
return float(val)
except ValueError:
# This will handle empty cells (e.g. that haven't been filled yet) and misformatted cells (e.g. one number was
# "2,406.662.05" - two decimal points)
return np.nan
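# Illustrative sketch (added; not part of the original module): how the cleaning rules above
# behave on a few invented sample cells, assuming a trailing ",dd" is treated as a decimal
# point as the comment in _convert_la_numbers describes.
def _demo_convert_la_numbers():
    samples = ["1,234", "2,406,662,05", "45%", ""]
    # -> [1234.0, 2406662.05, 45.0, nan]
    return [_convert_la_numbers(s) for s in samples]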
def get_all_la_port_container_data(index: str = 'datetime') -> pd.DataFrame:
"""Get Port of LA container data for all years from 1995 to present.
Parameters
----------
index
How to index the dataframe. See the `index` parameter in :func:`get_la_port_container_data` for details.
Returns
-------
pandas.DataFrame
A dataframe containing data for all years. Will be container moves broken down by import vs. export and
empty vs. full.
"""
this_year = pd.Timestamp.now().year
dfs = []
for yr in range(1995, this_year+1):
dfs.append( get_la_port_container_data(yr, index=index) )
return pd.concat(dfs, axis=0)
def get_la_port_container_data(year: int, index: str = 'datetime') -> pd.DataFrame:
"""Get Port of LA container data for a given year, return as a dataframe.
Parameters
----------
year
The year to get data for. The Port of LA keeps monthly data for 1995 and on; years before 1995 will likely
fail.
index
How to index the returned dataframe. `"datetime"` (the default) will create a datetime index; it will also
remove the year total rows. `"table"` will keep the table's original index (as strings) and will retain the
year summary rows.
Returns
-------
pd.DataFrame
A dataframe containing the data for the requested year. Will be container moves broken down by import vs. export
and empty vs. full.
"""
if index == 'datetime':
parse_year = year
elif index == 'table':
parse_year = None
else:
raise ValueError('"{}" is not one of the allowed values for index'.format(index))
r = requests.get('https://www.portoflosangeles.org/business/statistics/container-statistics/historical-teu-statistics-{:04d}'.format(year))
if r.status_code == 200:
return _parse_la_port_html(r.content, parse_year)
elif r.status_code == 404:
# Page not found, usually because you asked for a year that isn't online
raise HTMLRequestError('Failed to retrieve the Port of LA page for {}. Their server may be down, or the '
'year you requested may be out of range. Years before 1995 are not available.'.format(year))
else:
raise HTMLRequestError('Failed to retrieve the Port of LA page for {}. HTML response code was {}'.format(year, r.status_code))
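# Hypothetical usage sketch (added for illustration; requires network access and is not part of
# the original module): the `index` argument controls how the result is indexed and whether the
# year-summary rows are kept.
def _demo_la_port_usage():
    by_month = get_la_port_container_data(2019, index='datetime')  # datetime index, summary rows dropped
    raw_table = get_la_port_container_data(2019, index='table')    # original row labels, summaries kept
    return by_month.head(), raw_table.tail()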
def _parse_la_port_html(html: stringlike, year: Optional[int] = None) -> pd.DataFrame:
"""Parse LA port container data from HTML into a dataframe.
Parameters
----------
html
The raw HTML from the Port of LA "historical-teu-statistics" page.
year
Which year the page is for. If given, the returned dataframe will have a datetime index, and the year summary
rows are removed. If not given, the dataframe uses the original table row labels (as strings) and retains the
year summary rows.
Returns
-------
pd.DataFrame
The dataframe with the container data.
"""
soup = BeautifulSoup(html, 'html.parser')
# Should be exactly one table on the page - find it
table = soup('table')
if len(table) != 1:
raise HTMLParsingError('Expected exactly one table, got {}'.format(len(table)))
else:
table = table[0]
# Get the rows of the table - the first will give us the header, the rest will give
# us the data. Read it into a dict that can be easily converted to a dataframe
tr_tags = table('tr')
header = [_stdize_la_table_header(tag.text) for tag in tr_tags[0]('td')]
index = []
df_dict = {k: [] for k in header[1:]}
for row in tr_tags[1:]:
row_data = [tag.text.strip() if i == 0 else _convert_la_numbers(tag.text) for i, tag in enumerate(row('td'))]
index.append(row_data[0])
for i, k in enumerate(header[1:], start=1):
df_dict[k].append(row_data[i])
df = | pd.DataFrame(df_dict, index=index) | pandas.DataFrame |
import os
import configparser
import pandas as pd
import numpy as np
import psycopg2
import psycopg2.extras
# Set up GCP API
from google.cloud import bigquery
# Construct a BigQuery client object.
client = bigquery.Client()
import sql_queries as sql_q
def convert_int_zipcode_to_str(df, col):
"""
Converts integer zipcode column into 0-padded str column.
df - pandas dataframe with zipcode int column
col - string; name of column with zipcodes
"""
df[col] = df[col].astype('str')
df[col] = df[col].apply(lambda x: x.zfill(5))
def remove_bad_zipcodes(zip_df, df, col):
"""
Removes bad zipcodes from data (i.e. erroneous zipcodes that are
not valid US zipcodes).
zip_df - pandas dataframe with valid US zipcodes
df - pandas dataframe to be cleaned
col - string; column name of zipcode column in df
"""
zip_set = set(zip_df['Zipcode'].unique())
return df[df[col].isin(zip_set)]
def load_lbnl_data(zip_df, replace_nans=True, short_zips=True):
"""
Loads LBNL solar survey data.
zip_df - pandas dataframe with zipcode data for cleaning bad zipcodes
replace_nans - boolean; if True, replaces -9999 missing value placeholders with np.nan
short_zips - boolean; if True, makes sure all zip codes are 5-digit
"""
df1 = pd.read_csv('../data/TTS_LBNL_public_file_10-Dec-2019_p1.csv', encoding='latin-1', low_memory=False)
df2 = pd.read_csv('../data/TTS_LBNL_public_file_10-Dec-2019_p2.csv', encoding='latin-1', low_memory=False)
lbnl_df = pd.concat([df1, df2], axis=0)
if replace_nans:
lbnl_df.replace(-9999, np.nan, inplace=True)
lbnl_df.replace('-9999', np.nan, inplace=True)
if short_zips:
lbnl_df['Zip Code'] = lbnl_df['Zip Code'].apply(lambda x: x.strip()[:5])
# a few zip codes with only 4 digits
lbnl_df['Zip Code'] = lbnl_df['Zip Code'].apply(lambda x: x.zfill(5))
lbnl_df = remove_bad_zipcodes(zip_df, lbnl_df, 'Zip Code')
return lbnl_df
def load_eia_zipcode_data(zip_df):
"""
Loads EIA dataset with zipcodes and energy providers.
zip_df - pandas dataframe with zipcode data for cleaning bad zipcodes
"""
iou_df = pd.read_csv('../data/iouzipcodes2017.csv')
noniou_df = pd.read_csv('../data/noniouzipcodes2017.csv')
eia_zipcode_df = pd.concat([iou_df, noniou_df], axis=0)
# zip codes are ints without zero padding
convert_int_zipcode_to_str(eia_zipcode_df, 'zip')
eia_zipcode_df = remove_bad_zipcodes(zip_df, eia_zipcode_df, 'zip')
return eia_zipcode_df
def extract_lbnl_data(zip_df):
"""
Gets data from LBNL dataset for the installer table and main metrics table.
zip_df - pandas dataframe with zipcode data for cleaning bad zipcodes
"""
lbnl_df = load_lbnl_data(zip_df, replace_nans=False)
# get mode of module manufacturer #1 for each install company
# doesn't seem to work when -9999 values are replaced with NaNs
manufacturer_modes = lbnl_df[['Installer Name', 'Module Manufacturer #1']].groupby('Installer Name').agg(lambda x: x.value_counts().index[0])
manufacturer_modes.reset_index(inplace=True)
# dictionary of installer name to ID
id_install_dict = {}
for i, r in manufacturer_modes.iterrows():
id_install_dict[r['Installer Name']] = i
# get primary installers by zipcode
installer_modes = lbnl_df[['Installer Name', 'Zip Code']].groupby('Zip Code').agg(lambda x: x.value_counts().index[0])
lbnl_zip_data = lbnl_df[['Battery System', 'Feed-in Tariff (Annual Payment)', 'Zip Code']].copy()
lbnl_zip_data.replace(-9999, 0, inplace=True)
lbnl_zip_groups = lbnl_zip_data.groupby('Zip Code').mean()
# merge with most common installer by zip codes
lbnl_zip_groups = lbnl_zip_groups.merge(installer_modes, left_index=True, right_index=True)
lbnl_zip_groups = lbnl_zip_groups[~(lbnl_zip_groups.index == '-9999')]
lbnl_zip_groups.reset_index(inplace=True)
lbnl_zip_groups['Installer ID'] = lbnl_zip_groups['Installer Name'].replace(id_install_dict)
lbnl_zip_groups['Installer ID'] = lbnl_zip_groups['Installer ID'].astype('int')
return manufacturer_modes.reset_index(), lbnl_zip_groups
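# Minimal sketch (added; invented data) of the "mode via value_counts" aggregation used above:
# value_counts() sorts each group's values by frequency, so .index[0] picks the most common one.
def _demo_mode_by_group():
    df = pd.DataFrame({'zip': ['94720', '94720', '94720', '10001'],
                       'installer': ['A', 'A', 'B', 'C']})
    # -> 'A' for 94720, 'C' for 10001
    return df.groupby('zip')['installer'].agg(lambda x: x.value_counts().index[0])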
def extract_eia_data(zip_df):
"""
Extracts data from EIA for main metrics table and utility table.
zip_df - pandas dataframe with zipcode data for cleaning bad zipcodes
Note: several utilities serve the same zip codes.
"""
# load zipcode to eiaid/util number data
eia_zip_df = load_eia_zipcode_data(zip_df)
# eia861 report loading
eia861_df = pd.read_excel('../data/Sales_Ult_Cust_2018.xlsx', header=[0, 1, 2])
    # util number here is eiaid in the IOU data
# get relevant columns from multiindex dataframe
utility_number = eia861_df['Utility Characteristics', 'Unnamed: 1_level_1', 'Utility Number']
utility_name = eia861_df['Utility Characteristics', 'Unnamed: 2_level_1', 'Utility Name']
service_type = eia861_df['Utility Characteristics', 'Unnamed: 4_level_1', 'Service Type']
ownership = eia861_df['Utility Characteristics', 'Unnamed: 7_level_1', 'Ownership']
eia_utility_data = pd.concat([utility_number, utility_name, service_type, ownership], axis=1)
eia_utility_data.columns = eia_utility_data.columns.droplevel(0).droplevel(0)
# get residential cost and kwh usage data
res_data = eia861_df['RESIDENTIAL'].copy()
# drop uppermost level
res_data.columns = res_data.columns.droplevel(0)
# missing data seems to be a period
res_data.replace('.', np.nan, inplace=True)
for c in res_data.columns:
res_data[c] = res_data[c].astype('float')
util_number_data = pd.DataFrame(utility_number)
util_number_data.columns = util_number_data.columns.droplevel(0).droplevel(0)
res_data = pd.concat([res_data, utility_number], axis=1)
res_data.columns = ['Thousand Dollars', 'Megawatthours', 'Count', 'Utility Number']
# first join with zipcode data to group by zip
res_data_zip = res_data.merge(eia_zip_df, left_on='Utility Number', right_on='eiaid')
# group by zip and get sums of revenues, MWh, and customer count
res_data_zip = res_data_zip.groupby('zip').sum()
# convert revenues to yearly bill and MWh to kWh
# thousand dollars of revenue divided by customer count
res_data_zip['average_yearly_bill'] = res_data_zip['Thousand Dollars'] * 1000 / res_data_zip['Count']
# kwh divided by customer count
res_data_zip['average_yearly_kwh'] = (res_data_zip['Megawatthours'] * 1000) / res_data_zip['Count']
res_columns = ['average_yearly_bill', 'average_yearly_kwh']
res_data_zip = res_data_zip[res_columns]
# combine residential and utility info data
# eia_861_data = pd.concat([res_data[res_columns], eia_utility_data], axis=1)
# combine zipcodes with EIA861 utility data
eia_util_zipcode = eia_utility_data.merge(eia_zip_df, left_on='Utility Number', right_on='eiaid')
# get most-common utility name, service type, and ownership by zipcode
common_util = eia_util_zipcode[['zip', 'Utility Name', 'Service Type', 'Ownership']].groupby('zip').agg(lambda x: x.value_counts().index[0])
eia_861_summary = res_data_zip.merge(common_util, left_index=True, right_index=True)
# change zip back to a column
eia_861_summary.reset_index(inplace=True)
eia_861_summary = remove_bad_zipcodes(zip_df, eia_861_summary, 'zip')
return eia_861_summary
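# Worked example (added; invented numbers) of the unit conversions performed above: revenues are
# reported in thousand dollars and energy in MWh, so both are scaled by 1000 and divided by the
# customer count to get per-customer yearly figures.
def _demo_eia_unit_conversion():
    thousand_dollars, mwh, customers = 1500.0, 20000.0, 1000
    average_yearly_bill = thousand_dollars * 1000 / customers  # -> 1500.0 dollars per customer
    average_yearly_kwh = mwh * 1000 / customers                # -> 20000.0 kWh per customer
    return average_yearly_bill, average_yearly_kwh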
def extract_acs_data(zip_df, load_csv=True, save_csv=True):
"""
Extracts ACS US census data from Google BigQuery.
zip_df - pandas dataframe with zipcode data for cleaning bad zipcodes
load_csv - boolean; if True, tries to load data from csv
save_csv - boolean; if True, will save data to csv if downloading anew
"""
# ACS US census data
ACS_DB = '`bigquery-public-data`.census_bureau_acs'
ACS_TABLE = 'zip_codes_2017_5yr'
filename = '../data/acs_data.csv'
if load_csv and os.path.exists(filename):
acs_df = pd.read_csv(filename)
convert_int_zipcode_to_str(acs_df, 'geo_id')
return acs_df
acs_data_query = f"""SELECT geo_id,
median_age,
housing_units,
median_income,
owner_occupied_housing_units,
occupied_housing_units,
dwellings_1_units_detached + dwellings_1_units_attached + dwellings_2_units + dwellings_3_to_4_units AS family_homes,
bachelors_degree_2,
different_house_year_ago_different_city + different_house_year_ago_same_city AS moved_recently
FROM {ACS_DB}.{ACS_TABLE}"""
acs_data = | pd.read_gbq(acs_data_query) | pandas.read_gbq |
import copy
import json
import numpy as np
import pandas as pd
import re
import sklearn.dummy
import sklearn.ensemble
import sklearn.linear_model
import sklearn.model_selection
import sklearn.neighbors
import sklearn.neural_network
import sklearn.pipeline
import sklearn.preprocessing
import sklearn.svm
import sklearn.utils
import sys
import models
def split_filename(filename):
tokens = filename.split('.')
return '.'.join(tokens[:-1]), tokens[-1]
def load_dataframe(filename):
basename, ext = split_filename(filename)
if ext == 'txt':
# load dataframe from plaintext file
return pd.read_csv(filename, index_col=0, sep='\t')
elif ext == 'npy':
# load data matrix from binary file
X = np.load(filename)
# load row names and column names from text files
rownames = np.loadtxt('%s.rownames.txt' % basename, dtype=str)
colnames = np.loadtxt('%s.colnames.txt' % basename, dtype=str)
# combine data, row names, and column names into dataframe
return | pd.DataFrame(X, index=rownames, columns=colnames) | pandas.DataFrame |
import operator
import numpy as np
import pandas as pd
import staircase as sc
from staircase.core.ops import docstrings
from staircase.core.ops.common import _combine_stairs_via_values, requires_closed_match
from staircase.util import _sanitize_binary_operands
from staircase.util._decorators import Appender
def _make_relational_func(
docstring, numpy_relational, series_relational, float_relational
):
@Appender(docstring, join="\n", indents=1)
@requires_closed_match
def func(self, other):
self, other = _sanitize_binary_operands(
self, other
) # converts other to Stairs if not already
if np.isnan(self.initial_value) or np.isnan(other.initial_value):
initial_value = np.nan
else:
initial_value = (
float_relational(self.initial_value, other.initial_value) * 1
)
if (
(self._data is None and other._data is None)
or (np.isnan(other.initial_value) and other._data is None)
or (np.isnan(self.initial_value) and self._data is None)
):
return sc.Stairs._new(
initial_value=initial_value,
data=None,
closed=other.closed if np.isnan(self.initial_value) else self.closed,
)
elif self._data is None or other._data is None:
if other._data is None: # self._data exists
values = self._get_values()
new_values = numpy_relational(values, other.initial_value)
new_values[values.isna()] = np.nan
new_index = self._data.index
else: # other._data exists
values = other._get_values()
new_values = numpy_relational(self.initial_value, values)
new_values[values.isna()] = np.nan
new_index = other._data.index
new_instance = sc.Stairs._new(
initial_value=initial_value,
data=pd.DataFrame(
{"value": new_values * 1},
index=new_index,
),
closed=self.closed,
)
new_instance._remove_redundant_step_points()
return new_instance
else:
return _combine_stairs_via_values(
self, other, series_relational, float_relational
)
return func
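# Illustrative sketch (added): the factory above is presumably bound to concrete comparison
# operators elsewhere in the package. The placeholder docstring and the exact callables used
# here are assumptions for illustration only, not the package's actual wiring.
_example_gt = _make_relational_func(
    "Example placeholder docstring",  # the real code would pass an entry from `docstrings`
    np.greater,     # element-wise comparison for numpy arrays / scalars
    pd.Series.gt,   # comparison for aligned pandas Series of step values
    operator.gt,    # comparison for the two initial float values
)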
def _is_series_equal(s1, s2):
if not pd.api.types.is_float_dtype(s1):
s1 = s1.astype("float64")
if not | pd.api.types.is_float_dtype(s2) | pandas.api.types.is_float_dtype |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 1999-2017 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
try:
import pandas as pd
except ImportError:
pass
from .core import Engine
from .odpssql.engine import ODPSSQLEngine
from .pd.engine import PandasEngine
from .seahawks.engine import SeahawksEngine
from .seahawks.models import SeahawksTable
from .sqlalchemy.engine import SQLAlchemyEngine
from .selecter import available_engines, Engines, EngineSelecter
from .formatter import ExprExecutionGraphFormatter
from .context import context
from .utils import process_persist_kwargs
from .. import Scalar
from ..expr.core import ExprDAG
from ..expr.expressions import CollectionExpr
from ..expr.merge import JoinCollectionExpr, UnionCollectionExpr
from ..expr.element import IsIn
from ...models import Table
from ... import options
from ...utils import gen_repr_object
def get_default_engine(*exprs):
from ... import ODPS
odps = None
engines = set()
for expr in exprs:
if expr._engine:
return expr._engine
srcs = list(expr.data_source())
expr_engines = list(available_engines(srcs))
engines.update(set(expr_engines))
if len(expr_engines) == 1:
engine = expr_engines[0]
src = srcs[0]
if engine in (Engines.ODPS, Engines.ALGO):
expr_odps = src.odps
elif engine in (Engines.PANDAS, Engines.SQLALCHEMY):
expr_odps = None
else:
raise NotImplementedError
else:
table_src = next(it for it in srcs if hasattr(it, 'odps'))
expr_odps = table_src.odps
if expr_odps is not None:
odps = expr_odps
if odps is None and options.account is not None and \
options.end_point is not None and options.default_project is not None:
odps = ODPS._from_account(options.account, options.default_project,
endpoint=options.end_point,
tunnel_endpoint=options.tunnel.endpoint)
return MixedEngine(odps, list(engines))
class MixedEngine(Engine):
def __init__(self, odps, engines=None):
self._odps = odps
self._engines = engines
self._generated_table_names = []
self._selecter = EngineSelecter()
self._pandas_engine = PandasEngine(self._odps)
self._odpssql_engine = ODPSSQLEngine(self._odps)
self._seahawks_engine = SeahawksEngine(self._odps)
self._sqlalchemy_engine = SQLAlchemyEngine(self._odps)
from ...ml.engine import OdpsAlgoEngine
self._xflow_engine = OdpsAlgoEngine(self._odps)
def stop(self):
self._pandas_engine.stop()
self._odpssql_engine.stop()
self._seahawks_engine.stop()
self._xflow_engine.stop()
def _gen_table_name(self):
table_name = self._odpssql_engine._gen_table_name()
self._generated_table_names.append(table_name)
return table_name
def _get_backend(self, expr_dag):
engine = self._selecter.select(expr_dag)
if engine == Engines.ODPS:
return self._odpssql_engine
elif engine == Engines.PANDAS:
return self._pandas_engine
elif engine == Engines.SQLALCHEMY:
return self._sqlalchemy_engine
elif engine == Engines.ALGO:
return self._xflow_engine
else:
assert engine == Engines.SEAHAWKS
return self._seahawks_engine
def _delegate(self, method, expr_dag, dag, expr, **kwargs):
return getattr(self._get_backend(expr_dag), method)(expr_dag, dag, expr, **kwargs)
def _cache(self, expr_dag, dag, expr, **kwargs):
return self._delegate('_cache', expr_dag, dag, expr, **kwargs)
def _handle_dep(self, expr_dag, dag, expr, **kwargs):
return self._delegate('_handle_dep', expr_dag, dag, expr, **kwargs)
def _execute(self, expr_dag, dag, expr, **kwargs):
return self._delegate('_execute', expr_dag, dag, expr, **kwargs)
def _persist(self, name, expr_dag, dag, expr, **kwargs):
return self._get_backend(expr_dag)._persist(name, expr_dag, dag, expr, **kwargs)
def _handle_join_or_union(self, expr_dag, dag, _, **kwargs):
root = expr_dag.root
if not self._selecter.has_diff_data_sources(root, no_cache=True):
return
to_execute = root._lhs if not self._selecter.has_odps_data_source(root._lhs) \
else root._rhs
table_name = self._gen_table_name()
sub = CollectionExpr(_source_data=self._odps.get_table(table_name),
_schema=to_execute.schema)
sub.add_deps(to_execute)
expr_dag.substitute(to_execute, sub)
# prevent the kwargs come from `persist`
process_persist_kwargs(kwargs)
execute_dag = ExprDAG(to_execute, dag=expr_dag)
return self._get_backend(execute_dag)._persist(
table_name, execute_dag, dag, to_execute, **kwargs)
def _handle_isin(self, expr_dag, dag, expr, **kwargs):
if not self._selecter.has_diff_data_sources(expr_dag.root, no_cache=True):
return
seq = expr._values[0]
expr._values = None
execute_dag = ExprDAG(seq, dag=expr_dag)
execute_node = self._get_backend(execute_dag)._execute(
execute_dag, dag, seq, **kwargs)
def callback(res):
vals = res[:, 0].tolist()
expr._values = tuple(Scalar(val) for val in vals)
execute_node.callback = callback
return execute_node
def _handle_function(self, expr_dag, dag, _, **kwargs):
root = expr_dag.root
# if expr input comes from an ODPS table
is_root_input_from_odps = \
self._selecter.has_odps_data_source(root.children()[0])
for i, collection in enumerate(root._collection_resources):
# if collection resource comes from an ODPS table
is_source_from_odps = self._selecter.has_odps_data_source(collection)
if is_root_input_from_odps and not is_source_from_odps:
table_name = self._gen_table_name()
sub = CollectionExpr(_source_data=self._odps.get_table(table_name),
_schema=collection.schema)
sub.add_deps(collection)
expr_dag.substitute(collection, sub)
# prevent the kwargs come from `persist`
process_persist_kwargs(kwargs)
execute_dag = ExprDAG(collection, dag=expr_dag)
self._get_backend(execute_dag)._persist(
table_name, execute_dag, dag, collection, **kwargs)
elif not is_root_input_from_odps and is_source_from_odps:
if not self._selecter.has_pandas_data_source(root.children()[0]):
raise NotImplementedError
sub = CollectionExpr(_source_data= | pd.DataFrame() | pandas.DataFrame |
import os
import pandas as pd
import json
import csv
from torch.utils.data import Dataset, DataLoader
import nlpaug.augmenter.word as naw
def create_en_dataset(txt_path, save_path, language='en'):
patient = []
doctor = []
if language == 'en':
with open(txt_path, 'r') as f:
lines = f.readlines()
for i, line in enumerate(lines):
if line.startswith('Patient:'):
patient.append(' '.join(lines[i+1:i+2]))
elif line.startswith('Doctor:'):
doctor.append(' '.join(lines[i+1: i+2]))
data = {'src': patient, 'trg': doctor}
df = pd.DataFrame.from_dict(data)
df.to_csv(os.path.join(save_path, 'dialogue.csv'), index=False)
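# Assumed input format (inferred from the parsing logic above, not from any documentation):
# the raw English .txt file alternates a speaker tag line and the utterance on the next line, e.g.
#   Patient:
#   I have had a sore throat for two days.
#   Doctor:
#   Have you had a fever as well?
# Each utterance following its tag becomes one src/trg pair in dialogue.csv.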
"""
Use this function if we are going to use Chinese dataset
"""
def read_chinese_json(src_json, language='cn'):
df = pd.read_json(src_json)
dialogue = df.Dialogue
patient = []
doctor = []
for lines in dialogue:
for line in lines:
if line.startswith('病人'):
patient.append(line)
elif line.startswith('医生'):
doctor.append(line)
data = {'src': patient, 'trg': doctor}
corpus = pd.DataFrame.from_dict(data)
corpus.to_csv(os.path.join(
save_path, 'full_{}.csv'.format(language)), index=False)
train, val, test = split_by_fractions(corpus, [0.8, 0.1, 0.1])
print('generating csv for train, validation and test')
train.to_csv(os.path.join(
save_path, 'train_{}.csv'.format(language)), index=False)
val.to_csv(os.path.join(
save_path, 'val_{}.csv'.format(language)), index=False)
test.to_csv(os.path.join(
save_path, 'test_{}.csv'.format(language)), index=False)
def removeprefix(string, prefix):
if string.startswith(prefix):
return string[len(prefix):]
else:
return string[:]
def split_by_fractions(df: pd.DataFrame, fracs: list, random_state: int = 42):
assert sum(fracs) == 1.0, 'fractions sum is not 1.0 (fractions_sum={})'.format(
sum(fracs))
remain = df.index.copy().to_frame()
res = []
for i in range(len(fracs)):
fractions_sum = sum(fracs[i:])
frac = fracs[i]/fractions_sum
idxs = remain.sample(frac=frac, random_state=random_state).index
remain = remain.drop(idxs)
res.append(idxs)
return [df.loc[idxs] for idxs in res]
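# Illustrative usage (added; invented frame): an 80/10/10 split of a 10-row dataframe gives
# partitions of 8, 1 and 1 rows. The fraction list is renormalised at each step, so the rows
# remaining after earlier draws are sampled with the correct relative weights.
def _demo_split_by_fractions():
    df = pd.DataFrame({'x': range(10)})
    train, val, test = split_by_fractions(df, [0.8, 0.1, 0.1])
    return len(train), len(val), len(test)  # -> (8, 1, 1)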
def save_df(train, test, validation, save_dir):
train.to_csv(os.path.join(save_dir, 'train.tsv'), sep='\t')
validation.to_csv(os.path.join(save_dir, 'valid.tsv'), sep='\t')
test.to_csv(os.path.join(save_dir, 'test.tsv'), sep='\t')
def augment_dataset(csv, model_dir):
original = | pd.read_csv(csv) | pandas.read_csv |
from collections import OrderedDict
import pydoc
import warnings
import numpy as np
import pytest
import pandas as pd
from pandas import (
Categorical,
DataFrame,
DatetimeIndex,
Index,
Series,
TimedeltaIndex,
date_range,
period_range,
timedelta_range,
)
from pandas.core.arrays import PeriodArray
from pandas.core.indexes.datetimes import Timestamp
import pandas.util.testing as tm
import pandas.io.formats.printing as printing
class TestSeriesMisc:
def test_scalarop_preserve_name(self, datetime_series):
result = datetime_series * 2
assert result.name == datetime_series.name
def test_copy_name(self, datetime_series):
result = datetime_series.copy()
assert result.name == datetime_series.name
def test_copy_index_name_checking(self, datetime_series):
# don't want to be able to modify the index stored elsewhere after
# making a copy
datetime_series.index.name = None
assert datetime_series.index.name is None
assert datetime_series is datetime_series
cp = datetime_series.copy()
cp.index.name = "foo"
printing.pprint_thing(datetime_series.index.name)
assert datetime_series.index.name is None
def test_append_preserve_name(self, datetime_series):
result = datetime_series[:5].append(datetime_series[5:])
assert result.name == datetime_series.name
def test_binop_maybe_preserve_name(self, datetime_series):
# names match, preserve
result = datetime_series * datetime_series
assert result.name == datetime_series.name
result = datetime_series.mul(datetime_series)
assert result.name == datetime_series.name
result = datetime_series * datetime_series[:-2]
assert result.name == datetime_series.name
# names don't match, don't preserve
cp = datetime_series.copy()
cp.name = "something else"
result = datetime_series + cp
assert result.name is None
result = datetime_series.add(cp)
assert result.name is None
ops = ["add", "sub", "mul", "div", "truediv", "floordiv", "mod", "pow"]
ops = ops + ["r" + op for op in ops]
for op in ops:
# names match, preserve
s = datetime_series.copy()
result = getattr(s, op)(s)
assert result.name == datetime_series.name
# names don't match, don't preserve
cp = datetime_series.copy()
cp.name = "changed"
result = getattr(s, op)(cp)
assert result.name is None
def test_combine_first_name(self, datetime_series):
result = datetime_series.combine_first(datetime_series[:5])
assert result.name == datetime_series.name
def test_getitem_preserve_name(self, datetime_series):
result = datetime_series[datetime_series > 0]
assert result.name == datetime_series.name
result = datetime_series[[0, 2, 4]]
assert result.name == datetime_series.name
result = datetime_series[5:10]
assert result.name == datetime_series.name
def test_pickle_datetimes(self, datetime_series):
unp_ts = self._pickle_roundtrip(datetime_series)
tm.assert_series_equal(unp_ts, datetime_series)
def test_pickle_strings(self, string_series):
unp_series = self._pickle_roundtrip(string_series)
tm.assert_series_equal(unp_series, string_series)
def _pickle_roundtrip(self, obj):
with tm.ensure_clean() as path:
obj.to_pickle(path)
unpickled = pd.read_pickle(path)
return unpickled
def test_argsort_preserve_name(self, datetime_series):
result = datetime_series.argsort()
assert result.name == datetime_series.name
def test_sort_index_name(self, datetime_series):
result = datetime_series.sort_index(ascending=False)
assert result.name == datetime_series.name
def test_constructor_dict(self):
d = {"a": 0.0, "b": 1.0, "c": 2.0}
result = Series(d)
expected = Series(d, index=sorted(d.keys()))
tm.assert_series_equal(result, expected)
result = Series(d, index=["b", "c", "d", "a"])
expected = Series([1, 2, np.nan, 0], index=["b", "c", "d", "a"])
tm.assert_series_equal(result, expected)
def test_constructor_subclass_dict(self):
data = tm.TestSubDict((x, 10.0 * x) for x in range(10))
series = Series(data)
expected = Series(dict(data.items()))
tm.assert_series_equal(series, expected)
def test_constructor_ordereddict(self):
# GH3283
data = OrderedDict(
("col{i}".format(i=i), np.random.random()) for i in range(12)
)
series = Series(data)
expected = Series(list(data.values()), list(data.keys()))
tm.assert_series_equal(series, expected)
# Test with subclass
class A(OrderedDict):
pass
series = Series(A(data))
tm.assert_series_equal(series, expected)
def test_constructor_dict_multiindex(self):
d = {("a", "a"): 0.0, ("b", "a"): 1.0, ("b", "c"): 2.0}
_d = sorted(d.items())
result = Series(d)
expected = Series(
[x[1] for x in _d], index=pd.MultiIndex.from_tuples([x[0] for x in _d])
)
tm.assert_series_equal(result, expected)
d["z"] = 111.0
_d.insert(0, ("z", d["z"]))
result = Series(d)
expected = Series(
[x[1] for x in _d], index=pd.Index([x[0] for x in _d], tupleize_cols=False)
)
result = result.reindex(index=expected.index)
tm.assert_series_equal(result, expected)
def test_constructor_dict_timedelta_index(self):
# GH #12169 : Resample category data with timedelta index
# construct Series from dict as data and TimedeltaIndex as index
# will result NaN in result Series data
expected = Series(
data=["A", "B", "C"], index=pd.to_timedelta([0, 10, 20], unit="s")
)
result = Series(
data={
pd.to_timedelta(0, unit="s"): "A",
pd.to_timedelta(10, unit="s"): "B",
pd.to_timedelta(20, unit="s"): "C",
},
index=pd.to_timedelta([0, 10, 20], unit="s"),
)
tm.assert_series_equal(result, expected)
def test_sparse_accessor_updates_on_inplace(self):
s = pd.Series([1, 1, 2, 3], dtype="Sparse[int]")
s.drop([0, 1], inplace=True)
assert s.sparse.density == 1.0
def test_tab_completion(self):
# GH 9910
s = Series(list("abcd"))
# Series of str values should have .str but not .dt/.cat in __dir__
assert "str" in dir(s)
assert "dt" not in dir(s)
assert "cat" not in dir(s)
# similarly for .dt
s = Series(date_range("1/1/2015", periods=5))
assert "dt" in dir(s)
assert "str" not in dir(s)
assert "cat" not in dir(s)
# Similarly for .cat, but with the twist that str and dt should be
# there if the categories are of that type first cat and str.
s = Series(list("abbcd"), dtype="category")
assert "cat" in dir(s)
assert "str" in dir(s) # as it is a string categorical
assert "dt" not in dir(s)
# similar to cat and str
s = Series(date_range("1/1/2015", periods=5)).astype("category")
assert "cat" in dir(s)
assert "str" not in dir(s)
assert "dt" in dir(s) # as it is a datetime categorical
def test_tab_completion_with_categorical(self):
# test the tab completion display
ok_for_cat = [
"categories",
"codes",
"ordered",
"set_categories",
"add_categories",
"remove_categories",
"rename_categories",
"reorder_categories",
"remove_unused_categories",
"as_ordered",
"as_unordered",
]
def get_dir(s):
results = [r for r in s.cat.__dir__() if not r.startswith("_")]
return sorted(set(results))
s = Series(list("aabbcde")).astype("category")
results = get_dir(s)
tm.assert_almost_equal(results, sorted(set(ok_for_cat)))
@pytest.mark.parametrize(
"index",
[
tm.makeUnicodeIndex(10),
tm.makeStringIndex(10),
tm.makeCategoricalIndex(10),
Index(["foo", "bar", "baz"] * 2),
tm.makeDateIndex(10),
tm.makePeriodIndex(10),
tm.makeTimedeltaIndex(10),
tm.makeIntIndex(10),
tm.makeUIntIndex(10),
tm.makeIntIndex(10),
tm.makeFloatIndex(10),
Index([True, False]),
Index(["a{}".format(i) for i in range(101)]),
pd.MultiIndex.from_tuples(zip("ABCD", "EFGH")),
pd.MultiIndex.from_tuples(zip([0, 1, 2, 3], "EFGH")),
],
)
def test_index_tab_completion(self, index):
# dir contains string-like values of the Index.
s = pd.Series(index=index)
dir_s = dir(s)
for i, x in enumerate(s.index.unique(level=0)):
if i < 100:
assert not isinstance(x, str) or not x.isidentifier() or x in dir_s
else:
assert x not in dir_s
def test_not_hashable(self):
s_empty = Series()
s = Series([1])
msg = "'Series' objects are mutable, thus they cannot be hashed"
with pytest.raises(TypeError, match=msg):
hash(s_empty)
with pytest.raises(TypeError, match=msg):
hash(s)
def test_contains(self, datetime_series):
tm.assert_contains_all(datetime_series.index, datetime_series)
def test_iter_datetimes(self, datetime_series):
for i, val in enumerate(datetime_series):
assert val == datetime_series[i]
def test_iter_strings(self, string_series):
for i, val in enumerate(string_series):
assert val == string_series[i]
def test_keys(self, datetime_series):
# HACK: By doing this in two stages, we avoid 2to3 wrapping the call
# to .keys() in a list()
getkeys = datetime_series.keys
assert getkeys() is datetime_series.index
def test_values(self, datetime_series):
tm.assert_almost_equal(
datetime_series.values, datetime_series, check_dtype=False
)
def test_iteritems_datetimes(self, datetime_series):
for idx, val in datetime_series.iteritems():
assert val == datetime_series[idx]
def test_iteritems_strings(self, string_series):
for idx, val in string_series.iteritems():
assert val == string_series[idx]
        # assert is lazy (generators don't define reverse, lists do)
assert not hasattr(string_series.iteritems(), "reverse")
def test_items_datetimes(self, datetime_series):
for idx, val in datetime_series.items():
assert val == datetime_series[idx]
def test_items_strings(self, string_series):
for idx, val in string_series.items():
assert val == string_series[idx]
        # assert is lazy (generators don't define reverse, lists do)
assert not hasattr(string_series.items(), "reverse")
def test_raise_on_info(self):
s = Series(np.random.randn(10))
msg = "'Series' object has no attribute 'info'"
with pytest.raises(AttributeError, match=msg):
s.info()
def test_copy(self):
for deep in [None, False, True]:
s = Series(np.arange(10), dtype="float64")
# default deep is True
if deep is None:
s2 = s.copy()
else:
s2 = s.copy(deep=deep)
s2[::2] = np.NaN
if deep is None or deep is True:
# Did not modify original Series
assert np.isnan(s2[0])
assert not np.isnan(s[0])
else:
# we DID modify the original Series
assert np.isnan(s2[0])
assert np.isnan(s[0])
def test_copy_tzaware(self):
# GH#11794
# copy of tz-aware
expected = Series([Timestamp("2012/01/01", tz="UTC")])
expected2 = Series([Timestamp("1999/01/01", tz="UTC")])
for deep in [None, False, True]:
s = Series([Timestamp("2012/01/01", tz="UTC")])
if deep is None:
s2 = s.copy()
else:
s2 = s.copy(deep=deep)
s2[0] = pd.Timestamp("1999/01/01", tz="UTC")
# default deep is True
if deep is None or deep is True:
# Did not modify original Series
tm.assert_series_equal(s2, expected2)
tm.assert_series_equal(s, expected)
else:
# we DID modify the original Series
tm.assert_series_equal(s2, expected2)
tm.assert_series_equal(s, expected2)
def test_axis_alias(self):
s = | Series([1, 2, np.nan]) | pandas.Series |
import pytest
from pandas import Interval, DataFrame
from pandas.testing import assert_frame_equal
from datar.base.funs import *
from datar.base import table, pi, paste0
from datar.stats import rnorm
from .conftest import assert_iterable_equal
def test_cut():
z = rnorm(10000)
tab = table(cut(z, breaks=range(-6, 7)))
assert tab.shape == (1, 12)
assert tab.columns.tolist() == [
Interval(-6, -5, closed='right'),
Interval(-5, -4, closed='right'),
Interval(-4, -3, closed='right'),
Interval(-3, -2, closed='right'),
Interval(-2, -1, closed='right'),
Interval(-1, 0, closed='right'),
| Interval(0, 1, closed='right') | pandas.Interval |
# coding: utf-8
import os
import pandas as pd
from tqdm import tqdm
from czsc.objects import RawBar, Freq
from czsc.utils.bar_generator import BarGenerator, freq_end_time
from test.test_analyze import read_1min
cur_path = os.path.split(os.path.realpath(__file__))[0]
kline = read_1min()
def test_freq_end_time():
assert freq_end_time(pd.to_datetime("2021-11-11 09:43"), Freq.F1) == pd.to_datetime("2021-11-11 09:43")
assert freq_end_time(pd.to_datetime("2021-11-11 09:43"), Freq.F5) == pd.to_datetime("2021-11-11 09:45")
assert freq_end_time(pd.to_datetime("2021-11-11 09:43"), Freq.F15) == pd.to_datetime("2021-11-11 09:45")
assert freq_end_time(pd.to_datetime("2021-11-11 09:45"), Freq.F15) == pd.to_datetime("2021-11-11 09:45")
assert freq_end_time(pd.to_datetime("2021-11-11 14:56"), Freq.F15) == pd.to_datetime("2021-11-11 15:00")
assert freq_end_time(pd.to_datetime("2021-11-11 09:43"), Freq.F30) == pd.to_datetime("2021-11-11 10:00")
assert freq_end_time(pd.to_datetime("2021-11-11 09:43"), Freq.F60) == pd.to_datetime("2021-11-11 10:30")
assert freq_end_time(pd.to_datetime("2021-11-11 09:43"), Freq.D) == pd.to_datetime("2021-11-11")
assert freq_end_time( | pd.to_datetime("2021-11-11 09:43") | pandas.to_datetime |
from sklearn.model_selection import train_test_split
import os
import shutil
import zipfile
import logging
import pandas as pd
import glob
from tqdm import tqdm
from pathlib import Path
from ..common import Common
from ..util import read_img, get_img_dim
from .dataset import Dataset
# All dataset parsers inheret from Dataset.
class LISA_TL(Dataset):
def __init__(self, lisats_config: dict, config: Common):
"""
lisats_config: dict
Dict of values loaded from the dataset.yaml file
expected keys:
[RAW_ROOT] -> root path of raw dataset.zips
[ZIP] -> name of zip file, with .zip
[CLEAN_ROOT] -> where to extract the cleaned files.
[PREPEND] -> prefix to all .txt and .jpg files outputted
config: Common
A Common object created from loaded yaml files with the configs.
"""
# path to raw file (absolute)
self.raw_path = config.root + lisats_config["RAW_FOLDER"] + lisats_config["ZIP"]
# path to initialization folder (absolute)
self.init_path = config.init_folder + lisats_config["INIT_FOLDER"]
self.annot_file = self.init_path + "allAnnotations.csv"
self.prepend = lisats_config["PREPEND"]
self.config = config
# ===============================================
# initializes dataset into intermediary unzipped and cleaned state.
def init_dataset(self):
"""
unzips lisatl into intermidiary INIT_FOLDER
cleans up files from unzip to minimize disk space.
"""
assert os.path.isfile(self.raw_path) # makes sure that the zip actually exists.
print("Started initializing LISATL!")
# make subfolder inside INIT_FOLDER path.
os.makedirs(self.init_path, exist_ok=True)
# unzips file into directory
with zipfile.ZipFile(self.raw_path, "r") as zip_ref:
# zip_ref.extractall(self.init_path)
for member in tqdm(zip_ref.infolist(), desc='Extracting '):
zip_ref.extract(member, self.init_path)
# rename nightTrain, dayTrain to nightTraining, dayTraining (match annotation file)
shutil.move(self.init_path+"nightTrain/nightTrain", self.init_path+"nightTraining")
shutil.move(self.init_path+"dayTrain/dayTrain", self.init_path+"dayTraining")
# delete some of the unnecessary folders.
delete_folders = ["sample-nightClip1/", "sample-dayClip6/", "nightSequence1", "nightSequence2", "daySequence1", "daySequence2", "nightTrain", "dayTrain"]
for folder in delete_folders:
shutil.rmtree(self.init_path + folder)
# read in all day annotations
total_day_df = []
for dayClip in tqdm([x for x in Path(self.init_path+"Annotations/Annotations/dayTrain/").glob('**/*') if x.is_dir()]):
path = os.path.join(dayClip, "frameAnnotationsBOX.csv")
total_day_df.append(pd.read_csv(path, sep=";"))
tdf_day = pd.concat(total_day_df)
tdf_day["day"] = 1
# read in all night annotations
total_night_df = []
for nightClip in tqdm([x for x in Path(self.init_path+"Annotations/Annotations/nightTrain/").glob('**/*') if x.is_dir()]):
path = os.path.join(nightClip, "frameAnnotationsBOX.csv")
total_night_df.append(pd.read_csv(path, sep=";"))
tdf_night = pd.concat(total_night_df)
tdf_night["day"] = 0
all_annotations_df = pd.concat([tdf_day, tdf_night]) # combine the two annotations
# fixes filepaths
all_annotations_df.rename(columns={"Filename": "Filename_old"}, inplace=True)
def fix_filename(origin_col, file_col):
path = origin_col.split("/")[:2] # adds the first two folders ie dayTraining, dayClip1
path.append("frames") # adds the frames folder
path.append(file_col.split("/")[-1]) # adds the image file.
return "/".join(path)
all_annotations_df["Filename"] = all_annotations_df.apply(
lambda row: fix_filename(row["Origin file"], row["Filename_old"]), axis=1
)
all_annotations_df.to_csv(self.annot_file) # output to csv
print("Finished initializing LISATL!")
# ================================================
# parses dataset into output folder
def parse_label(self, img_annotations, img_width, img_height):
labels = []
for _, row in img_annotations.iterrows():
img_class = self.config.classes_dict[row["Annotation"]]
x_left = float(row["Upper left corner X"])
x_right = float(row["Lower right corner X"])
y_lower = float(row["Upper left corner Y"])
y_upper = float(row["Lower right corner Y"])
# error checking. I am cranky
assert y_lower < y_upper
assert x_left < x_right
labels.append(
self.config.formatter.parse_label(
img_class,
img_width,
img_height,
x_left,
y_lower,
x_right,
y_upper,
)
)
return labels
def parse_dataset(self):
assert os.path.isfile(self.annot_file)
print("Started Parsing LISA_TL")
wanted_cols = ["Filename", "Annotation tag", "Upper left corner X", "Lower right corner X", "Upper left corner Y", "Lower right corner Y"]
annot_df = | pd.read_csv(self.annot_file, sep=",", header=0, usecols=wanted_cols) | pandas.read_csv |
import math
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import datetime
#from mpmath import mp
'''
cw.hist_nco(220,1,1e-2,0,90,-90,90,1,3000)
xh,yh,zh -> homogeneous x(t), y(t), z(t) (simulation time [s])
vxh,vyh,vzh -> homogeneous vxh(t), vyh(t), vzh(t) (simulation time [s])
x0,y0,z0 -> initial position for collision [deg, km] (time to collision [s])
vx0,vy0,vz0 -> initial velocity for collision [km/s] (time to collision [s])
w -> angular velocity as a function of altitude
'''
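# Background note (added for clarity; a summary of the standard Clohessy-Wiltshire model, not
# text from the original author): the functions below implement the closed-form solution of the
# linearised relative-motion equations about a circular reference orbit,
#     x'' - 2*w*y' - 3*w**2 * x = 0
#     y'' + 2*w*x'              = 0
#     z'' + w**2 * z            = 0
# where w = sqrt(mi / (raio_terra + altitude)**3) is the mean motion of the reference orbit,
# and a(w,t), b(w,t) below are shorthands for sin(w*t) and cos(w*t).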
# CONSTANTS
mi = 398600.4418
raio_terra = 6378.1366
# Term used in the CW equations
def a(w,t):
return(math.sin(w*t))
# Term used in the CW equations
def b(w,t):
return(math.cos(w*t))
def xp(x0,y0,z0,vx0,vy0,vz0,t,w,vex,vey,vez,chi,gamma):
pass
def yp(x0,y0,z0,vx0,vy0,vz0,t,w,vex,vey,vez,chi,gamma):
Sa,Sb,Sc = 0,0,0
for n in range (1,150):
Sa += ((math.pow(-1,n+1))/(n*pow(chi,n)))*((2*vex)/w + ((n*gamma*vey)/pow(w,2)))*(1/(1+pow(n*gamma/w,2)))
Sb += ((math.pow(-1,n+1))/(n*pow(chi,n)))*(vey/w + ((2*n*gamma*vex)/pow(w,2)))*(1/(1+pow(n*gamma/w,2)))
Sc += ((math.pow(-1,n+1))/(n*pow(chi,n)))*(vey + ((n*gamma*vey)/pow(w,2)))*(1/(1+pow(n*gamma/w,2)))*math.exp(-n*gamma*t)
A = 2*vx0/w - 3*y0 + ((2*vex)/w)*math.log((chi + 1)/chi) - (Sa)
B = vy0/w + (vey/w)*math.log((chi+1)/chi) + (Sb)
D = 4*y0 - 2*vx0/w - ((2*vex)/w)*math.log((chi+1)/chi)
#E = 6*w*y0 - 3*vx0 - 3*vex*math.log((chi+1)/chi)
print(Sa)
print(Sb)
print(Sc)
return(A*b(w,t)+B*a(w,t)+Sc+D)
def zp(x0,y0,z0,vx0,vy0,vz0,t,w,vex,vey,vez,chi,gamma):
az = (vz0/w)+(vez/w)*math.log((chi+(1.0))/chi)
sf = 0
for n in range(1,150):
p1 = math.pow(-1,n+1)
p2 = math.pow(chi,n)
k = 1/(1+(((n*gamma)*(n*gamma))/(w*w)))
k1 = (n*gamma)/w
k2 = w/(n*gamma)
s1znt = ((p1)/(n*p2))*k*(a(w,t)+k1*(b(w,t)-((1.0)/math.exp(n*gamma*t))))
sf = sf + s1znt
return(z0*b(w,t)+az*a(w,t)-(vez/w)*sf)
# Computes x as a function of t
def xh(x0,y0,z0,vx0,vy0,vz0,t,w):
return( (vx0/w)*a(w,t)-((2.0)*vy0/w+(3.0)*x0)*b(w,t)+((2.0)*vy0/w+(4.0)*x0))
# Computes y as a function of t
def yh(x0,y0,z0,vx0,vy0,vz0,t,w):
return(((4.0)*vy0/w+(6.0)*x0)*a(w,t)+((2.0)*vx0/w)*b(w,t)+(y0-(2.0)*vx0/w)-((3.0)*vy0+(6.0)*w*x0)*t)
# Computes z as a function of t
def zh(x0,y0,z0,vx0,vy0,vz0,t,w):
return(z0*b(w,t)+(vz0/w)*a(w,t))
# Computes vx as a function of t
def vxh(x0,y0,z0,vx0,vy0,vz0,t,w):
return ( vx0*b(w,t) + (2.*vy0+3.*w*x0)*a(w,t) )
# Computes vy as a function of t
def vyh(x0,y0,z0,vx0,vy0,vz0,t,w):
return( -2.*vx0*a(w,t) + (4.*vy0+6.*w*x0)*b(w,t) - (3.*vy0+6.*w*x0) )
# Computes vz as a function of t
def vzh(x0,y0,z0,vx0,vy0,vz0,t,w):
return( -z0*w*a(w,t) + vz0*b(w,t) )
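# --- Added illustrative sketch (not part of the original script) ---
# Shows how the homogeneous CW solutions above can be combined to propagate a
# relative state. The altitude, initial state and time span are arbitrary
# example values; w() is defined further below and is resolved at call time.
def _example_homogeneous_propagation(alt=220.0, t_final=600):
    n = w(alt)                           # mean motion for the chosen altitude
    x_i, y_i, z_i = 1.0, 0.5, 0.2        # km, arbitrary initial relative position
    vx_i, vy_i, vz_i = 0.0, -0.001, 0.0  # km/s, arbitrary initial relative velocity
    trajectory = []
    for t in range(0, t_final + 1):
        trajectory.append((t,
                           xh(x_i, y_i, z_i, vx_i, vy_i, vz_i, t, n),
                           yh(x_i, y_i, z_i, vx_i, vy_i, vz_i, t, n),
                           zh(x_i, y_i, z_i, vx_i, vy_i, vz_i, t, n)))
    return trajectory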
# Computes x0
def x0(pitch,yaw,r0):
return ( r0*math.sin((yaw*math.pi)/180)*math.sin((pitch*math.pi)/180) )
# Computes y0
def y0(pitch,yaw,r0):
return( r0*math.sin((yaw*math.pi)/180)*math.cos((pitch*math.pi)/180) )
# Computes z0
def z0(pitch,yaw,r0):
return ( r0*math.cos((yaw*math.pi)/180) )
# Computes the initial x velocity that gives a final x = 0
def vx0(x0,y0,z0,vy0,t,w):
return ( -(w*x0*((4.0)-(3.0)*b(w,t))+(2.0)*((1.0)-b(w,t))*vy0)/(a(w,t)) )
# Computes the initial y velocity that gives a final y = 0
def vy0(x0,y0,z0,t,w):
return (((((6.0)*x0*(w*t-a(w,t))-y0))*w*a(w,t))-((2.0)*w*x0*((4.0)-(3.0)*b(w,t))*((1.0)-b(w,t))))/(((4.0)*a(w,t)-(3.0)*w*t)*a(w,t)+(4.0)*((1.0)-b(w,t))*((1.0)-b(w,t)))
# Computes the initial z velocity that gives a final z = 0
def vz0(x0,y0,z0,vy0,t,w):
return( -(z0*b(w,t)*w)/a(w,t) )
# Computes the initial x velocity that gives a final x = xr
def vx0_(xr,x0,vy0,t,w):
return( (xr + vy0*((2*b(w,t)-2)/w) + x0*(3*b(w,t)-4))*(w/a(w,t)) )
# Computes the initial y velocity that gives a final y = yr
def vy0_(xr,yr,x0,y0,t,w):
A = 6*x0*a(w,t) - 6*w*x0*t
B = ((4*a(w,t))/w) - 3*t
C = ((w*xr)/a(w,t)) + ((3*w*x0*b(w,t))/a(w,t)) - ((4*w*x0)/a(w,t))
numerador = ((-2*C*(b(w,t)-1))/w) -A + yr
denominador = B + ((4*(b(w,t)-1)*(b(w,t)-1))/(w*a(w,t)))
return(numerador/denominador)
# Computes the initial z velocity that gives a final z = zr
def vz0_(zr,z0,t,w):
return( (zr*w)/a(w,t) - (z0*b(w,t)*w)/a(w,t) )
# Computes the orbital angular rate (mean motion)
def w(altura):
return( math.sqrt(mi/((raio_terra+altura)**3)) )
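# --- Added illustrative sketch (not part of the original script) ---
# Example of combining the targeting helpers above: vy0_/vx0_/vz0_ return the
# initial velocity that drives the relative position to (xr, yr, zr) at time tc.
# All numeric values here are arbitrary.
def _example_targeting(alt=220.0, pitch=10.0, yaw=45.0, r0=1.0, tc=300):
    n = w(alt)
    x_i, y_i, z_i = x0(pitch, yaw, r0), y0(pitch, yaw, r0), z0(pitch, yaw, r0)
    xr = yr = zr = 0.05  # desired final offsets in km
    vy_i = vy0_(xr, yr, x_i, y_i, tc, n)
    vx_i = vx0_(xr, x_i, vy_i, tc, n)
    vz_i = vz0_(zr, z_i, tc, n)
    return vx_i, vy_i, vz_i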
# Histogram of initial conditions that lead to collision
def histograma_colisao(alt,r0,rR,pitch_inicial,pitch_final,yaw_inicial,yaw_final,t0,tf):
w_ = w(alt) # use the altitude argument (was hard-coded to 220)
hist=[0,0,0,0,0,0,0,0]
v0_hist=['0-1','1-2.5','2.5-4','4-5.5','5.5-7.5','7.5-8.5','8.5-11','11-20']
for pitch in range(pitch_inicial,pitch_final):
print("Carregando: {}/{}".format(pitch,pitch_final))
for yaw in range(yaw_inicial,yaw_final):
x0_ = x0(pitch,yaw,r0)
y0_ = y0(pitch,yaw,r0)
z0_ = z0(pitch,yaw,r0)
for tc in range(t0,tf):
# bug fix: pass the full argument lists expected by vy0/vx0/vz0 defined above
vy0_ = vy0(x0_,y0_,z0_,tc,w_)
vx0_ = vx0(x0_,y0_,z0_,vy0_,tc,w_)
vz0_ = vz0(x0_,y0_,z0_,vy0_,tc,w_)
k=0
v0 = math.sqrt((vx0_*vx0_)+(vy0_*vy0_)+(vz0_*vz0_))
for t in range(0,tc):
xh_ = xh(x0_,y0_,z0_,vx0_,vy0_,vz0_,t,w_)
yh_ = yh(x0_,y0_,z0_,vx0_,vy0_,vz0_,t,w_)
zh_ = zh(x0_,y0_,z0_,vx0_,vy0_,vz0_,t,w_)
rh = math.sqrt((xh_*xh_)+(yh_*yh_)+(zh_*zh_))
if(rh/(alt+raio_terra) <= rR): k+=1
if(k>=tc-0.1):
if((v0>0)and(v0<=1)) : hist[0]+=2
elif((v0>1)and(v0<=2.5)) : hist[1]+=2
elif((v0>2.5)and(v0<=4)) : hist[2]+=2
elif((v0>4)and(v0<=5.5)) : hist[3]+=2
elif((v0>5.5)and(v0<=7.5)) : hist[4]+=2
elif((v0>7.5)and(v0<=8.5)) : hist[5]+=2
elif((v0>8.5)and(v0<=11)) : hist[6]+=2
elif((v0>11)and(v0<=20)) : hist[7]+=2
dados = pd.DataFrame({"Data":hist,"V0":v0_hist})
print(dados)
# Histogram of initial conditions that do not lead to collision
def histograma_naocolisao(alt,r0,rR,pitch_inicial,pitch_final,yaw_inicial,yaw_final,t0,tf):
w_ = w(alt)
hist=[0,0]
a=-1
rf_histograma = [30,40]
caminho = 'output_{}'.format(r0)
arquivo = open(caminho, 'w') # reopen the file (write mode)
arquivo.writelines('R0:{}\n'.format(r0)) # write the header line for this run
arquivo.close()
for rf in rf_histograma:
a+=1
for pitch in range(pitch_inicial,pitch_final):
print('{}:\t{}\t{}\n'.format(rf,pitch,datetime.datetime.now().strftime("%d/%m/%Y %H:%M:%S")))
arquivo = open(caminho, 'r') # open the file (read mode)
conteudo = arquivo.readlines()
conteudo.append('{}:\t{}\t{}\n'.format(rf,pitch,datetime.datetime.now().strftime("%d/%m/%Y %H:%M:%S"))) # append the progress entry
arquivo = open(caminho, 'w') # reopen the file (write mode)
arquivo.writelines(conteudo) # write the accumulated content back
arquivo.close()
for yaw in range(yaw_inicial,yaw_final):
x0_ = x0(pitch,yaw,r0)
y0_ = y0(pitch,yaw,r0)
z0_ = z0(pitch,yaw,r0)
for tc in range(t0,tf):
_vy0_ = vy0_(rf/math.sqrt(3),r0/math.sqrt(3),x0_,y0_,tc,w_)
_vx0_ = vx0_(rf/math.sqrt(3),x0_,_vy0_,tc,w_)
_vz0_ = vz0_(rf/math.sqrt(3),z0_,tc,w_)
k,q=0,0
for t in range(0,tc):
xh_ = xh(x0_,y0_,z0_,_vx0_,_vy0_,_vz0_,t,w_)
yh_ = yh(x0_,y0_,z0_,_vx0_,_vy0_,_vz0_,t,w_)
zh_ = zh(x0_,y0_,z0_,_vx0_,_vy0_,_vz0_,t,w_)
rh = math.sqrt((xh_*xh_)+(yh_*yh_)+(zh_*zh_))
if(rh/(alt+raio_terra) <= rR):
k+=1
if(rh > 0):
q+= 1
if(k>=tc-0.1 and q>=tc-0.1):
hist[a]+=4
arquivo = open(caminho, 'r') # open the file (read mode)
conteudo = arquivo.readlines()
conteudo.append('{}:{}\n'.format(rf,hist[a])) # append the histogram count for this rf
arquivo = open(caminho, 'w') # reopen the file (write mode)
arquivo.writelines(conteudo) # write the accumulated content back
arquivo.close()
# Generates a dataset with the relative dynamics between two particles using pitch and yaw
def cinematica_esferica(alt,pitch,yaw,r0,t0,tf,vx0,vy0,vz0,graf):
w_ = w(alt)
x0_ = x0(pitch,yaw,r0)
y0_ = y0(pitch,yaw,r0)
z0_ = z0(pitch,yaw,r0)
xt = np.zeros(tf - t0 + 1)
yt = np.zeros(tf - t0 + 1)
zt = np.zeros(tf - t0 + 1)
vxt = np.zeros(tf - t0 + 1)
vyt = np.zeros(tf - t0 + 1)
vzt = np.zeros(tf - t0 + 1)
tempo = np.zeros(tf - t0 + 1)
r = np.zeros(tf - t0 + 1)
v = np.zeros(tf - t0 + 1)
for t in range (t0 , tf + 1):
tempo[t] = t
xt[t] = xh(x0_,y0_,z0_,vx0,vy0,vz0,t,w_)
yt[t] = yh(x0_,y0_,z0_,vx0,vy0,vz0,t,w_)
zt[t] = zh(x0_,y0_,z0_,vx0,vy0,vz0,t,w_)
vxt[t] = vxh(x0_,y0_,z0_,vx0,vy0,vz0,t,w_)
vyt[t] = vyh(x0_,y0_,z0_,vx0,vy0,vz0,t,w_)
vzt[t] = vzh(x0_,y0_,z0_,vx0,vy0,vz0,t,w_)
r[t] = math.sqrt(xt[t]**2+yt[t]**2+zt[t]**2)
v[t] = math.sqrt(vxt[t]**2+vyt[t]**2+vzt[t]**2)
Data = | pd.DataFrame({'TEMPO': tempo, 'X[t]': xt, 'Y[t]': yt, 'Z[t]': zt, 'VXT': vxt, 'VYT': vyt, 'VZT': vzt, 'R[t]': r, 'V[t]': v}) | pandas.DataFrame |
"""Base classes for data management."""
# Authors: <NAME> <<EMAIL>>
# <NAME>
# License: MIT
import numpy as np
import pandas as pd
from .extraction import activity_power_profile
from .io import bikeread
from .utils import validate_filenames
class Rider(object):
"""User interface for a rider.
User interface to easily add, remove, compute information related to power.
Read more in the :ref:`User Guide <record_power_profile>`.
Parameters
----------
n_jobs : int, (default=1)
The number of workers to use for the different processing.
Attributes
----------
power_profile_ : DataFrame
DataFrame containing all information regarding the power-profile of a
rider for each ride.
"""
def __init__(self, n_jobs=1):
self.n_jobs = n_jobs
self.power_profile_ = None
def add_activities(self, filenames):
"""Compute the power-profile for each activity and add it to the
current power-profile.
Parameters
----------
filenames : str or list of str
A string a list of string to the file to read. You can use
wildcards to automatically check several files.
Returns
-------
None
Examples
--------
>>> from sksports.datasets import load_fit
>>> from sksports.base import Rider
>>> rider = Rider()
>>> rider.add_activities(load_fit()[0])
>>> rider.power_profile_.head() # doctest: +SKIP
2014-05-07 12:26:22
cadence 00:00:01 78.000000
00:00:02 64.000000
00:00:03 62.666667
00:00:04 62.500000
00:00:05 64.400000
"""
filenames = validate_filenames(filenames)
activities_pp = [activity_power_profile(bikeread(f))
for f in filenames]
activities_pp = pd.concat(activities_pp, axis=1)
if self.power_profile_ is not None:
try:
self.power_profile_ = self.power_profile_.join(activities_pp,
how='outer')
except ValueError as e:
if 'columns overlap but no suffix specified' in e.args[0]:
raise ValueError('One of the activity was already added'
' to the rider power-profile. Remove this'
' activity before to try to add it.')
else:
raise
else:
self.power_profile_ = activities_pp
def delete_activities(self, dates, time_comparison=False):
"""Delete the activities power-profile from some specific dates.
Parameters
----------
dates : list/tuple of datetime-like or str
The dates of the activities to be removed. The format expected is:
* datetime-like or str: a single activity will be deleted.
* a list of datetime-like or str: each activity for which the date
is contained in the list will be deleted.
* a tuple of datetime-like or str ``(start_date, end_date)``: the
activities for which the dates are included in the range will be
deleted.
time_comparison : bool, optional
Whether to make a strict comparison using time or to relax to
constraints with only the date.
Returns
-------
None
Examples
--------
>>> from sksports.datasets import load_rider
>>> from sksports import Rider
>>> rider = Rider.from_csv(load_rider())
>>> rider.delete_activities('07 May 2014')
>>> print(rider) # doctest: +SKIP
RIDER INFORMATION:
power-profile:
2014-05-11 09:39:38 2014-07-26 16:50:56
cadence 00:00:01 100.000000 60.000000
00:00:02 89.000000 58.000000
00:00:03 68.333333 56.333333
00:00:04 59.500000 59.250000
00:00:05 63.200000 61.000000
"""
def _strict_comparison(dates_pp, date, strict_equal):
if strict_equal:
return dates_pp == date
else:
return np.bitwise_and(
dates_pp >= date,
dates_pp <= pd.Timestamp(date) + pd.DateOffset(1))
if isinstance(dates, tuple):
if len(dates) != 2:
raise ValueError("Wrong tuple format. Expecting a tuple of"
" format (start_date, end_date). Got {!r}"
" instead.".format(dates))
mask_date = np.bitwise_and(
self.power_profile_.columns >= dates[0],
self.power_profile_.columns <= pd.Timestamp(dates[1]) +
pd.DateOffset(1))
elif isinstance(dates, list):
mask_date = np.any(
[_strict_comparison(self.power_profile_.columns, d,
time_comparison)
for d in dates], axis=0)
else:
mask_date = _strict_comparison(self.power_profile_.columns, dates,
time_comparison)
mask_date = np.bitwise_not(mask_date)
self.power_profile_ = self.power_profile_.loc[:, mask_date]
def record_power_profile(self, range_dates=None, columns=None):
"""Compute the record power-profile.
Parameters
----------
range_dates : tuple of datetime-like or str, optional
The start and end date to consider when computing the record
power-profile. By default, all data will be used.
columns : array-like or None, optional
Name of data field to return. By default, all available data will
be returned.
Returns
-------
record_power_profile : DataFrame
Record power-profile taken between the range of dates.
Examples
--------
>>> from sksports import Rider
>>> from sksports.datasets import load_rider
>>> rider = Rider.from_csv(load_rider())
>>> record_power_profile = rider.record_power_profile()
>>> record_power_profile.head() # doctest: +NORMALIZE_WHITESPACE
cadence distance elevation heart-rate power
00:00:01 60.000000 27162.600000 NaN NaN 750.000000
00:00:02 58.000000 27163.750000 NaN NaN 741.000000
00:00:03 56.333333 27164.586667 NaN NaN 731.666667
00:00:04 59.250000 27163.402500 NaN NaN 719.500000
00:00:05 61.000000 27162.142000 NaN NaN 712.200000
This is also possible to give a range of dates to compute the record
power-profile. We can also select some specific information.
>>> record_power_profile = rider.record_power_profile(
... range_dates=('07 May 2014', '11 May 2014'),
... columns=['power', 'cadence'])
>>> record_power_profile.head() # doctest: +SKIP
cadence power
00:00:01 100.000000 717.00
00:00:02 89.000000 717.00
00:00:03 68.333333 590.00
00:00:04 59.500000 552.25
00:00:05 63.200000 552.60
"""
if range_dates is None:
mask_date = np.ones_like(self.power_profile_.columns,
dtype=bool)
else:
mask_date = np.bitwise_and(
self.power_profile_.columns >= range_dates[0],
self.power_profile_.columns <= pd.Timestamp(range_dates[1]) +
| pd.DateOffset(1) | pandas.DateOffset |
# *****************************************************************************
#
# Copyright (c) 2020, the pyEX authors.
#
# This file is part of the pyEX library, distributed under the terms of
# the Apache License 2.0. The full license can be found in the LICENSE file.
#
from functools import wraps
import numpy as np
import pandas as pd
from ..common import (
_TIMEFRAME_DIVSPLIT,
_UTC,
PyEXception,
_checkPeriodLast,
_expire,
_get,
_quoteSymbols,
_raiseIfNotStr,
_reindex,
_toDatetime,
)
from ..timeseries import timeSeries
@_expire(hour=8, tz=_UTC)
def balanceSheet(
symbol,
period="quarter",
last=1,
token="",
version="stable",
filter="",
format="json",
):
"""Pulls balance sheet data. Available quarterly (4 quarters) and annually (4 years)
https://iexcloud.io/docs/api/#balance-sheet
Updates at 8am, 9am UTC daily
Args:
symbol (str): Ticker to request
period (str): Period, either 'annual' or 'quarter'
last (int): Number of records to fetch, up to 12 for 'quarter' and 4 for 'annual'
token (str): Access token
version (str): API version
filter (str): filters: https://iexcloud.io/docs/api/#filter-results
format (str): return format, defaults to json
Returns:
dict or DataFrame: result
"""
_raiseIfNotStr(symbol)
symbol = _quoteSymbols(symbol)
_checkPeriodLast(period, last)
return _get(
"stock/{}/balance-sheet?period={}&last={}".format(symbol, period, last),
token=token,
version=version,
filter=filter,
format=format,
).get("balancesheet", [])
@wraps(balanceSheet)
def balanceSheetDF(*args, **kwargs):
return _reindex(
_toDatetime(pd.DataFrame(balanceSheet(*args, **kwargs))), "reportDate"
)
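# Illustrative usage sketch (added; not part of pyEX). The token is a placeholder
# and "AAPL" is only an example symbol; the available fields depend on the IEX
# Cloud payload.
#
#   df = balanceSheetDF("AAPL", period="annual", last=4, token="YOUR_IEX_TOKEN")
#   print(df.head())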
@_expire(hour=8, tz=_UTC)
def cashFlow(
symbol,
period="quarter",
last=1,
token="",
version="stable",
filter="",
format="json",
):
"""Pulls cash flow data. Available quarterly (4 quarters) or annually (4 years).
https://iexcloud.io/docs/api/#cash-flow
Updates at 8am, 9am UTC daily
Args:
symbol (str): Ticker to request
period (str): Period, either 'annual' or 'quarter'
last (int): Number of records to fetch, up to 12 for 'quarter' and 4 for 'annual'
token (str): Access token
version (str): API version
filter (str): filters: https://iexcloud.io/docs/api/#filter-results
format (str): return format, defaults to json
Returns:
dict or DataFrame: result
"""
_raiseIfNotStr(symbol)
symbol = _quoteSymbols(symbol)
_checkPeriodLast(period, last)
return _get(
"stock/{}/cash-flow?period={}&last={}".format(symbol, period, last),
token=token,
version=version,
filter=filter,
format=format,
).get("cashflow", [])
@wraps(cashFlow)
def cashFlowDF(*args, **kwargs):
df = _reindex(
_toDatetime(pd.DataFrame(cashFlow(*args, **kwargs))),
"reportDate",
)
df.replace(to_replace=[None], value=np.nan, inplace=True)
return df
@_expire(hour=9, tz=_UTC)
def dividends(
symbol,
timeframe="ytd",
token="",
version="stable",
filter="",
format="json",
):
"""Dividend history
https://iexcloud.io/docs/api/#dividends
Updated at 9am UTC every day
Args:
symbol (str): Ticker to request
timeframe (str): timeframe for data
token (str): Access token
version (str): API version
filter (str): filters: https://iexcloud.io/docs/api/#filter-results
format (str): return format, defaults to json
Returns:
dict or DataFrame: result
"""
_raiseIfNotStr(symbol)
symbol = _quoteSymbols(symbol)
if timeframe not in _TIMEFRAME_DIVSPLIT:
raise PyEXception("Range must be in %s" % str(_TIMEFRAME_DIVSPLIT))
return _get(
"stock/" + symbol + "/dividends/" + timeframe,
token=token,
version=version,
filter=filter,
format=format,
)
def _dividendsToDF(d):
return _reindex(_toDatetime(pd.DataFrame(d)), "exDate")
@wraps(dividends)
def dividendsDF(*args, **kwargs):
return _dividendsToDF(dividends(*args, **kwargs))
@_expire(hour=9, tz=_UTC)
def earnings(
symbol,
period="quarter",
last=1,
field="",
token="",
version="stable",
filter="",
format="json",
):
"""Earnings data for a given company including the actual EPS, consensus, and fiscal period. Earnings are available quarterly (last 4 quarters) and annually (last 4 years).
https://iexcloud.io/docs/api/#earnings
Updates at 9am, 11am, 12pm UTC every day
Args:
symbol (str): Ticker to request
period (str): Period, either 'annual' or 'quarter'
last (int): Number of records to fetch, up to 12 for 'quarter' and 4 for 'annual'
field (str): Subfield to fetch
token (str): Access token
version (str): API version
filter (str): filters: https://iexcloud.io/docs/api/#filter-results
format (str): return format, defaults to json
Returns:
dict or DataFrame: result
"""
_raiseIfNotStr(symbol)
symbol = _quoteSymbols(symbol)
_checkPeriodLast(period, last)
if not field:
return _get(
"stock/{}/earnings?period={}&last={}".format(symbol, period, last),
token=token,
version=version,
filter=filter,
format=format,
).get("earnings", [])
return _get(
"stock/{}/earnings/{}/{}?period={}".format(symbol, last, field, period),
token=token,
version=version,
filter=filter,
format=format,
).get("earnings", [])
def _earningsToDF(e):
"""internal"""
if e:
df = _reindex(_toDatetime( | pd.DataFrame(e) | pandas.DataFrame |
#-*- coding: utf-8 -*-
import sys
import random
import numpy as np
import pandas as pd
import utility_1
import h5py
import json
eps=1e-12
def countCG(strs):
strs = strs.upper()
return float((strs.count("C")+strs.count("G")))/(len(strs))
def countCG_N(strs):
strs = strs.upper()
return float((strs.count("C")+strs.count("G")))/(len(strs)-strs.count("N")+eps)
def countCG_skew(strs):
strs = strs.upper()
num1, num2 = strs.count("G"), strs.count("C")
return float((num1-num2))/(num1+num2+eps)
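# Illustrative example (added): for the sequence "GGCATN",
#   countCG      -> (2 G + 1 C) / 6          = 0.5
#   countCG_N    -> 3 / (6 - 1 N + eps)      ~ 0.6
#   countCG_skew -> (2 G - 1 C) / (3 + eps)  ~ 0.333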
def one_hot_encoding(seq, seq_len):
vec1 = np.zeros((4,seq_len))
cnt = 0
for i in range(0,seq_len):
print(i)
if seq[i]=='A':
vec1[0,i] = 1
elif seq[i]=='G':
vec1[1,i] = 1
elif seq[i]=='C':
vec1[2,i] = 1
elif seq[i]=='T':
vec1[3,i] = 1
else:
pass
return np.int64(vec1)
def index_encoding(seq, seq_len, seq_dict):
vec1 = np.zeros(seq_len)
for i in range(0,seq_len):
vec1[i] = seq_dict[seq[i]]
return np.int64(vec1)
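# --- Added illustrative sketch (not part of the original file) ---
# Encodes a short sequence both ways; the A/G/C/T -> 0..3 mapping below is an
# assumed example dictionary for index_encoding.
def _example_encodings():
    seq = "AGCTA"
    onehot = one_hot_encoding(seq, len(seq))   # 4 x len(seq) one-hot matrix
    idx = index_encoding(seq, len(seq), {'A': 0, 'G': 1, 'C': 2, 'T': 3})
    return onehot, idx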
# Read sequences as strings ("N" retained)
def getString(fileStr):
file = open(fileStr, 'r')
gen_seq = ""
lines = file.readlines()
for line in lines:
line = line.strip()
gen_seq += line
gen_seq = gen_seq.upper()
return gen_seq
# Read sequences of format fasta ("N" removed)
def getStringforUnlabel(fileStr):
file = open(fileStr, 'r')
gen_seq = ""
lines = file.readlines()
for line in lines:
if(line[0] == ">"):
continue
else:
line = line.strip()
gen_seq += line
gen_seq = gen_seq.upper()
gen_seq = gen_seq.replace("N", "")
return gen_seq
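# Illustrative example (added): for a FASTA file containing
#   >chr_example
#   ACGTN
#   NNACG
# getStringforUnlabel() returns "ACGTACG" (header skipped, "N" removed).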
def get_reverse_str(str):
str = str.upper()
str_new=""
for i in range(len(str)):
if(str[i]=="T"):
str_new+="A"
elif(str[i]=="A"):
str_new+="T"
elif(str[i]=="G"):
str_new+="C"
elif(str[i]=="C"):
str_new+="G"
else:
str_new+=str[i]
return str_new
# Get sequence of 2K+1 centered at pos
def getSubSeq(str, pos, K):
n = len(str)
l = pos - K
r = pos + K + 1
if l > r or l < 0 or r > n - 1:
return 0
elif "N" in str[l:r]:
return 0
return str[l:r]
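# Illustrative example (added): getSubSeq("ACGTACGT", 3, 2) returns "CGTAC",
# i.e. the window of length 2*K+1 = 5 centered at position 3; it returns 0 when
# the window runs off the sequence or contains an "N".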
# Get sequence of 2K+1 centered at pos
def getSubSeq2(str, pos, K):
n = len(str)
l = max(0, pos - K)
r = min(n - 1, pos + K + 1)
if l > r:
print(l, pos, r)
print("left pointer is bigger than right one")
return 0
return str[l:pos]+" "+str[pos]+" "+str[pos+1:r]
# Convert DNA to sentences with overlapping window of size K
def DNA2Sentence(dna, K):
sentence = ""
length = len(dna)
for i in range(length - K + 1):
sentence += dna[i: i + K] + " "
# remove spaces
sentence = sentence[0 : len(sentence) - 1]
return sentence
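# Illustrative example (added): DNA2Sentence("ACGTAC", 3) yields the overlapping
# k-mer sentence "ACG CGT GTA TAC".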
# Convert DNA to sentences with overlapping window of size K in reverse direction
def DNA2SentenceReverse(dna, K):
sentence = ""
length = len(dna)
for i in range(length - K + 1):
j = length - K - i
sentence += dna[j: j + K] + " "
# remove spaces
sentence = sentence[0 : len(sentence) - 1]
return sentence
def reverse(s):
str = ""
for i in s:
str = i + str
return str
# Convert DNA to sentences with overlapping window of size K in reverse direction
def DNA2SentenceReverse_1(dna, K):
sentence = ""
length = len(dna)
dna = reverse(dna)
for i in range(length - K + 1):
sentence += dna[i: i + K] + " "
# remove spaces
sentence = sentence[0 : len(sentence) - 1]
return sentence
# Convert DNA to sentences with non-overlapping window of size K
def DNA2SentenceJump(dna, K,step):
sentence = ""
length = len(dna)
i=0
while i <= length - K:
sentence += dna[i: i + K] + " "
i += step
return sentence
# Convert DNA to sentences with non-overlapping window of size K in reverse direction
def DNA2SentenceJumpReverse(dna, K,step):
sentence = ""
length = len(dna)
j=0 # bug fix: the loop below uses j as the offset (was "i=0", leaving j undefined)
while j <= length - K:
i = length - K - j
sentence += dna[i: i + K] + " "
j += step
return sentence
def gen_Seq(Range):
print ("Generating Seq...")
table = pd.read_table(PATH1+"prep_data.txt",sep = "\t")
print (len(table))
table.drop_duplicates()
print (len(table))
label_file = open(PATH1+"LabelSeq", "w")
total = len(table)
list = ["chr1", "chr2", "chr3", "chr4", "chr5", "chr6", "chr7", "chr8", "chr9", \
"chr10", "chr11", "chr12", "chr13", "chr14", "chr15", "chr16", "chr17", \
"chr18", "chr19", "chr20", "chr21", "chr22", "chrX", "chrY","chrM"]
number_positive = 0
dict_pos={}
for i in range(total):
if (number_positive % 100 == 0) and (number_positive != 0):
print ("number of seq: %d of %d\r" %(number_positive,total),end = "")
sys.stdout.flush()
chromosome = table["chromosome"][i]
if chromosome in dict_pos.keys():
strs = dict_pos[chromosome]
else:
strs = processSeq.getString(ROOT_PATH1+"Chromosome_38/" + str(chromosome) + ".fa")
dict_pos[chromosome] = strs
bias = 7
start = int(table["start"][i] - 1 - Range + bias)
end = start + 23 + Range*2
strand = table["strand"][i]
edstrs1 = strs[start : end]
if strand == "-":
edstrs1 = edstrs1[::-1]
edstrs1 = processSeq.get_reverse_str(edstrs1)
if "N" in edstrs1:
table = table.drop(i)
continue
outstr = "%s\n"%(edstrs1)
label_file.write(outstr)
number_positive += 1
table.to_csv(PATH1+"prep_data.txt",sep = "\t",index = False)
def get_target():
table = pd.read_table(PATH1+"prep_data.txt", sep="\t")
print (len(table))
table.drop_duplicates()
print (len(table))
target_file = open(PATH1+"TargetSeq", "w")
for i in range(len(table)):
target = table['target'][i].upper()
target_file.write(target+"\n")
target_file.close()
def prep_data():
chrom_list = ["chr1", "chr2", "chr3", "chr4", "chr5", "chr6", "chr7", "chr8", "chr9", \
"chr10", "chr11", "chr12", "chr13", "chr14", "chr15", "chr16", "chr17", \
"chr18", "chr19", "chr20", "chr21", "chr22", "chrX", "chrY","chrM"]
tab = pd.read_table(PATH1+"casoffinder_CHANGEseq_joined.tsv",sep = '\t')
tab = tab[tab['chromosome'].isin(chrom_list)]
tab['label'] = 1 - tab['reads'].isna()
tab['end'] = tab['start'] + 23
print (tab['chromosome'].unique())
tab.to_csv(PATH1+"prep_data.txt",sep = "\t",index = False)
def load_file(f_name,length,vec_name):
base_code = {
'A': 0,
'C': 1,
'G': 2,
'T': 3,
}
num_pairs = sum(1 for line in open(f_name))
# number of sample pairs
num_bases = 4
with open(f_name, 'r') as f:
line_num = 0 # number of lines (i.e., samples) read so far
for line in f.read().splitlines():
if (line_num % 100 == 0) and (line_num != 0):
print ("number of input data: %d\r" %(line_num),end= "")
sys.stdout.flush()
if line_num == 0:
# allocate space for output
seg_length = length # number of bases per sample
Xs_seq1 = np.zeros((num_pairs, num_bases, seg_length))
for start in range(len(line)):
if line[start] in base_code:
print (start)
break
base_num = 0
for x in line[start:start+length]:
if x != "N":
Xs_seq1[line_num, base_code[x], base_num] = 1
base_num += 1
line_num += 1
X = Xs_seq1
np.save("../%s" %(vec_name),X)
def kmer_dict(K):
vec1 = ['A','G','C','T']
vec2 = vec1.copy() # kmer dict
vec3 = []
num1 = len(vec1)
for k1 in range(1,K):
for character in vec1:
for temp1 in vec2:
seq1 = character+temp1
vec3.append(seq1)
vec2 = vec3.copy()
vec3 = []
return vec2
def kmer_counting(seq, K, kmer_dict1):
len1 = len(kmer_dict1)
vec = np.zeros((len1),dtype=np.float32)
len2 = len(seq)-K+1
cnt = 0
for kmer in kmer_dict1:
num1 = seq.count(kmer)
vec[cnt] = num1
cnt = cnt+1
vec = vec*1.0/len2
return vec
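# --- Added illustrative sketch (not part of the original file) ---
# Builds the 16-entry dinucleotide dictionary and computes normalized 2-mer
# frequencies for a short example sequence.
def _example_kmer_features():
    dict2 = kmer_dict(2)
    return kmer_counting("ACGTACGT", 2, dict2)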
def align_region(species_id):
col1, col2, col3 = '%s.chrom'%(species_id), '%s.start'%(species_id), '%s.stop'%(species_id)
def load_seq_kmer(species_id, file1, filename2, K, kmer_dict1):
# file1 = pd.read_csv(filename1,sep='\t')
col1, col2, col3 = '%s.chrom'%(species_id), '%s.start'%(species_id), '%s.stop'%(species_id)
chrom, start, stop, serial = np.asarray(file1[col1]), np.asarray(file1[col2]), np.asarray(file1[col3]), np.asarray(file1['serial'])
num1 = len(chrom)
file = open(filename2, 'r')
# serial_list, line_list = [], []
serial_list = -np.ones((num1,2))
feature_dim = len(kmer_dict1) # bug fix: feature_dim was not defined in this function
f_list = np.zeros((num1,feature_dim))
lines = file.readlines()
num_line = len(lines)
cnt = -1
flag = 0
print(num_line,num1)
# temp1 = int(num_line/2)
for line in lines:
if(line[0]==">"):
# continue
# line: >chr1:5-10
cnt = cnt + 1
str1 = line[1:]
temp1 = str1.split(':')
t_chrom = temp1[0]
temp2 = temp1[1].split('-')
t_start, t_stop = int(temp2[0]), int(temp2[1])
chrom1, start1, stop1, serial1 = chrom[cnt], start[cnt], stop[cnt], serial[cnt]
if (chrom1==t_chrom) and (start1==t_start) and (stop1==t_stop):
flag = 1
else:
b = np.where((chrom==t_chrom)&(start==t_start)&(stop==t_stop))[0]
if len(b)>0:
cnt = b[0]
flag = 1
else:
if flag == 1:
line = line.strip().upper()
vec = kmer_counting(line,K,kmer_dict1)
# line_list.append(line)
# f_list.append(vec)
# line_list.append(line)
# N_list.append(line.count('N'))
flag = 0
serial_list[cnt,0], serial_list[cnt,1] = serial[cnt], line.count('N')
f_list[cnt] = vec
filename1 = '%s.vec'%(species_id)
np.save(filename1,(serial_list,f_list))
return serial_list, f_list
# load the annotation file and the sequence feature file
# return kmer feature: num_samples*feature_dim
# return one-hot encoding feature: num_samples*4*feature_dim
def load_seq_1_ori(species_id, file1, filename2, K, kmer_dict1):
# file1 = pd.read_csv(filename1,sep='\t')
col1, col2, col3 = '%s.chrom'%(species_id), '%s.start'%(species_id), '%s.stop'%(species_id)
chrom, start, stop, serial = np.asarray(file1[col1]), np.asarray(file1[col2]), np.asarray(file1[col3]), np.asarray(file1['serial'])
label = np.asarray(file1['label'])
group_label = np.asarray(file1['group_label'])
signal = np.asarray(file1['signal'])
num1 = len(chrom)
len1 = stop-start
seq_len = int(np.median(len1))
file = open(filename2, 'r')
# serial_list, line_list = [], []
serial_list = -np.ones((num1,2))
feature_dim = len(kmer_dict1)
f_list = np.zeros((num1,feature_dim))
f_mtx = np.zeros((num1,4,seq_len))
lines = file.readlines()
num_line = len(lines)
cnt = -1
flag = 0
print(num_line,num1)
# temp1 = int(num_line/2)
i = 0
for line in lines:
if(line[0]==">"):
# continue
# line: >chr1:5-10
print(cnt)
cnt = cnt + 1
str1 = line[1:]
temp1 = str1.split(':')
t_chrom = temp1[0]
temp2 = temp1[1].split('-')
t_start, t_stop = int(temp2[0]), int(temp2[1])
chrom1, start1, stop1, serial1 = chrom[cnt], start[cnt], stop[cnt], serial[cnt]
if (chrom1==t_chrom) and (start1==t_start) and (stop1==t_stop):
flag = 1
else:
b = np.where((chrom==t_chrom)&(start==t_start)&(stop==t_stop))[0]
if len(b)>0:
cnt = b[0]
flag = 1
else:
if flag == 1:
line = line.strip().upper()
vec = kmer_counting(line,K,kmer_dict1)
# line_list.append(line)
# f_list.append(vec)
flag = 0
serial_list[cnt,0], serial_list[cnt,1] = serial[cnt], line.count('N')
f_list[cnt] = vec
f_mtx[cnt] = one_hot_encoding(line, seq_len)
i += 1
if i % 100 == 0:
print("%d of %d\r" %(i,num1), end = "")
sys.stdout.flush()
b = np.where(serial_list[:,0]>=0)[0]
serial_list, f_list, f_mtx, label, group_label = serial_list[b], f_list[b], f_mtx[b], label[b], group_label[b]
# filename1 = '%s.vec'%(species_id)
# np.save(filename1,(serial_list,f_list))
return serial_list, f_list, f_mtx, label, group_label, signal
# load feature
def load_seq_altfeature_1(species_id, file1, filename2, output_filename):
# file1 = pd.read_csv(filename1,sep='\t')
col1, col2, col3 = '%s.chrom'%(species_id), '%s.start'%(species_id), '%s.stop'%(species_id)
chrom, start, stop, serial = np.asarray(file1[col1]), np.asarray(file1[col2]), np.asarray(file1[col3]), np.asarray(file1['serial'])
label = np.asarray(file1['label'])
group_label = np.asarray(file1['group_label'])
signal = np.asarray(file1['signal'])
num1 = len(chrom)
len1 = stop-start
seq_len = int(np.median(len1))
file = open(filename2, 'r')
# serial_list, line_list = [], []
serial_list = -np.ones((num1,3))
feature_dim = 3
# num1 = 2000
f_list = np.zeros((num1,feature_dim))
# f_mtx = np.zeros((num1,4,seq_len))
lines = file.readlines()
num_line = len(lines)
cnt = -1
flag = 0
print(num_line,num1)
# temp1 = int(num_line/2)
i = 0
serial_vec, seq_vec = [], []
for line in lines:
if(line[0]==">"):
# continue
# line: >chr1:5-10
# print(cnt)
cnt = cnt + 1
str1 = line[1:]
temp1 = str1.split(':')
t_chrom = temp1[0]
temp2 = temp1[1].split('-')
t_start, t_stop = int(temp2[0]), int(temp2[1])
chrom1, start1, stop1, serial1 = chrom[cnt], start[cnt], stop[cnt], serial[cnt]
if (chrom1==t_chrom) and (start1==t_start) and (stop1==t_stop):
flag = 1
else:
b = np.where((chrom==t_chrom)&(start==t_start)&(stop==t_stop))[0]
if len(b)>0:
cnt = b[0]
flag = 1
else:
if flag == 1:
line = line.strip().upper()
# vec = kmer_counting(line,K,kmer_dict1)
serial_vec.append([cnt,serial[cnt]])
seq_vec.append(line)
GC_profile = countCG(line)
GC_profile1 = countCG_N(line)
GC_skew = countCG_skew(line)
vec = [GC_profile,GC_profile1,GC_skew]
# line_list.append(line)
# f_list.append(vec)
flag = 0
serial_list[cnt,0], serial_list[cnt,1], serial_list[cnt,2] = serial[cnt], len(line), line.count('N')
f_list[cnt] = vec
# f_mtx[cnt] = one_hot_encoding(line, seq_len)
i += 1
if i % 1000 == 0:
print("%d of %d\r" %(i,num1), end = "")
sys.stdout.flush()
# if cnt>1000:
# break
# b = np.where(serial_list[:,0]>=0)[0]
# serial_list, f_list, f_mtx, label, group_label = serial_list[b], f_list[b], f_mtx[b], label[b], group_label[b]
# filename1 = '%s.vec'%(species_id)
# np.save(filename1,(serial_list,f_list))
serial_vec = np.asarray(serial_vec)
fields = ['index','serial','seq']
data1 = pd.DataFrame(columns=fields)
data1[fields[0]], data1[fields[1]] = serial_vec[:,0], serial_vec[:,1]
data1[fields[2]] = seq_vec
# data1.to_csv('test_seq.txt',index=False,sep='\t')
data1.to_csv(output_filename,index=False,sep='\t')
return serial_list, f_list, label, group_label, signal
# feature 1: GC profile
# feature 2: GC skew
# def load_seq_altfeature(filename2, K, kmer_dict1, sel_idx):
def load_seq_altfeature(filename2, sel_idx):
file2 = pd.read_csv(filename2,sep='\t')
seq = np.asarray(file2['seq'])
if len(sel_idx)>0:
seq = seq[sel_idx]
num1 = len(seq)
print("number of sequences %d"%(num1))
feature_dim = 3
f_list = np.zeros((num1,feature_dim))
for i in range(0,num1):
sequence = seq[i].strip()
GC_profile = countCG(sequence)
GC_profile1 = countCG_N(sequence)
GC_skew = countCG_skew(sequence)
f_list[i] = [GC_profile,GC_profile1,GC_skew]
if i % 1000 == 0:
print("%d of %d\r" %(i,num1), end = "")
sys.stdout.flush()
return f_list
def load_seq_1(species_id, file1, filename2, K, kmer_dict1, output_filename):
# file1 = pd.read_csv(filename1,sep='\t')
col1, col2, col3 = '%s.chrom'%(species_id), '%s.start'%(species_id), '%s.stop'%(species_id)
chrom, start, stop, serial = np.asarray(file1[col1]), np.asarray(file1[col2]), np.asarray(file1[col3]), np.asarray(file1['serial'])
label = np.asarray(file1['label'])
group_label = np.asarray(file1['group_label'])
signal = np.asarray(file1['signal'])
num1 = len(chrom)
len1 = stop-start
seq_len = int(np.median(len1))
file = open(filename2, 'r')
# serial_list, line_list = [], []
serial_list = -np.ones((num1,2))
feature_dim = len(kmer_dict1)
f_list = np.zeros((num1,feature_dim))
f_mtx = np.zeros((num1,4,seq_len),dtype=np.float32)
lines = file.readlines()
num_line = len(lines)
cnt = -1
flag = 0
print(num_line,num1)
# temp1 = int(num_line/2)
i = 0
serial_vec, seq_vec = [], []
for line in lines:
if(line[0]==">"):
# continue
# line: >chr1:5-10
# print(cnt)
cnt = cnt + 1
str1 = line[1:]
temp1 = str1.split(':')
t_chrom = temp1[0]
temp2 = temp1[1].split('-')
t_start, t_stop = int(temp2[0]), int(temp2[1])
chrom1, start1, stop1, serial1 = chrom[cnt], start[cnt], stop[cnt], serial[cnt]
if (chrom1==t_chrom) and (start1==t_start) and (stop1==t_stop):
flag = 1
else:
b = np.where((chrom==t_chrom)&(start==t_start)&(stop==t_stop))[0]
if len(b)>0:
cnt = b[0]
flag = 1
else:
if flag == 1:
line = line.strip().upper()
vec = kmer_counting(line,K,kmer_dict1)
serial_vec.append([cnt,serial[cnt]])
seq_vec.append(line)
# line_list.append(line)
# f_list.append(vec)
flag = 0
serial_list[cnt,0], serial_list[cnt,1] = serial[cnt], line.count('N')
f_list[cnt] = vec
# f_mtx[cnt] = one_hot_encoding(line, seq_len)
i += 1
if i % 1000 == 0:
print("%d of %d\r" %(i,num1), end = "")
sys.stdout.flush()
# if cnt>1000:
# break
# b = np.where(serial_list[:,0]>=0)[0]
# serial_list, f_list, f_mtx, label, group_label = serial_list[b], f_list[b], f_mtx[b], label[b], group_label[b]
# filename1 = '%s.vec'%(species_id)
# np.save(filename1,(serial_list,f_list))
serial_vec = np.asarray(serial_vec)
fields = ['index','serial','seq']
data1 = pd.DataFrame(columns=fields)
data1[fields[0]], data1[fields[1]] = serial_vec[:,0], serial_vec[:,1]
data1[fields[2]] = seq_vec
# data1.to_csv('test_seq.txt',index=False,sep='\t')
data1.to_csv(output_filename,index=False,sep='\t')
return serial_list, f_list, f_mtx, label, group_label, signal
def load_seq_1_1(species_id, filename1, header, filename2, output_filename):
file1 = | pd.read_csv(filename1,sep='\t',header=header) | pandas.read_csv |
# -*- coding: utf-8 -*-
"""Cross references from cbms2019.
.. seealso:: https://github.com/pantapps/cbms2019
"""
import pandas as pd
from pyobo.constants import (
PROVENANCE,
SOURCE_ID,
SOURCE_PREFIX,
TARGET_ID,
TARGET_PREFIX,
XREF_COLUMNS,
)
__all__ = [
"get_cbms2019_xrefs_df",
]
#: Columns: DOID, DO name, xref xb, xref ix
base_url = "https://raw.githubusercontent.com/pantapps/cbms2019/master"
doid_to_all = f"{base_url}/mesh_icd10cm_via_do_not_mapped_umls.tsv"
#: Columns: SNOMEDCT_ID, SNOMEDCIT_NAME, ICD10CM_ID, ICD10CM_NAME, MESH_ID
all_to_all = f"{base_url}/mesh_icd10cm_via_snomedct_not_mapped_umls.tsv"
#: Columns: DOID, DO name, xref xb, xref ix
doid_to_all_2 = f"{base_url}/mesh_snomedct_via_do_not_mapped_umls.tsv"
#: Columns: SNOMEDCT_ID, SNOMEDCIT_NAME, ICD10CM_ID, ICD10CM_NAME, MESH_ID
all_to_all_2 = f"{base_url}/mesh_snomedct_via_icd10cm_not_mapped_umls.tsv"
NSM = {
"MESH": "mesh",
"ICD10CM": "icd",
"SNOMEDCT_US_2016_03_01": "snomedct",
}
def _get_doid(url: str) -> pd.DataFrame:
df = pd.read_csv(url, sep="\t", usecols=["DO_ID", "resource", "resource_ID"])
df.columns = [SOURCE_ID, TARGET_PREFIX, TARGET_ID]
df[SOURCE_PREFIX] = "doid"
df[SOURCE_ID] = df[SOURCE_ID].map(lambda s: s[len("DOID:") :])
df[PROVENANCE] = url
df[TARGET_PREFIX] = df[TARGET_PREFIX].map(NSM.get)
df = df[XREF_COLUMNS]
return df
def _get_mesh_to_icd_via_doid() -> pd.DataFrame:
return _get_doid(doid_to_all)
def _get_mesh_to_icd_via_snomedct() -> pd.DataFrame:
df = | pd.read_csv(all_to_all, sep="\t", usecols=["SNOMEDCT_ID", "ICD10CM_ID", "MESH_ID"]) | pandas.read_csv |
import dataiku
from dataiku.customrecipe import *
from dataiku import pandasutils as pdu
import pandas as pd, numpy as np
import cvxpy
# Retrieve input and output dataset names
input_dataset_name = get_input_names_for_role('input_dataset')[0]
output_dataset_name = get_output_names_for_role('output_dataset')[0]
# Retrieve mandatory user-defined parameters
label_col = get_recipe_config()['label_col']
cost_col = get_recipe_config()['cost_col']
value_col = get_recipe_config()['value_col']
cap = get_recipe_config()['cap']
selection_n = get_recipe_config()['selection_n']
# Retrieve optional user-defined parameters
agg_col = get_recipe_config().get('agg_col', None)
actual_col = get_recipe_config().get('actual_col', None)
top_n = get_recipe_config().get('top_n', 1)
# Error checking of user-defined parameters
# Read input dataset as dataframe
input_dataset = dataiku.Dataset(input_dataset_name)
knapsack_df = input_dataset.get_dataframe()
# -------------------------------------------------------------------------------- NOTEBOOK-CELL: CODE
agg_distinct = list(knapsack_df[agg_col].unique())
agg_distinct.sort()
# -------------------------------------------------------------------------------- NOTEBOOK-CELL: CODE
# Inspiration from: https://towardsdatascience.com/integer-programming-in-python-1cbdfa240df2
def knapsack(df, label_col, value_col, cost_col, selection_n, max_val):
# gather elements to select from
costs = np.array(df[cost_col])
values = np.array(df[value_col])
selection = cvxpy.Variable(len(costs), boolean=True)
# objective function (to maximize)
total_value = values @ selection
# universal constraints (weight constraints and constraint on number of selections)
cost_constraint = costs @ selection <= cap
roster_constraint = np.ones(len(costs)) @ selection == selection_n
max_constraint = values @ selection <= max_val # max val allows us to get "n best" optimal values
# specific constraints (custom constraints for situation)
pg = np.array(df['PG'])
sg = np.array(df['SG'])
sf = np.array(df['SF'])
pf = np.array(df['PF'])
c = np.array(df['C'])
pg_constraint = pg @ selection >= 1
sg_constraint = sg @ selection >= 1
sf_constraint = sf @ selection >= 1
pf_constraint = pf @ selection >= 1
c_constraint = c @ selection >= 1
g_constraint = (pg + sg) @ selection >= 3
f_constraint = (sf + pf) @ selection >= 3
knapsack_problem = cvxpy.Problem(cvxpy.Maximize(total_value), [cost_constraint, roster_constraint, max_constraint,
pg_constraint, sg_constraint, sf_constraint,
pf_constraint, c_constraint, g_constraint, f_constraint])
value_opt = knapsack_problem.solve(solver=cvxpy.GLPK_MI)
selection_opt = np.array(selection.value).astype(int)
total_cost = (costs @ selection).value
return selection_opt, value_opt, total_cost
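# Added illustrative sketch (not part of the recipe): a minimal, self-contained
# version of the same binary integer-programming pattern used in knapsack() above.
# Values, costs and the budget are arbitrary example numbers.
def _example_binary_knapsack():
    values = np.array([10.0, 7.0, 4.0, 3.0])
    costs = np.array([6.0, 5.0, 3.0, 2.0])
    budget = 8.0
    pick = cvxpy.Variable(len(values), boolean=True)
    problem = cvxpy.Problem(cvxpy.Maximize(values @ pick), [costs @ pick <= budget])
    problem.solve(solver=cvxpy.GLPK_MI)  # same MIP solver as the recipe
    return np.array(pick.value).astype(int), problem.value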
# -------------------------------------------------------------------------------- NOTEBOOK-CELL: CODE
output_data = []
for agg_bin in agg_distinct:
print(agg_bin)
max_val = np.inf
for iteration in range(top_n):
knapsack_bin_df = knapsack_df.loc[knapsack_df[agg_col] == agg_bin]
selection_opt, value_opt, total_cost = knapsack(knapsack_bin_df, label_col, value_col, cost_col, selection_n, max_val)
labels, actuals = list(knapsack_bin_df[label_col]), list(knapsack_bin_df[actual_col])
labels_opt, actuals_opt = [], []
for i in range(len(selection_opt)):
if selection_opt[i]:
labels_opt.append(labels[i])
actuals_opt.append(actuals[i])
output_row = [agg_bin]
output_row.extend(labels_opt)
output_row.extend([int(total_cost), np.round(value_opt, 3), np.sum(actuals_opt)])
output_data.append(output_row)
max_val = value_opt - 10**-3
# -------------------------------------------------------------------------------- NOTEBOOK-CELL: CODE
output_cols = [agg_col]
output_cols.extend(range(1, selection_n+1))
output_cols.extend(['Total_Cost', 'Value_Predict', 'Value_Actual'])
# -------------------------------------------------------------------------------- NOTEBOOK-CELL: CODE
nba_knapsack_output_df = | pd.DataFrame(output_data, columns=output_cols) | pandas.DataFrame |
import sys
import warnings
warnings.filterwarnings("ignore")
warnings.filterwarnings("ignore", message="numpy.dtype size changed")
warnings.filterwarnings("ignore", message="numpy.ufunc size changed")
import pandas as pd
from mgefinder import bowtie2tools
from mgefinder.inferseq import InferSequence
import click
from os.path import dirname
def _inferseq_reference(pairsfile, inferseq_reference, min_perc_identity, max_internal_softclip_prop,
max_inferseq_size, min_inferseq_size, keep_intermediate, output_file):
pairs = pd.read_csv(pairsfile, sep='\t', keep_default_na=False, na_values=[
'-1.#IND', '1.#QNAN', '1.#IND', '-1.#QNAN', '#N/A', 'N/A', '#NA', 'NULL', 'NaN', '-NaN', 'nan', '-nan'])
handle_empty_pairsfile(pairs, output_file)
index_genome(inferseq_reference)
tmp_dir = dirname(output_file)
inferer = InferSequence(
pairs, inferseq_reference, min_perc_identity, max_internal_softclip_prop, max_inferseq_size,
min_inferseq_size, keep_intermediate, 'inferred_reference', tmp_dir
)
inferred_sequences = inferer.infer_sequences()
click.echo("Inferred sequences for %d pairs..." % len(set(list(inferred_sequences['pair_id']))))
click.echo("Writing results to file %s..." % output_file)
if pairs.shape[0] > 0:
sample_id = list(pairs['sample'])[0]
inferred_sequences.insert(0, 'sample', sample_id)
else:
inferred_sequences.insert(0, 'sample', None)
inferred_sequences.to_csv(output_file, sep='\t', index=False)
def index_genome(inferseq_reference):
if not bowtie2tools.genome_is_indexed(inferseq_reference):
click.echo("Indexing inferseq reference genome...")
bowtie2tools.index_genome(inferseq_reference)
click.echo("Genome has been indexed...")
def handle_empty_pairsfile(pairs, output_file):
if pairs.shape[0] == 0:
outfile = | pd.DataFrame(columns=['pair_id', 'method', 'loc', 'inferred_seq_length', 'inferred_seq']) | pandas.DataFrame |
import os, copy
import numpy
import pandas as pd
import utils.data_connection.constant_variables_db as cons
from utils.data_connection.api_data_manager import APISourcesFetcher
from datetime import datetime, timedelta
from unittest import TestCase, mock
from active_companies.src.models.train.active_companies_algorithm import OliverClassifierTimeSeries
from utils.data_connection.source_manager import Connector
from utils.utilities import get_today_year_week
class TestOliverClassifier(TestCase):
@mock.patch.dict(os.environ, {'DB_USER': 'root'})
@mock.patch.dict(os.environ, {'DB_PASSWORD': ''})
@mock.patch.dict(os.environ, {'DB_HOST': 'localhost'})
@mock.patch.dict(os.environ, {'DB_PORT': '3306'})
def setUp(self):
db_connector = Connector(os.getenv("DB_USER"), os.getenv("DB_PASSWORD"), os.getenv("DB_HOST"),
os.getenv("DB_PORT"))
self.classifier = OliverClassifierTimeSeries(api_sources_fetcher=APISourcesFetcher(db_connector))
self.first_iteration_reply = (datetime.now() - timedelta(weeks=2))
self.last_iteration_reply = (datetime.now() - timedelta(weeks=1))
self.mock_companies = {'id': ['1', '2', '3', '4'],
'name': ['xpto1', 'xpto2', 'xpto3', 'xpto4'],
'domain': ['xpto.com', 'xpto.com', 'xpto.com', 'xpto.com'],
'created_at': ['2019-05-10 10:10:10', '2019-05-10 10:10:10', '2019-05-10 10:10:10',
'2019-05-10 10:10:10'],
'updated_at': ['2019-05-10 10:10:10', '2019-05-10 10:10:10', '2019-05-10 10:10:10',
'2019-05-10 10:10:10'],
'language': ['de_DE', 'de_DE', 'en_US', 'de_de'],
'is_enabled': [1, 1, 1, 1],
'deleted_at': [None, None, None, None]
}
self.mock_company_users = {'id': ['1', '2', '3', '4', '5', '6'],
'company_id': ['1', '1', '1', '1', '1', '2'],
'user_id': ['1', '2', '3', '4', '5', '6'],
'is_general_manager': ['1', '1', '0', '1', '0', '0'],
'is_admin': ['0', '0', '1', '0', '0', '1'],
'roles': ['xpto', 'xpto', 'xpto', 'xpto', 'xpto', 'xpto'],
'created_at': ['2019-05-10 10:10:10', '2019-05-10 10:10:10', '2019-05-10 10:10:10',
'2019-05-10 10:10:10', '2019-05-10 10:10:10', '2019-05-10 10:10:10'],
'updated_at': ['2019-05-10 10:10:10', '2019-05-10 10:10:10', '2019-05-10 10:10:10',
'2019-05-10 10:10:10', '2019-05-10 10:10:10', '2019-05-10 10:10:10'],
'deleted_at': [None, None, None, None, None, None],
'is_enabled': [1, 1, 1, 1, 1, 1]
}
self.mock_survey_replies = {'id': ['1', '2', '3', '4', '5', '6', '7', '8'],
'survey_question_id': ['1', '2', '3', '4', '1', '2', '3', '4'],
'user_id': ['1', '2', '3', '4', '1', '2', '3', '4'],
'rating': ['1', '2', '3', '4', '1', '2', '3', '4'],
'created_at': [self.first_iteration_reply, self.first_iteration_reply,
self.first_iteration_reply,
self.first_iteration_reply, self.last_iteration_reply,
self.last_iteration_reply,
self.last_iteration_reply, self.last_iteration_reply],
'user_timezone': ['Europe/Lisbon', 'Europe/London', 'Europe/London',
'Europe/Lisbon', 'Europe/Lisbon', 'Europe/London',
'Europe/London',
'Europe/Lisbon'],
'system_timezone': ['Europe/Lisbon', 'Europe/London', 'Europe/London',
'Europe/Lisbon', 'Europe/Lisbon', 'Europe/London',
'Europe/London',
'Europe/Lisbon'],
'survey_iteration_token_id': ['token1', 'token2', 'token3', 'token4', 'token1',
'token2', 'token3', 'token4'],
'comment': ['xpto', 'xpto', '', 'xpto', 'xpto', 'xpto', '', 'xpto'],
'comment_deleted_at': [None, None, None, None, None, None, None,
None]
}
def tearDown(self):
del self.classifier, self.mock_companies, self.mock_company_users, self.mock_survey_replies
@mock.patch('utils.data_connection.api_data_manager.APISourcesFetcher.get_surveys_mood', autospec=True)
@mock.patch('utils.data_connection.api_data_manager.APISourcesFetcher.get_companies_users', autospec=True)
@mock.patch('utils.data_connection.api_data_manager.APISourcesFetcher.get_companies_info', autospec=True)
def test_identify_active_companies(self, mock_get_companies_info,
mock_get_companies_users,
mock_get_surveys_mood):
mock_get_companies_info.return_value = pd.DataFrame(self.mock_companies,
columns=cons.COMPANIES_COLUMN_NAMES)
mock_get_companies_users.return_value = pd.DataFrame(self.mock_company_users,
columns=cons.COMPANY_USERS_COLUMN_NAMES)
mock_get_surveys_mood.return_value = pd.DataFrame(self.mock_survey_replies,
columns=cons.SURVEYS_REPLIES_COLUMN_NAMES)
companies_names, year, week = self.classifier.identify_active_companies()
year, week = get_today_year_week()
self.assertFalse(companies_names.empty)
self.assertIsInstance(year, int)
self.assertIsInstance(week, int)
self.assertTrue(companies_names['id'][0] == '1')
self.assertEqual(year, year)
self.assertEqual(week, week)
@mock.patch('utils.data_connection.api_data_manager.APISourcesFetcher.get_companies_info', autospec=True)
def test_no_valid_companies(self, mock_get_companies_info):
mock_get_companies_info.return_value = pd.DataFrame()
result, _, _ = self.classifier.identify_active_companies()
self.assertTrue(result.empty)
@mock.patch('utils.data_connection.api_data_manager.APISourcesFetcher.get_companies_info', autospec=True)
def test_filter_disabled_companies(self, mock_get_companies_info):
mock_companies = copy.deepcopy(self.mock_companies)
mock_companies['is_enabled'][0] = 0
mock_get_companies_info.return_value = pd.DataFrame(mock_companies,
columns=cons.COMPANIES_COLUMN_NAMES)
self.classifier.set_companies()
self.classifier._prepare_viable_companies()
self.assertFalse(pd.DataFrame.equals(self.classifier.companies, self.classifier.viable_companies))
self.assertTrue(self.classifier.viable_companies.id.count() == 3)
@mock.patch('utils.data_connection.api_data_manager.APISourcesFetcher.get_companies_info', autospec=True)
def test_filter_deleted_companies(self, mock_get_companies_info):
mock_companies = copy.deepcopy(self.mock_companies)
mock_companies['deleted_at'][0] = '2019-05-10 10:10:10'
mock_get_companies_info.return_value = pd.DataFrame(mock_companies,
columns=cons.COMPANIES_COLUMN_NAMES)
self.classifier.set_companies()
self.classifier._prepare_viable_companies()
self.assertFalse(pd.DataFrame.equals(self.classifier.companies, self.classifier.viable_companies))
self.assertTrue(self.classifier.viable_companies.id.count() == 3)
@mock.patch('utils.data_connection.api_data_manager.APISourcesFetcher.get_companies_info', autospec=True)
def test_filter_blacklisted_companies(self, mock_get_companies_info):
mock_companies = copy.deepcopy(self.mock_companies)
mock_companies['domain'][0] = 'guerrilla.net'
mock_get_companies_info.return_value = pd.DataFrame(mock_companies,
columns=cons.COMPANIES_COLUMN_NAMES)
self.classifier.set_companies()
self.classifier._prepare_viable_companies()
self.assertFalse(pd.DataFrame.equals(self.classifier.companies, self.classifier.viable_companies))
self.assertTrue(self.classifier.viable_companies.id.count() == 3)
@mock.patch('utils.data_connection.api_data_manager.APISourcesFetcher.get_companies_users', autospec=True)
@mock.patch('utils.data_connection.api_data_manager.APISourcesFetcher.get_companies_info', autospec=True)
def test_filter_disabled_users(self, mock_get_companies_info, mock_get_companies_users):
mock_company_users = copy.deepcopy(self.mock_company_users)
mock_company_users['is_enabled'][5] = 0
mock_get_companies_info.return_value = pd.DataFrame(self.mock_companies,
columns=cons.COMPANIES_COLUMN_NAMES)
mock_get_companies_users.return_value = pd.DataFrame(mock_company_users,
columns=cons.COMPANY_USERS_COLUMN_NAMES)
self.classifier.set_companies()
self.classifier._prepare_viable_companies()
self.classifier.set_users()
self.classifier._prepare_viable_users(viable_companies=self.classifier.viable_companies)
self.assertFalse(pd.DataFrame.equals(self.classifier.users, self.classifier.viable_users))
self.assertTrue(self.classifier.viable_users.id.count() == 5)
@mock.patch('utils.data_connection.api_data_manager.APISourcesFetcher.get_companies_users', autospec=True)
@mock.patch('utils.data_connection.api_data_manager.APISourcesFetcher.get_companies_info', autospec=True)
def test_filter_deleted_users(self, mock_get_companies_info, mock_get_companies_users):
mock_company_users = copy.deepcopy(self.mock_company_users)
mock_company_users['deleted_at'][5] = '2019-05-10 10:10:10'
mock_get_companies_info.return_value = pd.DataFrame(self.mock_companies,
columns=cons.COMPANIES_COLUMN_NAMES)
mock_get_companies_users.return_value = pd.DataFrame(mock_company_users,
columns=cons.COMPANY_USERS_COLUMN_NAMES)
self.classifier.set_companies()
self.classifier._prepare_viable_companies()
self.classifier.set_users()
self.classifier._prepare_viable_users(viable_companies=self.classifier.viable_companies)
self.assertFalse( | pd.DataFrame.equals(self.classifier.users, self.classifier.viable_users) | pandas.DataFrame.equals |
# -*- coding: utf-8 -*-
# pylint: disable=E1101
# flake8: noqa
from datetime import datetime
import csv
import os
import sys
import re
import nose
import platform
from multiprocessing.pool import ThreadPool
from numpy import nan
import numpy as np
from pandas.io.common import DtypeWarning
from pandas import DataFrame, Series, Index, MultiIndex, DatetimeIndex
from pandas.compat import(
StringIO, BytesIO, PY3, range, long, lrange, lmap, u
)
from pandas.io.common import URLError
import pandas.io.parsers as parsers
from pandas.io.parsers import (read_csv, read_table, read_fwf,
TextFileReader, TextParser)
import pandas.util.testing as tm
import pandas as pd
from pandas.compat import parse_date
import pandas.lib as lib
from pandas import compat
from pandas.lib import Timestamp
from pandas.tseries.index import date_range
import pandas.tseries.tools as tools
from numpy.testing.decorators import slow
import pandas.parser
class ParserTests(object):
"""
Want to be able to test either C+Cython or Python+Cython parsers
"""
data1 = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo2,12,13,14,15
bar2,12,13,14,15
"""
def read_csv(self, *args, **kwargs):
raise NotImplementedError
def read_table(self, *args, **kwargs):
raise NotImplementedError
def setUp(self):
import warnings
warnings.filterwarnings(action='ignore', category=FutureWarning)
self.dirpath = tm.get_data_path()
self.csv1 = os.path.join(self.dirpath, 'test1.csv')
self.csv2 = os.path.join(self.dirpath, 'test2.csv')
self.xls1 = os.path.join(self.dirpath, 'test.xls')
def construct_dataframe(self, num_rows):
df = DataFrame(np.random.rand(num_rows, 5), columns=list('abcde'))
df['foo'] = 'foo'
df['bar'] = 'bar'
df['baz'] = 'baz'
df['date'] = pd.date_range('20000101 09:00:00',
periods=num_rows,
freq='s')
df['int'] = np.arange(num_rows, dtype='int64')
return df
def generate_multithread_dataframe(self, path, num_rows, num_tasks):
def reader(arg):
start, nrows = arg
if not start:
return pd.read_csv(path, index_col=0, header=0, nrows=nrows,
parse_dates=['date'])
return pd.read_csv(path,
index_col=0,
header=None,
skiprows=int(start) + 1,
nrows=nrows,
parse_dates=[9])
tasks = [
(num_rows * i / num_tasks,
num_rows / num_tasks) for i in range(num_tasks)
]
pool = ThreadPool(processes=num_tasks)
results = pool.map(reader, tasks)
header = results[0].columns
for r in results[1:]:
r.columns = header
final_dataframe = pd.concat(results)
return final_dataframe
def test_converters_type_must_be_dict(self):
with tm.assertRaisesRegexp(TypeError, 'Type converters.+'):
self.read_csv(StringIO(self.data1), converters=0)
def test_empty_decimal_marker(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
self.assertRaises(ValueError, read_csv, StringIO(data), decimal='')
def test_empty_thousands_marker(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
self.assertRaises(ValueError, read_csv, StringIO(data), thousands='')
def test_multi_character_decimal_marker(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
self.assertRaises(ValueError, read_csv, StringIO(data), thousands=',,')
def test_empty_string(self):
data = """\
One,Two,Three
a,1,one
b,2,two
,3,three
d,4,nan
e,5,five
nan,6,
g,7,seven
"""
df = self.read_csv(StringIO(data))
xp = DataFrame({'One': ['a', 'b', np.nan, 'd', 'e', np.nan, 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['one', 'two', 'three', np.nan, 'five',
np.nan, 'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
df = self.read_csv(StringIO(data), na_values={'One': [], 'Three': []},
keep_default_na=False)
xp = DataFrame({'One': ['a', 'b', '', 'd', 'e', 'nan', 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['one', 'two', 'three', 'nan', 'five',
'', 'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
df = self.read_csv(
StringIO(data), na_values=['a'], keep_default_na=False)
xp = DataFrame({'One': [np.nan, 'b', '', 'd', 'e', 'nan', 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['one', 'two', 'three', 'nan', 'five', '',
'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
df = self.read_csv(StringIO(data), na_values={'One': [], 'Three': []})
xp = DataFrame({'One': ['a', 'b', np.nan, 'd', 'e', np.nan, 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['one', 'two', 'three', np.nan, 'five',
np.nan, 'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
# GH4318, passing na_values=None and keep_default_na=False yields
# 'None' as a na_value
data = """\
One,Two,Three
a,1,None
b,2,two
,3,None
d,4,nan
e,5,five
nan,6,
g,7,seven
"""
df = self.read_csv(
StringIO(data), keep_default_na=False)
xp = DataFrame({'One': ['a', 'b', '', 'd', 'e', 'nan', 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['None', 'two', 'None', 'nan', 'five', '',
'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
def test_read_csv(self):
if not compat.PY3:
if compat.is_platform_windows():
prefix = u("file:///")
else:
prefix = u("file://")
fname = prefix + compat.text_type(self.csv1)
# it works!
read_csv(fname, index_col=0, parse_dates=True)
def test_dialect(self):
data = """\
label1,label2,label3
index1,"a,c,e
index2,b,d,f
"""
dia = csv.excel()
dia.quoting = csv.QUOTE_NONE
df = self.read_csv(StringIO(data), dialect=dia)
data = '''\
label1,label2,label3
index1,a,c,e
index2,b,d,f
'''
exp = self.read_csv(StringIO(data))
exp.replace('a', '"a', inplace=True)
tm.assert_frame_equal(df, exp)
def test_dialect_str(self):
data = """\
fruit:vegetable
apple:brocolli
pear:tomato
"""
exp = DataFrame({
'fruit': ['apple', 'pear'],
'vegetable': ['brocolli', 'tomato']
})
dia = csv.register_dialect('mydialect', delimiter=':') # noqa
df = self.read_csv(StringIO(data), dialect='mydialect')
tm.assert_frame_equal(df, exp)
csv.unregister_dialect('mydialect')
def test_1000_sep(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
expected = DataFrame({
'A': [1, 10],
'B': [2334, 13],
'C': [5, 10.]
})
df = self.read_csv(StringIO(data), sep='|', thousands=',')
tm.assert_frame_equal(df, expected)
df = self.read_table(StringIO(data), sep='|', thousands=',')
tm.assert_frame_equal(df, expected)
def test_1000_sep_with_decimal(self):
data = """A|B|C
1|2,334.01|5
10|13|10.
"""
expected = DataFrame({
'A': [1, 10],
'B': [2334.01, 13],
'C': [5, 10.]
})
tm.assert_equal(expected.A.dtype, 'int64')
tm.assert_equal(expected.B.dtype, 'float')
tm.assert_equal(expected.C.dtype, 'float')
df = self.read_csv(StringIO(data), sep='|', thousands=',', decimal='.')
tm.assert_frame_equal(df, expected)
df = self.read_table(StringIO(data), sep='|',
thousands=',', decimal='.')
tm.assert_frame_equal(df, expected)
data_with_odd_sep = """A|B|C
1|2.334,01|5
10|13|10,
"""
df = self.read_csv(StringIO(data_with_odd_sep),
sep='|', thousands='.', decimal=',')
tm.assert_frame_equal(df, expected)
df = self.read_table(StringIO(data_with_odd_sep),
sep='|', thousands='.', decimal=',')
tm.assert_frame_equal(df, expected)
def test_separator_date_conflict(self):
# Regression test for issue #4678: make sure thousands separator and
# date parsing do not conflict.
data = '06-02-2013;13:00;1-000.215'
expected = DataFrame(
[[datetime(2013, 6, 2, 13, 0, 0), 1000.215]],
columns=['Date', 2]
)
df = self.read_csv(StringIO(data), sep=';', thousands='-',
parse_dates={'Date': [0, 1]}, header=None)
tm.assert_frame_equal(df, expected)
def test_squeeze(self):
data = """\
a,1
b,2
c,3
"""
idx = Index(['a', 'b', 'c'], name=0)
expected = Series([1, 2, 3], name=1, index=idx)
result = self.read_table(StringIO(data), sep=',', index_col=0,
header=None, squeeze=True)
tm.assertIsInstance(result, Series)
tm.assert_series_equal(result, expected)
def test_squeeze_no_view(self):
# GH 8217
# series should not be a view
data = """time,data\n0,10\n1,11\n2,12\n4,14\n5,15\n3,13"""
result = self.read_csv(StringIO(data), index_col='time', squeeze=True)
self.assertFalse(result._is_view)
def test_inf_parsing(self):
data = """\
,A
a,inf
b,-inf
c,Inf
d,-Inf
e,INF
f,-INF
g,INf
h,-INf
i,inF
j,-inF"""
inf = float('inf')
expected = Series([inf, -inf] * 5)
df = read_csv(StringIO(data), index_col=0)
tm.assert_almost_equal(df['A'].values, expected.values)
df = read_csv(StringIO(data), index_col=0, na_filter=False)
tm.assert_almost_equal(df['A'].values, expected.values)
def test_multiple_date_col(self):
# Can use multiple date parsers
data = """\
KORD,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000
"""
def func(*date_cols):
return lib.try_parse_dates(parsers._concat_date_cols(date_cols))
df = self.read_csv(StringIO(data), header=None,
date_parser=func,
prefix='X',
parse_dates={'nominal': [1, 2],
'actual': [1, 3]})
self.assertIn('nominal', df)
self.assertIn('actual', df)
self.assertNotIn('X1', df)
self.assertNotIn('X2', df)
self.assertNotIn('X3', df)
d = datetime(1999, 1, 27, 19, 0)
self.assertEqual(df.ix[0, 'nominal'], d)
df = self.read_csv(StringIO(data), header=None,
date_parser=func,
parse_dates={'nominal': [1, 2],
'actual': [1, 3]},
keep_date_col=True)
self.assertIn('nominal', df)
self.assertIn('actual', df)
self.assertIn(1, df)
self.assertIn(2, df)
self.assertIn(3, df)
data = """\
KORD,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000
"""
df = read_csv(StringIO(data), header=None,
prefix='X',
parse_dates=[[1, 2], [1, 3]])
self.assertIn('X1_X2', df)
self.assertIn('X1_X3', df)
self.assertNotIn('X1', df)
self.assertNotIn('X2', df)
self.assertNotIn('X3', df)
d = datetime(1999, 1, 27, 19, 0)
self.assertEqual(df.ix[0, 'X1_X2'], d)
df = read_csv(StringIO(data), header=None,
parse_dates=[[1, 2], [1, 3]], keep_date_col=True)
self.assertIn('1_2', df)
self.assertIn('1_3', df)
self.assertIn(1, df)
self.assertIn(2, df)
self.assertIn(3, df)
data = '''\
KORD,19990127 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
'''
df = self.read_csv(StringIO(data), sep=',', header=None,
parse_dates=[1], index_col=1)
d = datetime(1999, 1, 27, 19, 0)
self.assertEqual(df.index[0], d)
def test_multiple_date_cols_int_cast(self):
data = ("KORD,19990127, 19:00:00, 18:56:00, 0.8100\n"
"KORD,19990127, 20:00:00, 19:56:00, 0.0100\n"
"KORD,19990127, 21:00:00, 20:56:00, -0.5900\n"
"KORD,19990127, 21:00:00, 21:18:00, -0.9900\n"
"KORD,19990127, 22:00:00, 21:56:00, -0.5900\n"
"KORD,19990127, 23:00:00, 22:56:00, -0.5900")
date_spec = {'nominal': [1, 2], 'actual': [1, 3]}
import pandas.io.date_converters as conv
# it works!
df = self.read_csv(StringIO(data), header=None, parse_dates=date_spec,
date_parser=conv.parse_date_time)
self.assertIn('nominal', df)
def test_multiple_date_col_timestamp_parse(self):
data = """05/31/2012,15:30:00.029,1306.25,1,E,0,,1306.25
05/31/2012,15:30:00.029,1306.25,8,E,0,,1306.25"""
result = self.read_csv(StringIO(data), sep=',', header=None,
parse_dates=[[0, 1]], date_parser=Timestamp)
ex_val = Timestamp('05/31/2012 15:30:00.029')
self.assertEqual(result['0_1'][0], ex_val)
def test_single_line(self):
# GH 6607
# Test currently only valid with python engine because sep=None and
# delim_whitespace=False. Temporarily copied to TestPythonParser.
# Test for ValueError with other engines:
with tm.assertRaisesRegexp(ValueError,
'sep=None with delim_whitespace=False'):
# sniff separator
buf = StringIO()
sys.stdout = buf
# printing warning message when engine == 'c' for now
try:
# it works!
df = self.read_csv(StringIO('1,2'), names=['a', 'b'],
header=None, sep=None)
tm.assert_frame_equal(DataFrame({'a': [1], 'b': [2]}), df)
finally:
sys.stdout = sys.__stdout__
def test_multiple_date_cols_with_header(self):
data = """\
ID,date,NominalTime,ActualTime,TDew,TAir,Windspeed,Precip,WindDir
KORD,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000"""
df = self.read_csv(StringIO(data), parse_dates={'nominal': [1, 2]})
self.assertNotIsInstance(df.nominal[0], compat.string_types)
ts_data = """\
ID,date,nominalTime,actualTime,A,B,C,D,E
KORD,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000
"""
def test_multiple_date_col_name_collision(self):
self.assertRaises(ValueError, self.read_csv, StringIO(self.ts_data),
parse_dates={'ID': [1, 2]})
data = """\
date_NominalTime,date,NominalTime,ActualTime,TDew,TAir,Windspeed,Precip,WindDir
KORD1,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD2,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD3,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD4,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD5,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD6,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000""" # noqa
self.assertRaises(ValueError, self.read_csv, StringIO(data),
parse_dates=[[1, 2]])
def test_index_col_named(self):
no_header = """\
KORD1,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD2,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD3,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD4,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD5,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD6,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000"""
h = "ID,date,NominalTime,ActualTime,TDew,TAir,Windspeed,Precip,WindDir\n"
data = h + no_header
rs = self.read_csv(StringIO(data), index_col='ID')
xp = self.read_csv(StringIO(data), header=0).set_index('ID')
tm.assert_frame_equal(rs, xp)
self.assertRaises(ValueError, self.read_csv, StringIO(no_header),
index_col='ID')
data = """\
1,2,3,4,hello
5,6,7,8,world
9,10,11,12,foo
"""
names = ['a', 'b', 'c', 'd', 'message']
xp = DataFrame({'a': [1, 5, 9], 'b': [2, 6, 10], 'c': [3, 7, 11],
'd': [4, 8, 12]},
index=Index(['hello', 'world', 'foo'], name='message'))
rs = self.read_csv(StringIO(data), names=names, index_col=['message'])
tm.assert_frame_equal(xp, rs)
self.assertEqual(xp.index.name, rs.index.name)
rs = self.read_csv(StringIO(data), names=names, index_col='message')
tm.assert_frame_equal(xp, rs)
self.assertEqual(xp.index.name, rs.index.name)
def test_usecols_index_col_False(self):
# Issue 9082
s = "a,b,c,d\n1,2,3,4\n5,6,7,8"
s_malformed = "a,b,c,d\n1,2,3,4,\n5,6,7,8,"
cols = ['a', 'c', 'd']
expected = DataFrame({'a': [1, 5], 'c': [3, 7], 'd': [4, 8]})
df = self.read_csv(StringIO(s), usecols=cols, index_col=False)
tm.assert_frame_equal(expected, df)
df = self.read_csv(StringIO(s_malformed),
usecols=cols, index_col=False)
tm.assert_frame_equal(expected, df)
def test_index_col_is_True(self):
# Issue 9798
self.assertRaises(ValueError, self.read_csv, StringIO(self.ts_data),
index_col=True)
def test_converter_index_col_bug(self):
# 1835
data = "A;B\n1;2\n3;4"
rs = self.read_csv(StringIO(data), sep=';', index_col='A',
converters={'A': lambda x: x})
xp = DataFrame({'B': [2, 4]}, index=Index([1, 3], name='A'))
tm.assert_frame_equal(rs, xp)
self.assertEqual(rs.index.name, xp.index.name)
def test_date_parser_int_bug(self):
# #3071
log_file = StringIO(
'posix_timestamp,elapsed,sys,user,queries,query_time,rows,'
'accountid,userid,contactid,level,silo,method\n'
'1343103150,0.062353,0,4,6,0.01690,3,'
'12345,1,-1,3,invoice_InvoiceResource,search\n'
)
def f(posix_string):
return datetime.utcfromtimestamp(int(posix_string))
# it works!
read_csv(log_file, index_col=0, parse_dates=0, date_parser=f)
def test_multiple_skts_example(self):
data = "year, month, a, b\n 2001, 01, 0.0, 10.\n 2001, 02, 1.1, 11."
pass
def test_malformed(self):
# all
data = """ignore
A,B,C
1,2,3 # comment
1,2,3,4,5
2,3,4
"""
try:
df = self.read_table(
StringIO(data), sep=',', header=1, comment='#')
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 4, saw 5', str(inst))
# skip_footer
data = """ignore
A,B,C
1,2,3 # comment
1,2,3,4,5
2,3,4
footer
"""
# GH 6607
# Test currently only valid with python engine because
# skip_footer != 0. Temporarily copied to TestPythonParser.
# Test for ValueError with other engines:
try:
with tm.assertRaisesRegexp(ValueError, 'skip_footer'): # XXX
df = self.read_table(
StringIO(data), sep=',', header=1, comment='#',
skip_footer=1)
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 4, saw 5', str(inst))
# first chunk
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
try:
it = self.read_table(StringIO(data), sep=',',
header=1, comment='#',
iterator=True, chunksize=1,
skiprows=[2])
df = it.read(5)
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 6, saw 5', str(inst))
# middle chunk
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
try:
it = self.read_table(StringIO(data), sep=',', header=1,
comment='#', iterator=True, chunksize=1,
skiprows=[2])
df = it.read(1)
it.read(2)
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 6, saw 5', str(inst))
# last chunk
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
try:
it = self.read_table(StringIO(data), sep=',',
header=1, comment='#',
iterator=True, chunksize=1, skiprows=[2])
df = it.read(1)
it.read()
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 6, saw 5', str(inst))
def test_passing_dtype(self):
# GH 6607
# Passing dtype is currently only supported by the C engine.
# Temporarily copied to TestCParser*.
# Test for ValueError with other engines:
with tm.assertRaisesRegexp(ValueError,
"The 'dtype' option is not supported"):
df = DataFrame(np.random.rand(5, 2), columns=list(
'AB'), index=['1A', '1B', '1C', '1D', '1E'])
with tm.ensure_clean('__passing_str_as_dtype__.csv') as path:
df.to_csv(path)
# GH 3795
# passing 'str' as the dtype
result = self.read_csv(path, dtype=str, index_col=0)
tm.assert_series_equal(result.dtypes, Series(
{'A': 'object', 'B': 'object'}))
# we expect all object columns, so need to convert to test for
# equivalence
result = result.astype(float)
tm.assert_frame_equal(result, df)
# invalid dtype
self.assertRaises(TypeError, self.read_csv, path,
dtype={'A': 'foo', 'B': 'float64'},
index_col=0)
# valid but we don't support it (date)
self.assertRaises(TypeError, self.read_csv, path,
dtype={'A': 'datetime64', 'B': 'float64'},
index_col=0)
self.assertRaises(TypeError, self.read_csv, path,
dtype={'A': 'datetime64', 'B': 'float64'},
index_col=0, parse_dates=['B'])
# valid but we don't support it
self.assertRaises(TypeError, self.read_csv, path,
dtype={'A': 'timedelta64', 'B': 'float64'},
index_col=0)
with tm.assertRaisesRegexp(ValueError,
"The 'dtype' option is not supported"):
# empty frame
# GH12048
self.read_csv(StringIO('A,B'), dtype=str)
def test_quoting(self):
bad_line_small = """printer\tresult\tvariant_name
Klosterdruckerei\tKlosterdruckerei <Salem> (1611-1804)\tMuller, Jacob
Klosterdruckerei\tKlosterdruckerei <Salem> (1611-1804)\tMuller, Jakob
Klosterdruckerei\tKlosterdruckerei <Kempten> (1609-1805)\t"Furststiftische Hofdruckerei, <Kempten""
Klosterdruckerei\tKlosterdruckerei <Kempten> (1609-1805)\tGaller, Alois
Klosterdruckerei\tKlosterdruckerei <Kempten> (1609-1805)\tHochfurstliche Buchhandlung <Kempten>"""
self.assertRaises(Exception, self.read_table, StringIO(bad_line_small),
sep='\t')
good_line_small = bad_line_small + '"'
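# appending the missing closing quote lets the quoted field span the remaining
# lines, so the file parses into 3 rows instead of raising like the bad input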
df = self.read_table(StringIO(good_line_small), sep='\t')
self.assertEqual(len(df), 3)
def test_non_string_na_values(self):
# GH3611, na_values that are not a string are an issue
with tm.ensure_clean('__non_string_na_values__.csv') as path:
df = DataFrame({'A': [-999, 2, 3], 'B': [1.2, -999, 4.5]})
df.to_csv(path, sep=' ', index=False)
result1 = read_csv(path, sep=' ', header=0,
na_values=['-999.0', '-999'])
result2 = read_csv(path, sep=' ', header=0,
na_values=[-999, -999.0])
result3 = read_csv(path, sep=' ', header=0,
na_values=[-999.0, -999])
tm.assert_frame_equal(result1, result2)
tm.assert_frame_equal(result2, result3)
result4 = read_csv(path, sep=' ', header=0, na_values=['-999.0'])
result5 = read_csv(path, sep=' ', header=0, na_values=['-999'])
result6 = read_csv(path, sep=' ', header=0, na_values=[-999.0])
result7 = read_csv(path, sep=' ', header=0, na_values=[-999])
tm.assert_frame_equal(result4, result3)
tm.assert_frame_equal(result5, result3)
tm.assert_frame_equal(result6, result3)
tm.assert_frame_equal(result7, result3)
good_compare = result3
# with an odd float format, so we can't match the string 999.0
# exactly, but need float matching
df.to_csv(path, sep=' ', index=False, float_format='%.3f')
result1 = read_csv(path, sep=' ', header=0,
na_values=['-999.0', '-999'])
result2 = read_csv(path, sep=' ', header=0,
na_values=[-999, -999.0])
result3 = read_csv(path, sep=' ', header=0,
na_values=[-999.0, -999])
tm.assert_frame_equal(result1, good_compare)
tm.assert_frame_equal(result2, good_compare)
tm.assert_frame_equal(result3, good_compare)
result4 = read_csv(path, sep=' ', header=0, na_values=['-999.0'])
result5 = read_csv(path, sep=' ', header=0, na_values=['-999'])
result6 = read_csv(path, sep=' ', header=0, na_values=[-999.0])
result7 = read_csv(path, sep=' ', header=0, na_values=[-999])
tm.assert_frame_equal(result4, good_compare)
tm.assert_frame_equal(result5, good_compare)
tm.assert_frame_equal(result6, good_compare)
tm.assert_frame_equal(result7, good_compare)
def test_default_na_values(self):
_NA_VALUES = set(['-1.#IND', '1.#QNAN', '1.#IND', '-1.#QNAN',
'#N/A', 'N/A', 'NA', '#NA', 'NULL', 'NaN',
'nan', '-NaN', '-nan', '#N/A N/A', ''])
self.assertEqual(_NA_VALUES, parsers._NA_VALUES)
nv = len(_NA_VALUES)
def f(i, v):
if i == 0:
buf = ''
elif i > 0:
buf = ''.join([','] * i)
buf = "{0}{1}".format(buf, v)
if i < nv - 1:
buf = "{0}{1}".format(buf, ''.join([','] * (nv - i - 1)))
return buf
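# f(i, v) places the NA string v in column i of an nv-column row and leaves the
# remaining cells empty, so each default NA value is exercised in exactly one
# cell of the generated data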
data = StringIO('\n'.join([f(i, v) for i, v in enumerate(_NA_VALUES)]))
expected = DataFrame(np.nan, columns=range(nv), index=range(nv))
df = self.read_csv(data, header=None)
tm.assert_frame_equal(df, expected)
def test_custom_na_values(self):
data = """A,B,C
ignore,this,row
1,NA,3
-1.#IND,5,baz
7,8,NaN
"""
expected = [[1., nan, 3],
[nan, 5, nan],
[7, 8, nan]]
df = self.read_csv(StringIO(data), na_values=['baz'], skiprows=[1])
tm.assert_almost_equal(df.values, expected)
df2 = self.read_table(StringIO(data), sep=',', na_values=['baz'],
skiprows=[1])
tm.assert_almost_equal(df2.values, expected)
df3 = self.read_table(StringIO(data), sep=',', na_values='baz',
skiprows=[1])
tm.assert_almost_equal(df3.values, expected)
def test_nat_parse(self):
# GH 3062
df = DataFrame(dict({
'A': np.asarray(lrange(10), dtype='float64'),
'B': pd.Timestamp('20010101')}))
df.iloc[3:6, :] = np.nan
with tm.ensure_clean('__nat_parse_.csv') as path:
df.to_csv(path)
result = read_csv(path, index_col=0, parse_dates=['B'])
tm.assert_frame_equal(result, df)
expected = Series(dict(A='float64', B='datetime64[ns]'))
tm.assert_series_equal(expected, result.dtypes)
# test with NaT for the nan_rep
# we don't have a method to specify the Datetime na_rep (it defaults
# to '')
df.to_csv(path)
result = read_csv(path, index_col=0, parse_dates=['B'])
tm.assert_frame_equal(result, df)
def test_skiprows_bug(self):
# GH #505
text = """#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
1/1/2000,1.,2.,3.
1/2/2000,4,5,6
1/3/2000,7,8,9
"""
data = self.read_csv(StringIO(text), skiprows=lrange(6), header=None,
index_col=0, parse_dates=True)
data2 = self.read_csv(StringIO(text), skiprows=6, header=None,
index_col=0, parse_dates=True)
expected = DataFrame(np.arange(1., 10.).reshape((3, 3)),
columns=[1, 2, 3],
index=[datetime(2000, 1, 1), datetime(2000, 1, 2),
datetime(2000, 1, 3)])
expected.index.name = 0
tm.assert_frame_equal(data, expected)
tm.assert_frame_equal(data, data2)
def test_deep_skiprows(self):
# GH #4382
text = "a,b,c\n" + \
"\n".join([",".join([str(i), str(i + 1), str(i + 2)])
for i in range(10)])
condensed_text = "a,b,c\n" + \
"\n".join([",".join([str(i), str(i + 1), str(i + 2)])
for i in [0, 1, 2, 3, 4, 6, 8, 9]])
data = self.read_csv(StringIO(text), skiprows=[6, 8])
condensed_data = self.read_csv(StringIO(condensed_text))
tm.assert_frame_equal(data, condensed_data)
def test_skiprows_blank(self):
# GH 9832
text = """#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
1/1/2000,1.,2.,3.
1/2/2000,4,5,6
1/3/2000,7,8,9
"""
data = self.read_csv(StringIO(text), skiprows=6, header=None,
index_col=0, parse_dates=True)
expected = DataFrame(np.arange(1., 10.).reshape((3, 3)),
columns=[1, 2, 3],
index=[datetime(2000, 1, 1), datetime(2000, 1, 2),
datetime(2000, 1, 3)])
expected.index.name = 0
tm.assert_frame_equal(data, expected)
def test_detect_string_na(self):
data = """A,B
foo,bar
NA,baz
NaN,nan
"""
expected = [['foo', 'bar'],
[nan, 'baz'],
[nan, nan]]
df = self.read_csv(StringIO(data))
tm.assert_almost_equal(df.values, expected)
def test_unnamed_columns(self):
data = """A,B,C,,
1,2,3,4,5
6,7,8,9,10
11,12,13,14,15
"""
expected = [[1, 2, 3, 4, 5.],
[6, 7, 8, 9, 10],
[11, 12, 13, 14, 15]]
df = self.read_table(StringIO(data), sep=',')
tm.assert_almost_equal(df.values, expected)
self.assert_numpy_array_equal(df.columns,
['A', 'B', 'C', 'Unnamed: 3',
'Unnamed: 4'])
def test_string_nas(self):
data = """A,B,C
a,b,c
d,,f
,g,h
"""
result = self.read_csv(StringIO(data))
expected = DataFrame([['a', 'b', 'c'],
['d', np.nan, 'f'],
[np.nan, 'g', 'h']],
columns=['A', 'B', 'C'])
tm.assert_frame_equal(result, expected)
def test_duplicate_columns(self):
for engine in ['python', 'c']:
data = """A,A,B,B,B
1,2,3,4,5
6,7,8,9,10
11,12,13,14,15
"""
# check default behaviour
df = self.read_table(StringIO(data), sep=',', engine=engine)
self.assertEqual(list(df.columns), ['A', 'A.1', 'B', 'B.1', 'B.2'])
df = self.read_table(StringIO(data), sep=',',
engine=engine, mangle_dupe_cols=False)
self.assertEqual(list(df.columns), ['A', 'A', 'B', 'B', 'B'])
df = self.read_table(StringIO(data), sep=',',
engine=engine, mangle_dupe_cols=True)
self.assertEqual(list(df.columns), ['A', 'A.1', 'B', 'B.1', 'B.2'])
def test_csv_mixed_type(self):
data = """A,B,C
a,1,2
b,3,4
c,4,5
"""
df = self.read_csv(StringIO(data))
# TODO
def test_csv_custom_parser(self):
data = """A,B,C
20090101,a,1,2
20090102,b,3,4
20090103,c,4,5
"""
f = lambda x: datetime.strptime(x, '%Y%m%d')
df = self.read_csv(StringIO(data), date_parser=f)
expected = self.read_csv(StringIO(data), parse_dates=True)
tm.assert_frame_equal(df, expected)
def test_parse_dates_implicit_first_col(self):
data = """A,B,C
20090101,a,1,2
20090102,b,3,4
20090103,c,4,5
"""
df = self.read_csv(StringIO(data), parse_dates=True)
expected = self.read_csv(StringIO(data), index_col=0, parse_dates=True)
self.assertIsInstance(
df.index[0], (datetime, np.datetime64, Timestamp))
tm.assert_frame_equal(df, expected)
def test_parse_dates_string(self):
data = """date,A,B,C
20090101,a,1,2
20090102,b,3,4
20090103,c,4,5
"""
rs = self.read_csv(
StringIO(data), index_col='date', parse_dates='date')
idx = date_range('1/1/2009', periods=3)
idx.name = 'date'
xp = DataFrame({'A': ['a', 'b', 'c'],
'B': [1, 3, 4],
'C': [2, 4, 5]}, idx)
tm.assert_frame_equal(rs, xp)
def test_yy_format(self):
data = """date,time,B,C
090131,0010,1,2
090228,1020,3,4
090331,0830,5,6
"""
rs = self.read_csv(StringIO(data), index_col=0,
parse_dates=[['date', 'time']])
idx = DatetimeIndex([datetime(2009, 1, 31, 0, 10, 0),
datetime(2009, 2, 28, 10, 20, 0),
datetime(2009, 3, 31, 8, 30, 0)],
dtype=object, name='date_time')
xp = DataFrame({'B': [1, 3, 5], 'C': [2, 4, 6]}, idx)
tm.assert_frame_equal(rs, xp)
rs = self.read_csv(StringIO(data), index_col=0,
parse_dates=[[0, 1]])
idx = DatetimeIndex([datetime(2009, 1, 31, 0, 10, 0),
datetime(2009, 2, 28, 10, 20, 0),
datetime(2009, 3, 31, 8, 30, 0)],
dtype=object, name='date_time')
xp = DataFrame({'B': [1, 3, 5], 'C': [2, 4, 6]}, idx)
tm.assert_frame_equal(rs, xp)
def test_parse_dates_column_list(self):
from pandas.core.datetools import to_datetime
data = '''date;destination;ventilationcode;unitcode;units;aux_date
01/01/2010;P;P;50;1;12/1/2011
01/01/2010;P;R;50;1;13/1/2011
15/01/2010;P;P;50;1;14/1/2011
01/05/2010;P;P;50;1;15/1/2011'''
expected = self.read_csv(StringIO(data), sep=";", index_col=lrange(4))
lev = expected.index.levels[0]
levels = list(expected.index.levels)
levels[0] = lev.to_datetime(dayfirst=True)
# hack to get this to work - remove for final test
levels[0].name = lev.name
expected.index.set_levels(levels, inplace=True)
expected['aux_date'] = to_datetime(expected['aux_date'],
dayfirst=True)
expected['aux_date'] = lmap(Timestamp, expected['aux_date'])
tm.assertIsInstance(expected['aux_date'][0], datetime)
df = self.read_csv(StringIO(data), sep=";", index_col=lrange(4),
parse_dates=[0, 5], dayfirst=True)
tm.assert_frame_equal(df, expected)
df = self.read_csv(StringIO(data), sep=";", index_col=lrange(4),
parse_dates=['date', 'aux_date'], dayfirst=True)
tm.assert_frame_equal(df, expected)
def test_no_header(self):
data = """1,2,3,4,5
6,7,8,9,10
11,12,13,14,15
"""
df = self.read_table(StringIO(data), sep=',', header=None)
df_pref = self.read_table(StringIO(data), sep=',', prefix='X',
header=None)
names = ['foo', 'bar', 'baz', 'quux', 'panda']
df2 = self.read_table(StringIO(data), sep=',', names=names)
expected = [[1, 2, 3, 4, 5.],
[6, 7, 8, 9, 10],
[11, 12, 13, 14, 15]]
tm.assert_almost_equal(df.values, expected)
tm.assert_almost_equal(df.values, df2.values)
self.assert_numpy_array_equal(df_pref.columns,
['X0', 'X1', 'X2', 'X3', 'X4'])
self.assert_numpy_array_equal(df.columns, lrange(5))
self.assert_numpy_array_equal(df2.columns, names)
def test_no_header_prefix(self):
data = """1,2,3,4,5
6,7,8,9,10
11,12,13,14,15
"""
df_pref = self.read_table(StringIO(data), sep=',', prefix='Field',
header=None)
expected = [[1, 2, 3, 4, 5.],
[6, 7, 8, 9, 10],
[11, 12, 13, 14, 15]]
tm.assert_almost_equal(df_pref.values, expected)
self.assert_numpy_array_equal(df_pref.columns,
['Field0', 'Field1', 'Field2', 'Field3', 'Field4'])
def test_header_with_index_col(self):
data = """foo,1,2,3
bar,4,5,6
baz,7,8,9
"""
names = ['A', 'B', 'C']
df = self.read_csv(StringIO(data), names=names)
self.assertEqual(names, ['A', 'B', 'C'])
values = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
expected = DataFrame(values, index=['foo', 'bar', 'baz'],
columns=['A', 'B', 'C'])
| tm.assert_frame_equal(df, expected) | pandas.util.testing.assert_frame_equal |
import numpy as np
import pandas as pd
array1 = [1,2,3,4,5,6,7,8]
array2 = [1,3,5,7,9,11,13,15]
ds_array1 = pd.Series(array1)
ds_array2 = | pd.Series(array2) | pandas.Series |
# -*- coding: utf-8 -*-
import pandas as pd
import itertools
import argparse
parser = argparse.ArgumentParser(
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument("--outdir", nargs='?', type=str, default="tmp_results", help="input output directory")
parser.add_argument("--input", nargs='?', type=str, default="results.tsv", help="input tsv file")
parser.add_argument("--format", nargs='?', type=str, default="all", help="formula format")
parser.add_argument("--setting", nargs='?', type=str, default="comp", help="test setting")
args = parser.parse_args()
df = pd.read_csv(args.outdir+"/"+args.input, sep="\t")
#print("total {0} examples".format(len(df)))
if args.setting == "comp":
#todo: change query setting according to the primitive quantifier
train1 = df.query('depth==0 and \
sentence.str.contains("one ")')
train2 = df.query('depth==0 and \
not sentence.str.contains("one ") and \
tags.str.contains("adjective:no") and \
tags.str.contains("adverb:no") and \
tags.str.contains("conjunction:no") and \
tags.str.contains("disjunction:no")')
test = df.query('depth==0 and \
not sentence.str.contains("one ") and \
(tags.str.contains("adjective:yes") or \
tags.str.contains("adverb:yes") or \
tags.str.contains("conjunction:yes") or \
tags.str.contains("disjunction:yes"))')
tr = pd.concat([train1, train2], axis=0)
train = tr.sample(frac=1)
if args.format == "fol":
train.drop(['vf', 'drs', 'vfpol', 'folpol'], axis=1).to_csv(args.outdir+"/"+"train_fol.tsv", sep="\t", index=False)
test.drop(['vf', 'drs', 'vfpol', 'folpol'], axis=1).to_csv(args.outdir+"/"+"test_fol.tsv", sep="\t", index=False)
elif args.format == "free":
train.drop(['fol', 'drs', 'vfpol', 'folpol'], axis=1).rename(columns={'vf':'fol'}).to_csv(args.outdir+"/"+"train_free.tsv", sep="\t", index=False)
test.drop(['fol', 'drs', 'vfpol', 'folpol'], axis=1).rename(columns={'vf':'fol'}).to_csv(args.outdir+"/"+"test_free.tsv", sep="\t", index=False)
elif args.format == "drs":
train.drop(['fol', 'vf', 'vfpol', 'folpol'], axis=1).rename(columns={'drs':'fol'}).to_csv(args.outdir+"/"+"train_drs.tsv", sep="\t", index=False)
test.drop(['fol', 'vf', 'vfpol', 'folpol'], axis=1).rename(columns={'drs':'fol'}).to_csv(args.outdir+"/"+"test_drs.tsv", sep="\t", index=False)
elif args.format == "all":
train.drop(['vf', 'drs', 'vfpol', 'folpol'], axis=1).to_csv(args.outdir+"/"+"train_fol.tsv", sep="\t", index=False)
test.drop(['vf', 'drs', 'vfpol', 'folpol'], axis=1).to_csv(args.outdir+"/"+"test_fol.tsv", sep="\t", index=False)
train.drop(['fol', 'drs', 'vfpol', 'folpol'], axis=1).rename(columns={'vf':'fol'}).to_csv(args.outdir+"/"+"train_free.tsv", sep="\t", index=False)
test.drop(['fol', 'drs', 'vfpol', 'folpol'], axis=1).rename(columns={'vf':'fol'}).to_csv(args.outdir+"/"+"test_free.tsv", sep="\t", index=False)
train.drop(['fol', 'vf', 'vfpol', 'folpol'], axis=1).rename(columns={'drs':'fol'}).to_csv(args.outdir+"/"+"train_drs.tsv", sep="\t", index=False)
test.drop(['fol', 'vf', 'vfpol', 'folpol'], axis=1).rename(columns={'drs':'fol'}).to_csv(args.outdir+"/"+"test_drs.tsv", sep="\t", index=False)
train_auto =pd.DataFrame(index=[], columns=["id", "depth", "auto_sent1", "auto_sent2", "auto_sem1", "auto_sem2", "tags"])
train_auto["id"] = train["id"]
train_auto["depth"] = train["depth"]
train_auto["auto_sent1"] = train["sentence"]
train_auto["auto_sent2"] = train["sentence"]
train_auto["auto_sem1"] = train["fol"]
train_auto["auto_sem2"] = train["fol"]
train_auto["tags"] = train["tags"]
train_auto.to_csv(args.outdir+"/"+"train_auto.tsv", sep="\t", index=False)
elif args.setting == "depth":
depth0 = df.query('depth==0', engine='python')
depth1 = df.query('depth==1', engine='python')
depth2 = df.query('depth==2', engine='python')
depth3 = df.query('depth==3', engine='python')
depth4 = df.query('depth==4', engine='python')
tr = | pd.concat([depth0, depth1], axis=0) | pandas.concat |
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sn
# Set the maximum number of displayed columns to 777
pd.options.display.max_columns = 777
# Read the Excel file
students_one = | pd.read_excel('./TestExcel/AlbertData.xlsx',sheet_name='Sheet1',index_col='Index') | pandas.read_excel |
# ----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
import copy
import pandas as pd
import numpy as np
import numpy.testing as npt
from skbio.util._testing import assert_data_frame_almost_equal
class MetadataMixinTests:
def test_constructor_invalid_type(self):
for md in (0, 'a', ('f', 'o', 'o'), np.array([]), | pd.DataFrame() | pandas.DataFrame |
import numpy as np
import pandas as pd
import pickle
import time
import random
import os
from sklearn import linear_model, model_selection, ensemble
from sklearn.svm import SVC
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.base import clone
from sklearn import metrics
from sklearn.model_selection import cross_validate, train_test_split, StratifiedKFold
import sklearn.metrics as m
from joblib import Parallel, delayed
from sklearn.base import clone
from sklearn.utils import shuffle, resample
type_='marker'
basename = type_+'_features_expired_prediction_'
dir_ = '../../data/'
t0_all=time.time()
seed = 42
np.random.seed(seed)
max_depth = 1
C=1
tol=1e-3
min_samples_leaf=2
min_samples_split=2
n_estimators=100
models = {
"Logistic Regression" : linear_model.LogisticRegression(
C=C,
penalty='l1',
solver="liblinear",
tol=tol,
random_state=seed)
}
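# The only model here is L1-penalized ("lasso") logistic regression with the
# liblinear solver, so the feature-importance tables built below contain the
# per-feature lasso coefficients plus the model intercept.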
classification_metrics = ['roc_auc']
cv_split = 10
test_size = 0.15
n_jobs = 25
nboot=200
X_all_proteins = pd.read_csv(dir_+'integrated_X_raw_all_proteins.csv',index_col=0)
proteins_no_immunoglobulins = pickle.load(open(dir_+'proteins_no_immunoglobulins.pkl','rb'))
X_all_proteins = X_all_proteins.loc[:,proteins_no_immunoglobulins]
joined = pd.read_csv(dir_+'mortality_X_y.csv',index_col=0)
X_all_clinical = pd.read_csv(dir_+'integrated_X_clinical_and_cohort_covariates.csv',index_col=0)
Y_pgd = pd.read_csv(dir_+'integrated_pgd_y.csv',index_col=0,header=None)
Y_pgd.columns = ['PGD']
X_all_clinical = X_all_clinical.join(Y_pgd)
Y_mortality = joined[['expired']]
Y_mortality.index.name=''
X_all_clinical = X_all_clinical.join(Y_mortality)
Y_lvad = joined[['Mechanical_Support_Y']]
Y_lvad.index.name=''
Y_survival = (joined[['expired']]==0).astype(int)
Y_survival.columns = ['Survival']
Y_survival.index.name=''
X_all_clinical = X_all_clinical.join(Y_survival)
idmap_sub = pd.read_csv(dir_+'protein_gene_map_full.csv')[['Protein','Gene_name']].dropna()
cov_df = X_all_clinical.loc[:,['Cohort_Columbia','Cohort_Cedar']].copy().astype(int)
all_cov_df = cov_df.copy()
all_cov_df.loc[:,'Cohort_Paris'] = (
(all_cov_df['Cohort_Columbia'] +
all_cov_df['Cohort_Cedar'])==0).astype(int)
params = {'Y' : Y_survival, 'cv_split' : cv_split,
'metrics' : classification_metrics, 'n_jobs' : 1,
'test_size' : test_size,
'retrained_models' : True, 'patient_level_predictions' : True}
def permute(Y,seed=42):
"""
shuffle sample values
Parameters:
----------
Y : pandas series
Index of samples and values are their class labels
seed : int
Random seed for shuffling
Returns:
------
arr_shuffle: pandas series
A shuffled Y
"""
arr = shuffle(Y.values,random_state=seed)
arr_shuffle = (pd.Series(arr.reshape(1,-1)[0],index=Y.index))
return arr_shuffle
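# A minimal usage sketch of permute (the toy labels below are hypothetical, not
# study data): the returned Series keeps the same labels and index but re-pairs
# labels with samples, which is what the permuted baselines below rely on.
def _permute_usage_example():
    toy_y = pd.Series([0, 1, 1, 0], index=['s1', 's2', 's3', 's4'])
    return permute(toy_y, seed=0)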
def observed_val(X,Y,models,metrics=['roc_auc'],cv_split=10,seed=42,test_size=0.15,return_train_score=False,n_jobs=1,retrained_models=False,patient_level_predictions=False,return_estimator=True):
# make sure given metrics are in list and not one metric is given as a string
if type(metrics)!=list:
metrics = [metrics]
# 1/ reindex
X = X.loc[Y.index]
dfs = []
model_retrained_fits = {}
model_confs = []
#iterate through model dictionary
for name,mod in models.items():
# /2 generate model parameters and fold scores with cv splitter
fit = clone(mod).fit(X,Y.values.reshape(1,-1)[0])
conf = pd.DataFrame({'y_true' : Y.values.reshape(1,-1)[0],'y_pred' : fit.predict(X),'bootstrap' : 'observed','model' : np.repeat(name,len(Y.index))},index=Y.index)
model_confs.append(conf)
#do prediction for each metric
tmp = pd.DataFrame({'model' : name,'bootstrap' : 'observed'},index=[0])
for metric in metrics:
tmp[metric] = m.SCORERS[metric](fit,X,Y)
model_retrained_fits[name] = fit
dfs.append(tmp)
return pd.concat(dfs).reset_index(drop=True), model_retrained_fits, pd.concat(model_confs)
def resample_observed_val(X,Y,models,metrics=['roc_auc'],cv_split=10,seed=42,test_size=0.15,return_train_score=False,n_jobs=1,retrained_models=False,patient_level_predictions=False,return_estimator=True):
# make sure given metrics are in list and not one metric is given as a string
if type(metrics)!=list:
metrics = [metrics]
# 1/ reindex
X = X.loc[Y.index]
Y_resample = resample(Y,random_state=seed)
X = X.loc[Y_resample.index]
Y = Y_resample.copy()
dfs = []
model_retrained_fits = {}
model_confs = []
#iterate through model dictionary
for name,mod in models.items():
# /2 generate model parameters and fold scores with cv splitter
fit = clone(mod).fit(X,Y.values.reshape(1,-1)[0])
conf = pd.DataFrame({'y_true' : Y.values.reshape(1,-1)[0],'y_pred' : fit.predict(X),'bootstrap' : 'observed','model' : np.repeat(name,len(Y.index))},index=Y.index)
model_confs.append(conf)
#do prediction for each metric
tmp = pd.DataFrame({'model' : name,'bootstrap' : 'observed'},index=[0])
for metric in metrics:
tmp[metric] = m.SCORERS[metric](fit,X,Y)
model_retrained_fits[name] = fit
dfs.append(tmp)
return pd.concat(dfs).reset_index(drop=True), model_retrained_fits, pd.concat(model_confs)
def permuted_observed_val(X,Y,models,metrics=['roc_auc'],cv_split=10,seed=42,test_size=0.15,return_train_score=False,n_jobs=1,retrained_models=False,patient_level_predictions=False,return_estimator=True):
# make sure given metrics are in list and not one metric is given as a string
if type(metrics)!=list:
metrics = [metrics]
# 1/ reindex
X = X.loc[Y.index]
Y_shuffle = permute(Y,seed=seed)
X = X.loc[Y_shuffle.index]
Y = Y_shuffle.copy()
dfs = []
model_retrained_fits = {}
model_confs = []
#iterate through model dictionary
for name,mod in models.items():
# /2 generate model parameters and fold scores with cv splitter
fit = clone(mod).fit(X,Y.values.reshape(1,-1)[0])
conf = pd.DataFrame({'y_true' : Y.values.reshape(1,-1)[0],'y_pred' : fit.predict(X),'bootstrap' : 'observed','model' : np.repeat(name,len(Y.index))},index=Y.index)
model_confs.append(conf)
#do prediction for each metric
tmp = pd.DataFrame({'model' : name,'bootstrap' : 'observed'},index=[0])
for metric in metrics:
tmp[metric] = m.SCORERS[metric](fit,X,Y)
model_retrained_fits[name] = fit
dfs.append(tmp)
return pd.concat(dfs).reset_index(drop=True), model_retrained_fits, pd.concat(model_confs)
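# observed_val fits on the original labels, resample_observed_val on a bootstrap
# resample, and permuted_observed_val on shuffled labels; only the train/test
# split variants defined next are invoked in the bootstrap loop further down.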
def train_test_val_top_fold_01_within(X,Y,models,metrics=['roc_auc'],cv_split=10,seed=42,test_size=0.15,return_train_score=True,n_jobs=1,retrained_models=False,patient_level_predictions=False,return_estimator=True):
# make sure given metrics are in list and not one metric given as a string
if type(metrics)!=list:
metrics = [metrics]
# 1/ train and test split
X = X.loc[Y.index]
X_train, X_test, y_train, y_test = train_test_split(X,Y,
test_size=test_size,
random_state=seed,
stratify=Y,
shuffle=True)
X_train = X_train.apply(lambda x : (x - min(x))/(max(x) - min(x)),axis=0)
X_test = X_test.apply(lambda x : (x - min(x))/(max(x) - min(x)),axis=0)
X_train[X_train.isna()]=0
X_test[X_test.isna()]=0
#define K fold splitter
cv = StratifiedKFold(n_splits=cv_split,random_state=seed,shuffle=True)
#Instantiate lists to collect prediction and model results
dfs = []
model_retrained_fits = {}
model_confs = []
#iterate through model dictionary
for name,mod in models.items():
# /2 generate model parameters and fold scores with cv splitter
fit = cross_validate(clone(mod),X_train,y_train.values.reshape(1,-1)[0],cv=cv,scoring=metrics,
n_jobs=n_jobs,return_train_score=return_train_score,
return_estimator=return_estimator)
tmp = pd.DataFrame({'fold' : range(cv_split),
'model' : name},
index=range(cv_split))
#populate scores in dataframe
cols = [k for k in fit.keys() if (k.find('test')+k.find('train'))==-1]
for col in cols:
tmp[col] = fit[col]
# /3 Identify best performing model
top_fold = np.where(fit['test_roc_auc']==fit['test_roc_auc'].max())[0][0]
keys = [x for x in fit.keys()]
vals = [fit[x][top_fold] for x in keys]
top_model_key_vals = {}
for i in range(len(vals)):
top_model_key_vals[keys[i]] = vals[i]
#4/ train models on training set
# also get sample level predictions
f = top_model_key_vals['estimator']
fitted = clone(f).fit(X_train,y_train.values.reshape(1,-1)[0])
conf = pd.DataFrame({'y_true' : y_test.values.reshape(1,-1)[0],
'y_pred' : fitted.predict(X_test),
'y_proba' : fitted.predict_proba(X_test)[:,1],
'bootstrap' : np.repeat(seed,len(y_test.index)),
'model' : np.repeat(name,len(y_test.index))},
index=y_test.index)
model_confs.append(conf)
#do prediction for each metric
for metric in metrics:
tmp['validation_'+metric] = m.SCORERS[metric](fitted,X_test,y_test)
model_retrained_fits[name] = fitted
dfs.append(tmp.query('fold==@top_fold').drop('fold',1))
return pd.concat(dfs,sort=True).reset_index(drop=True), model_retrained_fits, pd.concat(model_confs,sort=True)
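# Each call returns (i) a one-row-per-model performance frame holding the best
# fold's CV scores plus validation_<metric> on the held-out split, (ii) a dict
# of the refitted estimators, and (iii) per-patient y_true / y_pred / y_proba
# for the held-out samples.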
def permuted_train_test_val_top_fold_01_within(X,Y,models,metrics=['roc_auc'],cv_split=10,seed=42,test_size=0.15,return_train_score=True,n_jobs=1,retrained_models=False,patient_level_predictions=False,return_estimator=True):
X = X.loc[Y.index]
Y_shuffle = permute(Y,seed=seed)
X_shuffle = X.loc[Y_shuffle.index]
# make sure given metrics are in list and not one metric given as a string
if type(metrics)!=list:
metrics = [metrics]
# 1/ train and test split
X_train, X_test, y_train, y_test = train_test_split(X_shuffle,Y_shuffle,
test_size=test_size,
random_state=seed,
stratify=Y,
shuffle=True)
X_train = X_train.apply(lambda x : (x - min(x))/(max(x) - min(x)),axis=0)
X_test = X_test.apply(lambda x : (x - min(x))/(max(x) - min(x)),axis=0)
X_train[X_train.isna()]=0
X_test[X_test.isna()]=0
#define K fold splitter
cv = StratifiedKFold(n_splits=cv_split,random_state=seed,shuffle=True)
#Instantiate lists to collect prediction and model results
dfs = []
model_retrained_fits = {}
model_confs = []
#iterate through model dictionary
for name,mod in models.items():
# /2 generate model parameters and fold scores with cv splitter
fit = cross_validate(clone(mod),X_train,y_train.values.reshape(1,-1)[0],cv=cv,scoring=metrics,
n_jobs=n_jobs,return_train_score=return_train_score,
return_estimator=return_estimator)
tmp = pd.DataFrame({'fold' : range(cv_split),
'model' : name},
index=range(cv_split))
#populate scores in dataframe
cols = [k for k in fit.keys() if (k.find('test')+k.find('train'))==-1]
for col in cols:
tmp[col] = fit[col]
# /3 Identify best performing model
top_fold = np.where(fit['test_roc_auc']==fit['test_roc_auc'].max())[0][0]
keys = [x for x in fit.keys()]
vals = [fit[x][top_fold] for x in keys]
top_model_key_vals = {}
for i in range(len(vals)):
top_model_key_vals[keys[i]] = vals[i]
#4/ train models on training set
# also get sample level predictions
f = top_model_key_vals['estimator']
fitted = clone(f).fit(X_train,y_train.values.reshape(1,-1)[0])
conf = pd.DataFrame({'y_true' : y_test.values.reshape(1,-1)[0],
'y_pred' : fitted.predict(X_test),
'y_proba' : fitted.predict_proba(X_test)[:,1],
'bootstrap' : np.repeat(seed,len(y_test.index)),
'model' : np.repeat(name,len(y_test.index))},
index=y_test.index)
model_confs.append(conf)
#do prediction for each metric
for metric in metrics:
tmp['validation_'+metric] = m.SCORERS[metric](fitted,X_test,y_test)
model_retrained_fits[name] = fitted
dfs.append(tmp.query('fold==@top_fold').drop('fold',1))
return pd.concat(dfs,sort=True).reset_index(drop=True), model_retrained_fits, pd.concat(model_confs,sort=True)
def bootstrap_of_fcn(func=None,params={},n_jobs=4,nboot=2):
if func==None:
return "Need fcn to bootstrap"
parallel = Parallel(n_jobs=n_jobs)
return parallel(
delayed(func)(
seed=k,**params)
for k in range(nboot))
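# bootstrap_of_fcn runs `func` nboot times in parallel via joblib, passing the
# replicate index k as `seed` so each bootstrap uses a different train/test
# split (or permutation); it is called below with
# train_test_val_top_fold_01_within and its permuted counterpart.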
def get_performance(lst):
perf = (pd.
concat(lst,keys=range(len(lst))).
reset_index(level=1,drop=True).
rename_axis('bootstrap').
reset_index()
)
return perf
def model_feature_importances(boot_mods):
dfs = []
X = params['X'].copy()
X.loc[:,'Intercept'] = 0
for i in range(len(boot_mods)):
for j in boot_mods[i].keys():
mod = boot_mods[i][j]
coef = []
try:
coef.extend([i for i in mod.feature_importances_])
except:
coef.extend([i for i in mod.coef_[0]])
coef.extend(mod.intercept_)
fs = []
fs.extend(X.columns.values)
df = pd.DataFrame({
'Feature' : fs,
'Gene_name' : (X.T.
join(idmap_sub.
set_index('Protein'),how='left').
Gene_name.values),
'Importance' : coef,
'Model' : j,
'Bootstrap' : i
})
dfs.append(df)
return pd.concat(dfs,sort=True)
def patient_predictions(lst):
col = pd.concat(lst).index.name
dat = \
(pd.
concat(
lst
).
reset_index().
rename(columns={col : 'Sample'}).
set_index('Sample').
join(all_cov_df).
reset_index().
melt(id_vars=['Sample','bootstrap','model','y_true','y_pred','y_proba'],
var_name='cohort',value_name='mem')
)
dat.cohort = dat.cohort.str.split('_').apply(lambda x : x[1])
dat = dat[dat.mem==1].drop('mem',1).reset_index(drop=True)
return dat
import itertools
clin_combos = [[list(i) for i in itertools.combinations(
np.intersect1d(
X_all_clinical.columns.values,
X_all_clinical.columns.values),r)
] for r in np.arange(1,2)]
prot_combos = [[list(i) for i in itertools.combinations(
np.intersect1d(
X_all_proteins.columns.values,
X_all_proteins.columns.values),r)
] for r in np.arange(1,2)]
all_clin_1 = list(np.concatenate(list(itertools.chain(*clin_combos))))
print(len(all_clin_1))
all_prot_1 = list(np.concatenate(list(itertools.chain(*prot_combos))))
print(len(all_prot_1))
all_clin_1_and_prot_1 = list(
itertools.chain(*[all_clin_1,all_prot_1])
)
print(len(all_clin_1_and_prot_1))
all_clin_1_prot_1 = list(
itertools.chain(*
[[list(itertools.chain(*[[x],[y]])) for x in all_prot_1] for y in all_clin_1]
)
)
print(len(all_clin_1_prot_1))
all_clin_1_prot_1_and_clin_1_and_prot_1 = list(
itertools.chain(*[all_clin_1,all_prot_1,all_clin_1_prot_1])
)
print(len(all_clin_1_prot_1_and_clin_1_and_prot_1))
all_clin_2 = [list(i) for i in itertools.combinations(all_clin_1,2)]
print(len(all_clin_2))
all_prot_2 = [list(i) for i in itertools.combinations(all_prot_1,2)]
print(len(all_prot_2))
all_clin_1_prot_1_and_prot_2 = list(
itertools.chain(*[all_clin_1_prot_1,all_prot_2])
)
len(all_clin_1_prot_1_and_prot_2)
all_clin_2_and_clin_1_prot_1_and_prot_2 = list(
itertools.chain(*[all_clin_2,all_clin_1_prot_1,all_prot_2])
)
len(all_clin_2_and_clin_1_prot_1_and_prot_2)
all_clin_2_and_clin_1_prot_1_and_prot_2_and_clin_1_and_prot_1 = list(
itertools.chain(*[all_clin_2,all_clin_1_prot_1,all_prot_2,all_clin_1,all_prot_1])
)
print(len(all_clin_2_and_clin_1_prot_1_and_prot_2_and_clin_1_and_prot_1))
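# Of the feature-set combinations built above, only all_clin_1_and_prot_1
# (each single clinical variable and each single protein, always joined with
# the cohort covariates) is iterated over in the bootstrap loop below.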
t0 = time.time()
fimps_dfs = []
perf_dfs = []
ppreds_dfs = []
perm_fimps_dfs = []
perm_perf_dfs = []
perm_ppreds_dfs = []
feature_set = {}
for i,features in enumerate(all_clin_1_and_prot_1):
if features in all_cov_df.columns:
continue
print(features)
print(i)
X_all = X_all_proteins.join(X_all_clinical)
if type(features)==np.str_:
X = X_all[[features]]
if type(features)==list:
X = X_all[features]
feature_set[str(i)] = X.columns.tolist()
params.update({'X' : X.join(all_cov_df),'models' : models.copy()})
lst = bootstrap_of_fcn(func=train_test_val_top_fold_01_within,
params=params,n_jobs=n_jobs,nboot=nboot)
perf = get_performance([lst[i][0] for i in range(len(lst))])
perf['set'] = str(i)
perf_dfs.append(perf)
fimps = model_feature_importances([lst[i][1] for i in range(len(lst))])
fimps['set'] = str(i)
fimps_dfs.append(fimps)
ppreds = patient_predictions([lst[i][2] for i in range(len(lst))])
ppreds['set'] = str(i)
ppreds_dfs.append(ppreds)
lst = bootstrap_of_fcn(func=permuted_train_test_val_top_fold_01_within,
params=params,n_jobs=n_jobs,nboot=nboot)
perm_perf = get_performance([lst[i][0] for i in range(len(lst))])
perm_perf['set'] = str(i)
perm_perf_dfs.append(perm_perf)
perm_fimps = model_feature_importances([lst[i][1] for i in range(len(lst))])
perm_fimps['set'] = str(i)
perm_fimps_dfs.append(perm_fimps)
perm_ppreds = patient_predictions([lst[i][2] for i in range(len(lst))])
perm_ppreds['set'] = str(i)
perm_ppreds_dfs.append(perm_ppreds)
perf_df = (pd.concat(perf_dfs).
groupby(['set'])['validation_roc_auc'].
describe(percentiles=[0.025,0.975]).
loc[:,['2.5%','mean','97.5%']].
sort_values('2.5%',ascending=False).
reset_index()
)
fimps_df = (pd.concat(fimps_dfs).
groupby(['set','Feature'])['Importance'].
describe(percentiles=[0.025,0.975]).
loc[:,['2.5%','mean','97.5%']].
sort_values('2.5%',ascending=False).
reset_index()
)
ppreds_df = (pd.concat(ppreds_dfs))
perm_perf_df = (pd.concat(perm_perf_dfs).
groupby(['set'])['validation_roc_auc'].
describe(percentiles=[0.025,0.975]).
loc[:,['2.5%','mean','97.5%']].
sort_values('2.5%',ascending=False).
reset_index()
)
perm_fimps_df = (pd.concat(perm_fimps_dfs).
groupby(['set','Feature'])['Importance'].
describe(percentiles=[0.025,0.975]).
loc[:,['2.5%','mean','97.5%']].
sort_values('2.5%',ascending=False).
reset_index()
)
perm_ppreds_df = (pd.concat(perm_ppreds_dfs))
t1_all = time.time()
print(np.round( (t1_all - t0_all) / 60, 2 ) )
perf_df = (
perf_df.
set_index('set').
join(
pd.DataFrame(
feature_set.items(),columns=['set','set_features']
).
set_index('set')
).
sort_values('2.5%')
)
perf_df.to_csv(dir_+'mortality_predictions_'+type_+'_performance_survival_wcovs.csv')
fimps_df = (
fimps_df.
set_index('set').
join(
pd.DataFrame(
feature_set.items(),columns=['set','set_features']
).
set_index('set')
).
sort_values('2.5%')
)
fimps_df.to_csv(dir_+'mortality_predictions_'+type_+'_feature_importance_survival_wcovs.csv')
ppreds_df.to_csv(dir_+'mortality_predictions_'+type_+'_patient_predictions_survival_wcovs.csv')
pd.concat(fimps_dfs).to_csv(dir_+'mortality_predictions_'+type_+'_full_feature_importance_survival_wcovs.csv')
| pd.concat(perm_fimps_dfs) | pandas.concat |
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.model_selection import cross_val_score
from sklearn.neighbors import KNeighborsRegressor
datos_originales = pd.read_csv('bones_mineral_density.csv')
datos = datos_originales[['age', 'gender', 'spnbmd']]
datos_male = datos[datos['gender']=='male']
datos_male = datos_male.sort_values('age')
n_male, p = datos_male.shape
x_male = datos_male['age']
y_male = datos_male['spnbmd']
datos_female = datos[datos['gender']=='female']
datos_female = datos_female.sort_values('age')
n_female, p = datos_female.shape
x_female = datos_female['age']
y_female = datos_female['spnbmd']
knn_1_male = KNeighborsRegressor(n_neighbors=1)
knn_1_male.fit(pd.DataFrame(x_male), y_male)
knn_1_female = KNeighborsRegressor(n_neighbors=1)
knn_1_female.fit(pd.DataFrame(x_female), y_female)
x_vect = pd.DataFrame(pd.Series(np.linspace(9, 26, 100), name='age'))
male_predict = knn_1_male.predict(x_vect)
female_predict = knn_1_female.predict(x_vect)
plt.figure(figsize=(10,7.5))
sns.scatterplot(data=datos, x='age', y='spnbmd', hue='gender')
plt.plot(x_vect, male_predict, color = 'DodgerBlue', linewidth = 2)
plt.plot(x_vect, female_predict, color = 'DeepPink', linewidth = 2)
plt.legend()
# choose k from 1 to 30
k_range = range(1, 31)
k_scores = []  # use iteration to calculate the mean cross-validated score for each k
for k in k_range:
knn = KNeighborsRegressor(n_neighbors=k)
scores = cross_val_score(knn, pd.DataFrame(x_male), y_male, cv=5)
k_scores.append(scores.mean())# plot to see clearly
plt.plot(k_range, k_scores)
plt.xlabel('Value of K for KNN')
plt.ylabel('Cross-Validated Accuracy')
k_male_star = np.where(np.array(k_scores) == max(k_scores))[0][0] + 1
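# +1 maps the 0-based position of the best mean score back to the k value,
# since k_range starts at 1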
# choose k from 1 to 30
k_range = range(1, 31)
k_scores = []  # use iteration to calculate the mean cross-validated score for each k
for k in k_range:
knn = KNeighborsRegressor(n_neighbors=k)
scores = cross_val_score(knn, pd.DataFrame(x_female), y_female, cv=5)
k_scores.append(scores.mean())# plot to see clearly
plt.plot(k_range, k_scores)
plt.xlabel('Value of K for KNN')
plt.ylabel('Cross-Validated Accuracy')
k_female_star = np.where(np.array(k_scores) == max(k_scores))[0][0] + 1
k_male_star
k_female_star
knn_2_male = KNeighborsRegressor(n_neighbors=k_male_star)
knn_2_male.fit(pd.DataFrame(x_male), y_male)
knn_2_female = KNeighborsRegressor(n_neighbors=k_female_star)
knn_2_female.fit( | pd.DataFrame(x_female) | pandas.DataFrame |
from nltk.corpus import stopwords
from nltk.stem import WordNetLemmatizer, SnowballStemmer
from nltk.stem.porter import *
import string
from operator import itemgetter
# Importing Gensim
import gensim
from gensim import corpora, models
from gensim.models.coherencemodel import CoherenceModel
import pandas as pd
class TextAnalysis(object):
def __init__(self):
pass
def tokenize(self, input_files, param, tool_id):
data_to_return = {"data":{}}
ok_to_process = False
# Check the tool needs
# -----
if "d-gen-text" in input_files:
if len(input_files["d-gen-text"]):
ok_to_process = True
if not ok_to_process:
res_err = {"data":{}}
res_err["data"]["error"] = "Input data missing!"
return res_err
# -----
# Params
# -----
p_stopwords = "none" #string e.g. "English"
p_lemmatize_lang = "none"
if param != None:
if "p-defstopwords" in param:
p_stopwords = str(param["p-defstopwords"])
if "p-deflemmatize" in param:
p_lemmatize_lang = str(param["p-deflemmatize"])
# Data (Input Documents)
# use pandas and convert to DataFrame
# -----
documents = {}
for file_k in input_files["d-gen-text"]:
documents[file_k] = input_files["d-gen-text"][file_k]
docs_df = pd.DataFrame.from_dict(documents, orient='index', columns=["content"])
def read_csv_rows(obj_data):
res = []
for file_name in obj_data:
for a_row in obj_data[file_name]:
for a_val in a_row:
#normalize and append
a_val = a_val.lower()
a_val = a_val.strip()
res.append(a_val)
return res
stopwords_data = set()
if "d-stopwords" in input_files:
if len(input_files["d-stopwords"]) > 0:
for f in input_files["d-stopwords"]:
stopwords_data = stopwords_data.union(set(read_csv_rows(input_files["d-stopwords"])))
tokens_data = set()
if "d-tokens" in input_files:
if len(input_files["d-tokens"]) > 0:
for f in input_files["d-tokens"]:
tokens_data = tokens_data.union(set(read_csv_rows(input_files["d-tokens"])))
if p_stopwords != "none":
stopwords_data = stopwords_data.union(set(stopwords.words(p_stopwords)))
def lemmatize_stemming(text,lang):
#lemmatize only if the token includes one word
if (" " not in text) and ("-" not in text):
stemmer = SnowballStemmer(lang)
return stemmer.stem(WordNetLemmatizer().lemmatize(text, pos='v'))
else:
return text
def adhoc_tokens_list(l_token):
if len(l_token) == 1:
token = l_token[0]
if "-" in token:
new_l = []
for elem in token.split("-"):
if elem != "":
new_l.append(elem)
return adhoc_tokens_list(new_l)
elif " " in token:
new_l = []
for elem in token.split(" "):
if elem != "":
new_l.append(elem)
return adhoc_tokens_list(new_l)
else:
return l_token
else:
mid = len(l_token)//2
return adhoc_tokens_list(l_token[:mid]) + adhoc_tokens_list(l_token[mid:])
def update_tokenlist(org_token_l, adhoc_token_l, adhoc_token):
index = 0
res = []
while (len(org_token_l) - index) >= len(adhoc_token_l):
part_of_org = org_token_l[index:index+len(adhoc_token_l)]
if adhoc_token_l == part_of_org:
#res += part_of_org + [adhoc_token] #in case i want to include the single words
res.append(adhoc_token) #in case i want to include only the adhoc token
index += len(adhoc_token_l)
else:
res.append(org_token_l[index])
index += 1
res += org_token_l[index:]
return res
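# Example: with the (hypothetical) ad-hoc token "heart failure", update_tokenlist
# turns ['acute', 'heart', 'failure'] into ['acute', 'heart failure'], keeping
# only the ad-hoc token in place of the matched words.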
# Given a text, this function:
# (1) creates a list with all the single words;
# (2) includes the ad-hoc defined tokens;
# (3) lemmatizes the tokens and removes the stopwords
def tokenize_text(text):
result = []
#add the automatic detected tokens
for token in gensim.utils.simple_preprocess(text):
result.append(token)
#add the ad-hoc tokens
for ad_hoc_token in tokens_data:
adhoc_token_l = adhoc_tokens_list([ad_hoc_token])
result = update_tokenlist(result, adhoc_token_l, ad_hoc_token)
#remove stopwords and create lemmatize form
clean_result = []
for token in result:
if token not in stopwords_data and len(token) > 3:
if p_lemmatize_lang != "none":
clean_result.append(lemmatize_stemming(token,p_lemmatize_lang))
else:
clean_result.append(token)
return clean_result
processed_docs = docs_df['content'].map(tokenize_text)
processed_docs_ldict = []
for k,doc in processed_docs.items():
processed_docs_ldict.append({"index":k,"value":doc})
data_to_return["data"]["d-processed-corpus"] = {"processed_corpus": processed_docs_ldict}
return data_to_return
def build_corpus(self, input_files, param, tool_id):
data_to_return = {"data":{}}
ok_to_process = False
#Check the MUST Prerequisite
# Check Restrictions
if "d-processed-corpus" in input_files:
if len(input_files["d-processed-corpus"]):
ok_to_process = True
if not ok_to_process:
res_err = {"data":{}}
res_err["data"]["error"] = "Input data missing!"
return res_err
# convert to data series
indexes = []
values = []
# in this case i expect only 1 file
for file_k in input_files["d-processed-corpus"]:
for d in input_files["d-processed-corpus"][file_k]:
indexes.append(d["index"])
values.append(d["value"])
processed_docs = pd.Series(values, index =indexes)
#The params
#---------
p_model = None #string e.g. "English"
if param != None:
if "p-corpusmodel" in param:
p_model = str(param["p-corpusmodel"])
# -> Create the dictionary of words containing the number of times a word appears in the training set
# -> Optionally filter out extreme tokens (e.g. those appearing in fewer than 15 documents
# -> or in more than 50% of the documents, keeping only the 100000 most frequent); this
# -> step (dictionary.filter_extremes) is disabled below.
# -----
dictionary = gensim.corpora.Dictionary(processed_docs)
#dictionary.filter_extremes()
index_corpus = []
vec_corpus = []
for k,doc in processed_docs.items():
vec_corpus.append(dictionary.doc2bow(doc))
index_corpus.append(k)
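# vec_corpus now holds one sparse bag-of-words vector per document, i.e. a list
# of (token_id, count) tuples; the optional TF-IDF step below re-weights those counts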
# TF-IDF
if p_model == "tfidf":
tfidf = models.TfidfModel(vec_corpus)
vec_corpus = tfidf[vec_corpus]
#The returned data must include a recognizable key and the data associated to it
# -----
vec_corpus_ldict = []
for i in range(0,len(vec_corpus)):
vec_corpus_ldict.append({"index":index_corpus[i],"value":vec_corpus[i]})
data_to_return["data"]["d-model-corpus"] = {"modelled_corpus": vec_corpus_ldict}
data_to_return["data"]["d-dictionary-corpus"] = {"dictionary": dictionary}
return data_to_return
def lda(self, input_files, param, tool_id):
data_to_return = {"data":{}}
ok_to_process = False
# Check the tool needs
# -----
if "d-model-corpus" in input_files and "d-dictionary-corpus" in input_files:
ok_to_process = len(input_files["d-model-corpus"]) and len(input_files["d-dictionary-corpus"])
if not ok_to_process:
res_err = {"data":{}}
res_err["data"]["error"] = "Input data missing!"
return res_err
corpus = []
for file_k in input_files["d-model-corpus"]:
for d in input_files["d-model-corpus"][file_k]:
corpus.append(d["value"])
dictionary = None
for file_k in input_files["d-dictionary-corpus"]:
dictionary = input_files["d-dictionary-corpus"][file_k]
# Params
# -----
p_num_topics = 2 #int number
if param != None:
if "p-topic" in param:
p_num_topics = int(param["p-topic"])
# Running LDA
# -----
try:
ldamodel = gensim.models.LdaMulticore(corpus, eval_every = 1, num_topics=p_num_topics, id2word=dictionary, passes=5, workers=2)
except:
res_err = {"data":{}}
res_err["data"]["error"] = "Incompatible data have been given as input to the LDA algorithm"
return res_err
data_to_return["data"]["d-gensimldamodel"] = {"ldamodel": ldamodel}
return data_to_return
def doc_prop_topics(self, input_files, param, tool_id):
data_to_return = {"data":{}}
ok_to_process = False
# Check the tool needs
# -----
if "d-model-corpus" in input_files and "d-gensimldamodel" in input_files:
ok_to_process = len(input_files["d-model-corpus"]) and len(input_files["d-gensimldamodel"])
if not ok_to_process:
res_err = {"data":{}}
res_err["data"]["error"] = "Input data missing!"
return res_err
corpus = []
corpus_doc_index = []
for file_k in input_files["d-model-corpus"]:
for d in input_files["d-model-corpus"][file_k]:
corpus.append(d["value"])
corpus_doc_index.append(d["index"])
ldamodel = None
for file_k in input_files["d-gensimldamodel"]:
ldamodel = input_files["d-gensimldamodel"][file_k]
# Params
# -----
def _doc_topics(ldamodel, corpus, corpus_doc_index):
## doc_topics_l -> [ [0.23,0.4, ... <num_topics> ] [] [] ... []]
doc_topics = ldamodel.get_document_topics(corpus, minimum_probability=0)
doc_topics_l = []
for l in doc_topics:
doc_topics_l.append([tup[1] for tup in l])
return pd.DataFrame(doc_topics_l, columns = list(range(1, ldamodel.num_topics + 1)), index = corpus_doc_index)
df_doc_topics = _doc_topics(ldamodel,corpus, corpus_doc_index)
df_doc_topics.index.names = ['doc']
df_doc_topics = df_doc_topics.reset_index()
l_doc_topics = [df_doc_topics.columns.values.tolist()] + df_doc_topics.values.tolist()
data_to_return["data"]["d-doc-topics-table"] = {"doc_topics": l_doc_topics}
return data_to_return
def words_prop_topics(self, input_files, param, tool_id):
data_to_return = {"data":{}}
ok_to_process = False
# Check the tool needs
# -----
if "d-model-corpus" in input_files and "d-gensimldamodel" in input_files:
ok_to_process = len(input_files["d-model-corpus"]) and len(input_files["d-gensimldamodel"])
if not ok_to_process:
res_err = {"data":{}}
res_err["data"]["error"] = "Input data missing!"
return res_err
corpus = []
corpus_doc_index = []
for file_k in input_files["d-model-corpus"]:
for d in input_files["d-model-corpus"][file_k]:
corpus.append(d["value"])
corpus_doc_index.append(d["index"])
ldamodel = None
for file_k in input_files["d-gensimldamodel"]:
ldamodel = input_files["d-gensimldamodel"][file_k]
# Params
# -----
topnum_words = 10 #int number
if param != None:
if "p-numwords" in param:
topnum_words = int(param["p-numwords"])
def _word_topics(ldamodel, corpus, corpus_doc_index):
topics = []
for t_index in range(0, ldamodel.num_topics):
wp = ldamodel.show_topic(t_index, topn=topnum_words)
topic_keywords = [[t_index + 1,word,prop] for word, prop in wp]
topic_keywords = sorted(topic_keywords, key=itemgetter(2), reverse=True)
topics = topics + topic_keywords
return | pd.DataFrame(topics, columns = ["topic","word","prop"]) | pandas.DataFrame |
import pandas as pd
import pytest
from rdtools.normalization import normalize_with_expected_power
from pandas import Timestamp
import numpy as np
@pytest.fixture()
def times_15():
return pd.date_range(start='20200101 12:00', end='20200101 13:00', freq='15T')
@pytest.fixture()
def times_30():
return pd.date_range(start='20200101 12:00', end='20200101 13:00', freq='30T')
@pytest.fixture()
def pv_15(times_15):
return pd.Series([1.0, 2.5, 3.0, 2.2, 2.1], index=times_15)
@pytest.fixture()
def expected_15(times_15):
return pd.Series([1.2, 2.3, 2.8, 2.1, 2.0], index=times_15)
@pytest.fixture()
def irradiance_15(times_15):
return pd.Series([1000.0, 850.0, 950.0, 975.0, 890.0], index=times_15)
@pytest.fixture()
def pv_30(times_30):
return pd.Series([1.0, 3.0, 2.1], index=times_30)
@pytest.fixture()
def expected_30(times_30):
return pd.Series([1.2, 2.8, 2.0], index=times_30)
@pytest.fixture()
def irradiance_30(times_30):
return pd.Series([1000.0, 950.0, 890.0], index=times_30)
def test_normalize_with_expected_power_uniform_frequency(pv_15, expected_15, irradiance_15):
norm, insol = normalize_with_expected_power(
pv_15, expected_15, irradiance_15)
expected_norm = pd.Series(
{Timestamp('2020-01-01 12:15:00', freq='15T'): 1.0,
Timestamp('2020-01-01 12:30:00', freq='15T'): 1.0784313725490198,
Timestamp('2020-01-01 12:45:00', freq='15T'): 1.0612244897959184,
Timestamp('2020-01-01 13:00:00', freq='15T'): 1.0487804878048783}
)
expected_norm.name = 'energy_Wh'
expected_norm.index.freq = '15T'
expected_insol = pd.Series(
{Timestamp('2020-01-01 12:15:00', freq='15T'): 231.25,
Timestamp('2020-01-01 12:30:00', freq='15T'): 225.0,
Timestamp('2020-01-01 12:45:00', freq='15T'): 240.625,
Timestamp('2020-01-01 13:00:00', freq='15T'): 233.125}
)
expected_insol.name = 'energy_Wh'
expected_insol.index.freq = '15T'
pd.testing.assert_series_equal(norm, expected_norm)
pd.testing.assert_series_equal(insol, expected_insol)
def test_normalize_with_expected_power_energy_option(pv_15, expected_15, irradiance_15):
norm, insol = normalize_with_expected_power(
pv_15, expected_15, irradiance_15, pv_input='energy')
expected_norm = pd.Series(
{Timestamp('2020-01-01 12:00:00', freq='15T'): np.nan,
Timestamp('2020-01-01 12:15:00', freq='15T'): 5.714285714285714,
Timestamp('2020-01-01 12:30:00', freq='15T'): 4.705882352941177,
Timestamp('2020-01-01 12:45:00', freq='15T'): 3.5918367346938775,
Timestamp('2020-01-01 13:00:00', freq='15T'): 4.097560975609756}
)
expected_norm.name = 'energy_Wh'
expected_norm.index.freq = '15T'
expected_insol = pd.Series(
{Timestamp('2020-01-01 12:00:00', freq='15T'): np.nan,
Timestamp('2020-01-01 12:15:00', freq='15T'): 231.25,
Timestamp('2020-01-01 12:30:00', freq='15T'): 225.0,
Timestamp('2020-01-01 12:45:00', freq='15T'): 240.625,
Timestamp('2020-01-01 13:00:00', freq='15T'): 233.125}
)
expected_insol.name = 'energy_Wh'
expected_insol.index.freq = '15T'
pd.testing.assert_series_equal(norm, expected_norm)
pd.testing.assert_series_equal(insol, expected_insol)
def test_normalize_with_expected_power_low_freq_pv(pv_30, expected_15, irradiance_15):
norm, insol = normalize_with_expected_power(
pv_30, expected_15, irradiance_15)
expected_norm = pd.Series(
{Timestamp('2020-01-01 12:30:00', freq='30T'): 0.9302325581395349,
Timestamp('2020-01-01 13:00:00', freq='30T'): 1.1333333333333333}
)
expected_norm.name = 'energy_Wh'
expected_norm.index.freq = '30T'
expected_insol = pd.Series(
{Timestamp('2020-01-01 12:30:00', freq='30T'): 456.25,
Timestamp('2020-01-01 13:00:00', freq='30T'): 473.75}
)
expected_insol.name = 'energy_Wh'
expected_insol.index.freq = '30T'
pd.testing.assert_series_equal(norm, expected_norm)
pd.testing.assert_series_equal(insol, expected_insol)
def test_normalized_with_expected_power_low_freq_expected(pv_15, expected_30, irradiance_30):
norm, insol = normalize_with_expected_power(
pv_15, expected_30, irradiance_30)
expected_norm = pd.Series(
{ | Timestamp('2020-01-01 12:15:00', freq='15T') | pandas.Timestamp |
"""
Load the data from different cohorts separately.
"""
import logging
import torch
import numpy as np
import pandas as pd
from sklearn import preprocessing
from sklearn.metrics.pairwise import cosine_similarity
from statsmodels import robust
from torch_geometric.utils import dense_to_sparse, to_dense_batch
from torch_geometric.data import Data, DataLoader
from torch.utils.data import Dataset
def construct_graph(opt, features, keep_idx):
######################################
########### Similarity graph #########
######################################
if opt.which_graph == "similarity":
logging.info("Graph initialization with similarity matrix.")
similarity_matrix = cosine_similarity(features.T)
# print(np.max(similarity_matrix))
# pd.DataFrame(similarity_matrix, index=feat_name, columns=feat_name).to_csv("similarity.csv")
# print("mean similarity:", np.mean(similarity_matrix))
adj_thresh = 0.9766250264658827
adj_matrix = torch.LongTensor(np.where(similarity_matrix > adj_thresh, 1, 0))
edge_index = dense_to_sparse(torch.LongTensor(adj_matrix))[0]
print("edge index:", edge_index)
logging.info("Number of edges: {:04d}\n".format(edge_index.shape[1]))
logging.info('Number of singleton nodes: {:04d}\n'.format(
torch.sum(torch.sum(adj_matrix, dim=1) == 1).detach().numpy()))
edge_matrix = edge_index.cpu().detach().numpy()
pd.DataFrame(edge_matrix).to_csv('./similarity_graphs/' + 'TCGA_sim_graph.csv')
print(edge_matrix)
return adj_matrix, edge_index
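# A minimal sketch (hypothetical 3x3 similarity matrix, illustrative helper not part of the
# original pipeline) of the thresholding performed in construct_graph: entries above the
# threshold become 1s and dense_to_sparse turns the resulting 0/1 matrix into an edge_index tensor.
def _demo_threshold_to_edge_index():
    sim = np.array([[1.00, 0.98, 0.20],
                    [0.98, 1.00, 0.30],
                    [0.20, 0.30, 1.00]])
    adj = torch.LongTensor(np.where(sim > 0.9, 1, 0))
    # edge_index has shape [2, num_edges]; here the self-loops plus the (0,1)/(1,0) pair
    return dense_to_sparse(adj)[0]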
def load_train_data():
root_path = './CRC_dataset/'
file_path = root_path + 'tcga.txt'
train_data = pd.read_table(file_path, sep=' ', header=0)
sample_id = list(train_data.index)
gene_names = list(train_data)
train_data = np.array(train_data)
norm_train_data = preprocessing.MinMaxScaler().fit_transform(train_data)
all_labels = pd.read_table(root_path + 'labels.txt', sep=' ', header=0)\
.replace(['CMS1', 'CMS2', 'CMS3', 'CMS4'], [0, 1, 2, 3])
train_labels = np.array(all_labels.loc[all_labels['sample'].isin(sample_id)]['CMS_network'])
print("Train features:", train_data.shape)
return train_data, norm_train_data, train_labels, sample_id, gene_names
def load_test_data(train_genes, cohorts=np.array(['gse13067', 'gse13294', 'gse14333', 'gse17536', 'gse20916', 'gse2109',
'gse35896', 'gse37892', 'gse39582', 'kfsyscc', 'petacc3'])):
root_path = './CRC_dataset/'
all_test_data, all_test_labels, all_sample_ids, all_test_cohorts = None, None, None, None
all_labels = pd.read_table(root_path + 'labels.txt', sep=' ', header=0)\
.replace(['CMS1', 'CMS2', 'CMS3', 'CMS4'], [0, 1, 2, 3])
for i in range(cohorts.shape[0]):
test_file = root_path + cohorts[i] + '.txt'
test_data = | pd.read_table(test_file, sep=' ', header=0) | pandas.read_table |
import pandas as pd
import time
from bs4 import BeautifulSoup
import requests
import sys
import random
def get_headers():
"""
    Generate a dictionary with the header data. It includes a list of different user agents, from which one
    is chosen at random.
"""
uastrings = [
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/38.0.2125.111 Safari/537.36",
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/28.0.1500.72 Safari/537.36",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10) AppleWebKit/600.1.25 (KHTML, like Gecko) Version/8.0 Safari/600.1.25",
"Mozilla/5.0 (Windows NT 6.1; WOW64; rv:33.0) Gecko/20100101 Firefox/33.0",
"Mozilla/5.0 (Windows NT 6.3; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/38.0.2125.111 Safari/537.36",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/38.0.2125.111 Safari/537.36",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_5) AppleWebKit/600.1.17 (KHTML, like Gecko) Version/7.1 Safari/537.85.10",
"Mozilla/5.0 (Windows NT 6.1; WOW64; Trident/7.0; rv:11.0) like Gecko",
"Mozilla/5.0 (Windows NT 6.3; WOW64; rv:33.0) Gecko/20100101 Firefox/33.0",
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/38.0.2125.104 Safari/537.36",
"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4389.114 Safari/537.36"
]
headers = {
"User-Agent": random.choice(uastrings),
}
return headers
def get_categories(year):
"""
    Takes the awards URL for year X as input and returns a list of tuples
    with the category title and the URL of its winner and nominees.
    Inputs:
        year: year to read
    Returns:
        list_cat: list of (category, url) tuples
"""
url_goodreads = 'https://www.goodreads.com'
url_awards = f'{url_goodreads}/choiceawards/best-books-{year}'
page_awards = requests.get(url_awards)
soup_main = BeautifulSoup(page_awards.content, 'html.parser')
elements = soup_main.find_all(class_ = 'category clearFix')
list_cat = []
for elem in elements:
element_category = elem.a.text.replace('\n', '')
url_best_cat = f"{url_goodreads}{elem.a.get('href')}"
list_cat.append((element_category, url_best_cat))
return list_cat
def scrap_winner_page(winner_cat_url):
"""
    Scrapes the category page and extracts the title,
    the number of votes and the URL (path) of the book.
    Inputs
        winner_cat_url: URL of the category winner page
    Returns:
        title: Book title
        num_votes: Number of votes
        url_book: Path within the site where the book
            page is located
"""
page_cat_winner = requests.get(winner_cat_url)
soup_cat = BeautifulSoup(page_cat_winner.content, 'html.parser')
title = soup_cat.find(class_ = 'winningTitle choice gcaBookTitle').text
num_votes = int(soup_cat.find(class_ = 'greyText gcaNumVotes').text \
.replace(',', '') \
.replace('\n', '') \
.replace('votes', ''))
url_book = soup_cat.find(class_ = 'winningTitle choice gcaBookTitle').get('href')
return title, num_votes, url_book
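# A minimal sketch (hypothetical raw string, not scraped from Goodreads; illustrative helper
# only) of the vote-count clean-up used in scrap_winner_page: commas, newlines and the word
# "votes" are stripped before the remainder is cast to int.
def _demo_parse_votes(raw='\n 123,456 votes\n'):
    return int(raw.replace(',', '').replace('\n', '').replace('votes', ''))  # -> 123456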
def get_databox(soup_book):
"""
    Returns a dictionary with the data from the databox element of each book.
    Inputs:
        soup_book: soup element of the book
    Returns:
        dict_databox: dictionary with the results
"""
    # read the book data box table:
databox_key = soup_book.find('div' , id = 'bookDataBox').find_all('div', class_ = 'infoBoxRowTitle')
databox_key = [elem.text.strip() for elem in databox_key]
databox_value = soup_book.find('div' , id = 'bookDataBox').find_all('div', class_ = 'infoBoxRowItem')
databox_value = [elem.text.strip() for elem in databox_value]
dict_databox = {key:value for key, value in zip(databox_key, databox_value)}
return dict_databox
def load_data_category(cat_elem):
"""
    Scrapes the URL of a category's winning book and returns a dictionary with
    the data
    Inputs:
        cat_elem: information tuple [category, url]
    Returns:
        dict_book: Dictionary with the following information:
            category: Category in which the book won
            title: Title
            votes: Number of votes
            author_name: Author name
            book_series: Series the book belongs to
            rating_value: Goodreads rating
            num_ratings: Number of ratings
            num_reviews: Number of reviews
            list_genres: List of genres associated with the book
            book_format: Book format
            num_pages: Number of pages
            publish_date: Publication date
            publisher: Publisher
            original_title: Original title
            isbn: ISBN
            edition_language: Edition language
            setting: Place where the book takes place
            num_awards: Number of awards received
"""
dict_book = {}
url_goodreads = 'https://www.goodreads.com'
name_cat = cat_elem[0]
winner_cat_url = cat_elem[1]
title, votes, url_book = scrap_winner_page(winner_cat_url)
    time.sleep(0.5) # slow down the scraping rate
url_book = f"{url_goodreads}{url_book}"
dict_book['category'] = name_cat
dict_book['title'] = title
dict_book['votes'] = votes
book_page = requests.get(url_book)
soup_book = BeautifulSoup(book_page.content, 'html.parser')
    # author
try:
author_name = soup_book.find(class_ = 'authorName').text
except:
        author_name = soup_book.find_all(class_ = 'authorName')[0].text
dict_book['author_name'] = author_name
# book series
try:
book_series = soup_book.find('h2', id = "bookSeries").text.strip()
    except: # this may be unnecessary
        # raises an error if the bookSeries value does not exist; assign None
        book_series = None
        # the page returns the following when the book has no series
# <h2 id="bookSeries">
# </h2>
dict_book['book_series'] = book_series
    # numeric rating
rating_value = soup_book.find(itemprop = "ratingValue").text.strip()
dict_book['rating_value'] = rating_value
    # number of ratings
num_ratings = int(soup_book.find('meta' ,
itemprop = 'ratingCount') \
.text.strip() \
.split('\n')[0] \
.replace(',', ''))
dict_book['num_ratings'] = num_ratings
    # number of reviews
num_reviews = int(soup_book.find('meta' ,
itemprop = 'reviewCount') \
.text.strip() \
.split('\n')[0] \
.replace(',', ''))
dict_book['num_reviews'] = num_reviews
    # goodreads genres
list_gen = [soup_tag.text for soup_tag in soup_book.find_all('a' , class_ = 'actionLinkLite bookPageGenreLink')]
list_gen = '_'.join(list(dict.fromkeys(list_gen)))
dict_book['list_genres'] = list_gen
    # cover type / book format
book_format = soup_book.find('span' ,
itemprop = 'bookFormat').text
dict_book['book_format'] = book_format
    # number of pages
num_pages = int(soup_book.find('span' ,
itemprop = 'numberOfPages') \
.text.split(' ')[0])
dict_book['num_pages'] = num_pages
    # publication date
publish_date = soup_book.find('div' , id = 'details') \
.find_all('div', class_='row')[1] \
.text.strip().split('\n')[1] \
.strip()
dict_book['publish_date'] = publish_date
    # publisher name
publisher = soup_book.find('div' , id = 'details') \
.find_all('div', class_='row')[1] \
.text.strip() \
.split('\n')[2] \
.replace('by', '') \
.strip()
dict_book['publisher'] = publisher
    # extract the expandable book information table
databox = get_databox(soup_book)
    # original title
try:
original_title = databox['Original Title']
except:
original_title = None
dict_book['original_title'] = original_title
    # isbn, if present
try:
isbn = databox['ISBN'].split('\n')[0]
except:
        # not present in the databox
isbn = None
dict_book['isbn'] = isbn
# edition language
try:
edition_language = databox['Edition Language']
except:
edition_language = None
dict_book['edition_language'] = edition_language
# setting
try:
setting = databox['Setting']
setting = setting.split('\n')[0].strip()
except:
setting = None
dict_book['setting'] = setting
    # number of awards
try:
num_awards = len(databox['Literary Awards'] \
.replace('...more', ', ') \
.replace('\n', '') \
.replace('...less', '') \
.split(', '))
except:
num_awards = None
dict_book['num_awards'] = num_awards
return dict_book
if __name__ == '__main__':
if len(sys.argv) == 1:
print("[ERROR] Please give as input at least one year of data")
else:
year = int(sys.argv[1])
namefile = f'csv/goodreads_awards_{sys.argv[1]}.csv'
print(f"[INFO] Reading awards data from year {year}...")
list_cat = get_categories(year)
dict_book_list = []
for cat_elem in list_cat:
print(f"[INFO] + category: {cat_elem[0]}")
dict_book_result = load_data_category(cat_elem)
dict_book_list.append(dict_book_result)
if len(dict_book_list) > 0:
df_result_year = pd.DataFrame([ | pd.Series(elem) | pandas.Series |
import streamlit as st
from ml_api.ml import QuestionGenerationAPI
import spacy
from spacy.pipeline import EntityRuler
from spacy import displacy
from collections import defaultdict
import pandas as pd
# https://qiita.com/irisu-inwl/items/9d49a14c1c67391565f8
@st.cache(allow_output_mutation=True)
def load_ml(ml):
ml.load()
return ml
@st.cache(allow_output_mutation=True)
def get_nlp():
nlp = spacy.load('ja_ginza')
try:
entity_ruler = nlp.get_pipe("entity_ruler")
except KeyError:
entity_ruler = nlp.add_pipe("entity_ruler", before="ner")
entity_ruler.initialize(lambda: [], nlp=nlp, patterns=[])
return nlp, entity_ruler
def pickup_ne_candidates(doc):
'''
    Treat spans delimited by brackets as named-entity candidates
'''
markers = [[ '「', '『', '"'],
[ '」', '』', '"' ]]
ne_hint, content, focus_marker = [], [], None
ne_tokens, content_tokens, focus_types = [], [], []
for token in doc:
if focus_marker is not None:
            if token.text == markers[1][focus_marker]: # closing bracket
                # walk back from just before the brackets and collect noun tokens as NE hints
ne_hint = []
for ti in reversed(range(content[0].i-1)):
t = doc[ti]
if t.pos_ in ['NOUN', 'PROPN']:
ne_hint.insert(0, t)
else:
break
content_tokens.append(content)
focus_types.append(markers[0][focus_marker])
ne_tokens.append(ne_hint)
                content, focus_marker = [], None # reset
continue
            elif token.text not in markers[0]: # inside brackets (nested opening brackets are not handled)
content.append(token)
        if token.text in markers[0]: # opening bracket
focus_marker = markers[0].index(token.text)
content = []
continue
return content_tokens, ne_tokens, focus_types
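# A minimal usage sketch (hypothetical sentence; _demo_* helpers are illustrative additions,
# not part of the original module) of what pickup_ne_candidates returns: for text like
# NHKバラエティ番組『LIFE!』 it pairs the tokens inside 『』 with the noun tokens found right
# before the opening bracket and with the bracket type itself.
def _demo_pickup_ne_candidates(nlp, text='NHKバラエティ番組『LIFE!』が放送された。'):
    doc = nlp(text)
    content_tokens, ne_tokens, focus_types = pickup_ne_candidates(doc)
    return content_tokens, ne_tokens, focus_types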
# mapping from the noun just before the brackets to a named-entity label
ne_dict = defaultdict(lambda: 'Product')
ne_dict.update({
'雑誌': 'Magazine',
'誌': 'Magazine',
'番組': 'Broadcast_Program',
'ドラマ': 'Broadcast_Program',
'映画': 'Movie',
'作': 'Movie',
'ロックバンド': 'Show_Organizaiton',
})
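# A small sketch (illustrative helper; the last key is a hypothetical unlisted word) of how
# the fallback mapping above behaves: hint words that are not listed fall back to 'Product'.
def _demo_ne_dict_lookup():
    return ne_dict['映画'], ne_dict['雑誌'], ne_dict['アルバム']  # -> ('Movie', 'Magazine', 'Product')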
def summarize_sentences_none(nlp, text):
'''
    Return the text as-is. A trivial "summary".
'''
return text
from sumy.parsers.plaintext import PlaintextParser
from sumy.nlp.tokenizers import Tokenizer
from sumy.summarizers.lex_rank import LexRankSummarizer
def summarize_sentences(nlp, text, sentences_count=3):
text1 = text.replace('\n', '')
corpus = []
originals = []
doc = nlp(text1)
for s in doc.sents:
originals.append(s)
tokens = []
for t in s:
tokens.append(t.lemma_)
corpus.append(' '.join(tokens))
del doc
    # tokenize the joined corpus again with tinysegmenter
parser = PlaintextParser.from_string(''.join(corpus), Tokenizer('japanese'))
summarizer = LexRankSummarizer()
    summarizer.stop_words = [' ']  # because a space is also recognized as a word
    # sentences_count specifies the number of sentences after summarization.
    summary = summarizer(document=parser.document, sentences_count=sentences_count)
    # return the matching original sentences
return "".join([originals[corpus.index(sentence.__str__())].text for sentence in summary])
def summarize_sentences_head_and_tail(nlp, text):
'''
    Extract the first and last sentences of the text. A simple summary.
'''
text1 = text.rstrip('\n').split('\n')
return "".join([text1[0], text1[-1]])
def summarize_sentences_tail(nlp, text):
'''
    Extract the last two sentences of the text. A simple summary.
'''
text1 = text.rstrip('\n').split('\n')
return "".join([text1[-2], text1[-1]])
def ner_sentences(nlp, doc_text):
doc = nlp(doc_text)
tokens, ne_tokens, focus_types = pickup_ne_candidates(doc)
    patterns = []  # newly discovered NE patterns
    ne = 'Product'  # treat it as 'Product' when nothing is specified
# NHKバラエティ番組『LIFE!~人生に捧げるコント~』
# token = [LIFE, !, ~, 人生, に, 捧げる, コント, ~]
# ne_token = [NHK, バラエティ, 番組]
# focus_type = 『
for t, n, f in zip(tokens, ne_tokens, focus_types):
        if f in ['『']: # the typical way a news article introduces a new named entity
if len(n) > 0:
                ne = ne_dict[n[-1].text] # assume the last token right before the brackets indicates the NE type
else:
                None # if omitted, assume the previous NE type carries over
text = "".join([tok.text for tok in t])
        # skip NEs that are already registered
if not [ ent for ent in doc.ents if ent.text == text and ent.label_ == ne]:
patterns.append({"label": ne, "pattern": text})
    # run NER again
if len(patterns) > 0:
print("add entity-ruler patterns: ", patterns)
entity_ruler = nlp.get_pipe('entity_ruler')
entity_ruler.add_patterns(patterns)
doc = nlp(doc_text)
return doc
@st.cache(allow_output_mutation=True)
def generate(ml, answer_context_list):
return ml.generate_questions(answer_context_list)
def main():
st.title('deep-question-generation sample')
'''
this is a [t5-base-japanese-question-generation](https://huggingface.co/sonoisa/t5-base-japanese-question-generation) sample.
'''
ml = QuestionGenerationAPI()
ml = load_ml(ml)
nlp, ruler = get_nlp()
context_text = title = st.text_area('conxtext', context_default)
answer_text = title = st.text_input('answer', answer_default)
generate_button = st.button('Generate question')
if generate_button:
for (context, context_type) in [
[summarize_sentences_none(nlp, context_text), 'そのまま'],
# [summarize_sentences(nlp, context_text), '抽出型要約 sumy'],
[summarize_sentences_head_and_tail(nlp, context_text), '先頭末尾'],
[summarize_sentences_tail(nlp, context_text), '末尾2文'],
]:
st.markdown("""---""")
st.write(f'context ({context_type})')
st.write(context)
st.write('answer specified:')
st.write(answer_text)
doc = ner_sentences(nlp, context)
st.write('first sentence:')
for sent in doc.sents:
st.write(sent.text)
break
st.write('generated_question:')
generated_questions = generate(ml, [
[answer_text, context_text]
])
st.write(generated_questions[0])
            # question generation using each NE as the answer
ner_questions = []
for ent in doc.ents:
generated_questions = generate(ml, [
[ent.text, context_text]
])
ner_questions.append([
ent.text,
ent.label_,
ent.start_char,
ent.end_char,
generated_questions[0]
])
df = | pd.DataFrame(ner_questions, columns=['ent', 'label', 'start', 'end', 'question']) | pandas.DataFrame |
import pandas as pd
import numpy as np
import os
from scipy.spatial import distance
import networkx as nx
import math
import scipy.sparse as sp
from glob import glob
import argparse
import time
parser = argparse.ArgumentParser(description='Main Entrance of MP_MIM_RESEPT')
parser.add_argument('--sampleName', type=str, default='151507')
parser.add_argument('--MP-k-num', type=int, default=90, help='number of k_num in KNN graph of message passing (default: 90)')
parser.add_argument('--MP-l-num', type=int, default=15, help='number of layer_num in message passing (default: 15)')
args = parser.parse_args()
####KNN
# knn_graph_edgelist
def calculateKNNgraphDistanceWeighted(featureMatrix, distanceType, k):
edgeListWeighted = []
for i in np.arange(featureMatrix.shape[0]):
tmp = featureMatrix[i, :].reshape(1, -1)
distMat = distance.cdist(tmp, featureMatrix, distanceType)
res = distMat.argsort()[:k + 1]
tmpdist = distMat[0, res[0][1:k + 1]]
boundary = np.mean(tmpdist) + np.std(tmpdist)
for j in np.arange(1, k + 1):
if distMat[0, res[0][j]] <= boundary and i != res[0][j] :
edgeListWeighted.append((i, res[0][j], 1))
return edgeListWeighted
# generate_adj_nx_matirx
def generate_adj_nx_weighted_adj(featureMatrix, distanceType, k):
edgeList = calculateKNNgraphDistanceWeighted(featureMatrix, distanceType, k)
nodes = range(0,featureMatrix.shape[0])
Gtmp = nx.Graph()
Gtmp.add_nodes_from(nodes)
Gtmp.add_weighted_edges_from(edgeList)
adj = nx.adjacency_matrix(Gtmp)
adj_knn_by_feature = np.array(adj.todense())
return adj_knn_by_feature
# generate_self_loop_adj
def preprocess_graph_self_loop(adj):
adj = sp.coo_matrix(adj)
adj_ = adj + sp.eye(adj.shape[0])
adj_ = adj_.A
return adj_
####MP
# attention_ave
def gat_forward_att_ave(adj, Wh):
attention_ave = adj
attention_ave_par = attention_ave.sum(axis=1, keepdims=True)
attention_ave_final = attention_ave/attention_ave_par
h_prime = np.dot(attention_ave_final, Wh)
return h_prime
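# A minimal sketch (hypothetical 3-node graph; illustrative helper, not part of the pipeline)
# of the averaging step above: each row of the self-loop adjacency is normalised to sum to 1,
# so every node ends up with the mean of its own and its neighbours' features.
def _demo_att_ave():
    adj_self_loop = np.array([[1., 1., 0.],
                              [1., 1., 1.],
                              [0., 1., 1.]])
    feats = np.array([[0.], [3.], [6.]])
    return gat_forward_att_ave(adj_self_loop, feats)  # rows: [[1.5], [3.0], [4.5]]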
# attention_dis
def softmax(X):
X_exp = np.exp(X)
partition = X_exp.sum(axis=1, keepdims=True)
return X_exp/partition
def _prepare_euclidean_attentional_mechanism_input(Wh):
distMat = distance.cdist(Wh, Wh, 'euclidean')
return distMat
def gat_forward_euclidean(adj, Wh):
e = _prepare_euclidean_attentional_mechanism_input(Wh)
zero_vec = -9e15*np.ones_like(e)
attention = np.where(adj > 0, e, zero_vec)
attention = softmax(attention)
h_prime = np.dot(attention, Wh)
return h_prime
# layer_loop_att_ave
def forward_basic_gcn_multi_layer(adj, Wh, layer_num):
hidden = Wh
for num in range(layer_num):
h = gat_forward_att_ave(adj , hidden)
hidden = h
#print(num)
return hidden
# layer_loop_att_euc
def forward_dis_gcn_multi_layer(adj, Wh, layer_num):
hidden = Wh
for num in range(layer_num):
h = gat_forward_euclidean(adj , hidden)
hidden = h
#print(num)
return hidden
####MI_GC
# MI
def Moran_I(multi_hop_weight_mat, feature, MI_type='normal'):
if MI_type == 'normal':
w = multi_hop_weight_mat
y = feature
n = len(y)
z = y - y.mean()
z2ss = (z * z).sum()
s0 = np.sum(w)
zl = np.dot(w , z)
inum = (z * zl).sum()
MI = n / s0 * inum / z2ss
if MI_type == 'row_normalizaiton':
WR_temp = multi_hop_weight_mat
WR = np.zeros((WR_temp.shape[0],WR_temp.shape[1]))
each_row_sum_list=[]
for i in range(WR_temp.shape[0]):
each_row_sum_list.append(np.sum(WR_temp[i,:]))
for i in range(WR_temp.shape[0]):
for j in range(WR_temp.shape[1]):
if WR_temp[i,j] != 0:
WR[i,j] = WR_temp[i,j]/each_row_sum_list[i]
w = WR
y = feature
n = len(y)
z = y - y.mean()
z2ss = (z * z).sum()
s0 = np.sum(w)
zl = np.dot(w , z)
inum = (z * zl).sum()
MI = n / s0 * inum / z2ss
return MI
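# A minimal sketch (hypothetical 4-spot path graph, not real spatial data; illustrative helper)
# of Moran's I as computed above: a spatially clustered pattern (two high spots next to two low
# spots) yields a positive value.
def _demo_moran_i():
    w = np.array([[0., 1., 0., 0.],
                  [1., 0., 1., 0.],
                  [0., 1., 0., 1.],
                  [0., 0., 1., 0.]])
    feature = np.array([1.0, 1.0, -1.0, -1.0])
    return Moran_I(w, feature, 'normal')  # -> 1/3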
# GC
def GC_related(multi_hop_weight_mat, feature, GC_type='normal'):
if GC_type == 'normal':
w = multi_hop_weight_mat
y = np.asarray(feature).flatten()
n = len(y)
s0 = np.sum(w)
yd = y - y.mean()
yss = sum(yd * yd)
den = yss * s0 * 2.0
_focal_ix, _neighbor_ix = w.nonzero()
        _weights = sp.csr_matrix(w).data  # use scipy.sparse, imported above as sp
num = (_weights * ((y[_focal_ix] - y[_neighbor_ix])**2)).sum()
a = (n - 1) * num
GC = a / den
if GC > 1:
GC_related = GC - 1
if GC < 1:
GC_related = 1 - GC
if GC == 1:
GC_related = 0
if GC_type == 'row_normalizaiton':
WR_temp = multi_hop_weight_mat
WR = np.zeros((WR_temp.shape[0],WR_temp.shape[1]))
each_row_sum_list=[]
for i in range(WR_temp.shape[0]):
each_row_sum_list.append(np.sum(WR_temp[i,:]))
for i in range(WR_temp.shape[0]):
for j in range(WR_temp.shape[1]):
if WR_temp[i,j] != 0:
WR[i,j] = WR_temp[i,j]/each_row_sum_list[i]
w = WR
y = np.asarray(feature).flatten()
n = len(y)
s0 = np.sum(w)
yd = y - y.mean()
yss = sum(yd * yd)
den = yss * s0 * 2.0
_focal_ix, _neighbor_ix = w.nonzero()
        _weights = sp.csr_matrix(w).data  # use scipy.sparse, imported above as sp
num = (_weights * ((y[_focal_ix] - y[_neighbor_ix])**2)).sum()
a = (n - 1) * num
GC = a / den
if GC > 1:
GC_related = GC - 1
if GC < 1:
GC_related = 1 - GC
if GC == 1:
GC_related = 0
return GC_related
# spatial_adj_knn
def calculateKNNDistanceWeighted_spatial_autocor(featureMatrix, distanceType, k):
edgeListWeighted = []
for i in np.arange(featureMatrix.shape[0]):
tmp = featureMatrix[i, :].reshape(1, -1)
distMat = distance.cdist(tmp, featureMatrix, distanceType)
res = distMat.argsort()[:k + 1]
for j in np.arange(1, k + 1):
edgeListWeighted.append((i, res[0][j], 1))
return edgeListWeighted
# generate_adj_nx_matirx
def generate_spatial_adj_nx_weighted_based_on_coordinate(featureMatrix, distanceType, k):
edgeList = calculateKNNDistanceWeighted_spatial_autocor(featureMatrix, distanceType, k)
nodes = range(0,featureMatrix.shape[0])
Gtmp = nx.Graph()
Gtmp.add_nodes_from(nodes)
Gtmp.add_weighted_edges_from(edgeList)
adj = nx.adjacency_matrix(Gtmp)
adj_knn_by_coordinate = np.array(adj.todense())
return adj_knn_by_coordinate
# spatial_adj_distance
def MI_spatial_adj_matrix(coordinateMatrix, hop_num=1, distanceType='cityblock'):
distMat = distance.cdist(coordinateMatrix, coordinateMatrix, distanceType)
multi_hop_weight_mat = np.zeros((distMat.shape[0] , distMat.shape[1]))
if distanceType == 'euclidean':
if hop_num == 1:
for i in range(distMat.shape[0]):
for j in range(distMat.shape[1]):
if distMat[i][j] <= math.sqrt(2) and distMat[i][j] > 0:
multi_hop_weight_mat[i][j] = 1
return multi_hop_weight_mat
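# A minimal sketch (hypothetical coordinates; illustrative helper, not part of the pipeline)
# of the 1-hop weight matrix built above: only spots within sqrt(2) of each other (excluding
# a spot with itself) receive weight 1.
def _demo_spatial_adj():
    coords = np.array([[0, 0], [0, 1], [2, 2]])
    return MI_spatial_adj_matrix(coords, hop_num=1, distanceType='euclidean')
    # only the (0,1) / (1,0) entries are 1 here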
if __name__ == '__main__':
########RESEPT
####time_computing
start_time = time.time()
print("MP_MIM_RESEPT. Start Time: %s seconds" %
(start_time))
####parameter_set_initial
PEalphaList = ['0.1','0.2','0.3', '0.5', '1.0', '1.2', '1.5','2.0']
zdimList = ['3','10', '16','32', '64', '128', '256']
sample = args.sampleName
k_num_distance_att = args.MP_k_num
layer_num_distance_att = args.MP_l_num
####sample_list
sample_list = [ '151507','151508', '151509', '151510', '151669', '151670', '151671', '151672', '151673', '151674', '151675', '151676','18-64','2-5', '2-8', 'T4857']
letter_list = [ 'a','b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l','m', 'n', 'o', 'p']
count_init = sample_list.index(sample)
count = 56*count_init
letter = letter_list[count_init]
embedding_MIrow_max_list = []
embedding_name_list = []
####current_os
meta_folder_path = os.path.abspath('./meta_data_folder/metaData_brain_16_coords')
embedding_folder_path = os.path.abspath('./RESEPT_embedding_folder')
embedding_in_RESEPT_folder = "RESEPT_MP_embedding_"+sample+"/"
if not os.path.exists(embedding_in_RESEPT_folder):
os.makedirs(embedding_in_RESEPT_folder)
####MP_parameter_set
k_num_distance_att_list = [10,20,30,40,50,60,70,80,90]
layer_num_distance_att_list = [1,2,3,4,5,6,7,8,9,10,11,12,13,14,15]
####loop_part
for i in range(len(PEalphaList)):
for j in range((len(zdimList))):
####read_embedding
count = count + 1
embedding_root_path = '/'+sample+'_embedding_raw/'+letter+'_'+str(count)+'_outputdir-3S-'+sample+'_raw_EM1_resolution0.3_euclidean_dummy_add_PEalpha'+str(PEalphaList[i])+'_k6_NA_zdim'+str(zdimList[j])+'/'+sample+'_raw_6_euclidean_NA_dummy_add_'+str(PEalphaList[i])+'_intersect_160_GridEx19_embedding.csv'
embedding_df = pd.read_csv(embedding_folder_path+embedding_root_path,index_col=0)
embedding_celllist = embedding_df.index.tolist()
graph_embedding_name = sample+'_raw_res0.3_euclidean_NA_dummy_add_PEalpha'+str(PEalphaList[i])+'_k6_NA_zdim'+str(zdimList[j])+'_gat_self_loop_euc_graphK'+str(k_num_distance_att)+'_layer'+str(layer_num_distance_att)
embedding_name_list.append(graph_embedding_name)
native_embedding_whole = embedding_df[['embedding0','embedding1','embedding2']].values
####embedding_knn_graph
knn_graph_k_num = k_num_distance_att
l_num = layer_num_distance_att
adj = generate_adj_nx_weighted_adj(native_embedding_whole, distanceType='euclidean', k=knn_graph_k_num)
adj_self_loop = preprocess_graph_self_loop(adj)
graph_embedding_whole = forward_dis_gcn_multi_layer(adj_self_loop, native_embedding_whole, l_num)
graph_embedding_add_barcode_df = pd.DataFrame(graph_embedding_whole, index=embedding_celllist, columns=['embedding0','embedding1','embedding2'])
graph_embedding_add_barcode_df.to_csv(embedding_in_RESEPT_folder+sample+'_'+str(count)+'_raw_res0.3_euclidean_NA_dummy_add_PEalpha'+str(PEalphaList[i])+'_k6_NA_zdim'+str(zdimList[j])+'_gat_self_loop_euc_graphK'+str(knn_graph_k_num)+'_layer'+str(l_num)+'_graph_embedding.csv')
graph_embedding_remove_zero_df = graph_embedding_add_barcode_df.loc[~(graph_embedding_add_barcode_df==0).all(axis=1)]
#print(graph_embedding_remove_zero_df)
graph_embedding_remove_zero_whole = graph_embedding_remove_zero_df[['embedding0','embedding1','embedding2']].values
coordinate_graph_embedding_whole_df = pd.read_csv(meta_folder_path+'/'+sample+'_humanBrain_metaData.csv',index_col=0)
coordinate_graph_embedding_remove_zero_df = coordinate_graph_embedding_whole_df.loc[graph_embedding_remove_zero_df.index]
coordinate_graph_embedding_remove_zero_np = coordinate_graph_embedding_remove_zero_df[['array_row','array_col']].values
####MI_spatial_adj
MI_graph_embedding_spatial_adj = MI_spatial_adj_matrix(coordinate_graph_embedding_remove_zero_np, hop_num=1, distanceType='euclidean')
####MI_max
embedding_MIrow_list = []
for dim_num in range(graph_embedding_remove_zero_whole.shape[1]):
embedding_current_MIrow = Moran_I(MI_graph_embedding_spatial_adj, graph_embedding_remove_zero_whole[:,dim_num], 'row_normalizaiton')
embedding_MIrow_list.append(embedding_current_MIrow)
embedding_MIrow_list_np = np.array(embedding_MIrow_list)
embedding_MIrow_max_list.append(np.max(embedding_MIrow_list_np))
####save_result
MIrow_result_gat_euc_df = | pd.DataFrame({'embedding_name':embedding_name_list,'embedding_MP_MIM':embedding_MIrow_max_list}) | pandas.DataFrame |
import time
import warnings
import pandas as pd
import numpy as np
from collections import defaultdict
from sklearn.dummy import DummyClassifier, DummyRegressor
from sklearn.preprocessing import QuantileTransformer
from sklearn.compose import TransformedTargetRegressor
from sklearn.metrics import make_scorer
from sklearn.experimental import enable_halving_search_cv
from sklearn.model_selection import HalvingGridSearchCV
from sklearn.model_selection import train_test_split
from sklearn.metrics import roc_auc_score, accuracy_score, f1_score, balanced_accuracy_score
from sklearn.metrics import mean_squared_error, mean_absolute_error, explained_variance_score, r2_score
from sklearn.metrics import precision_score, recall_score, confusion_matrix
from sklearn.metrics import roc_curve, precision_recall_curve, det_curve
from sklearn.exceptions import NotFittedError
from .models import classifiers, regressors
from .functions import reverse_sorting_order, obj_name
class ModelNotSetError(ValueError):
"""Model is not set."""
pass
class ModelsNotSearchedError(ValueError):
"""Search for Models was not done."""
pass
class WrappedModelRegression:
"""Wrapper for Models in Regression problems.
Models get wrapped with TransformedTargetRegressor to transform y target before predictions on X features take
place. Wrapper additionally customizes __name__, __class__ and __str__ methods/attributes to return those values
from main Model (not TransformedTargetRegressor).
Attributes:
clf (sklearn.compose.TransformedTargetRegressor): Wrapped model for regression problems
"""
def __init__(self, regressor, transformer):
"""Create WrappedModelRegression object.
Override __name__ and __class__ attributes with appropriate attributes from regressor.
Args:
regressor (sklearn.Model): Model used to predict regression target
transformer (sklearn.Transformer): Transformer used to transform y (target)
"""
self.clf = TransformedTargetRegressor(regressor=regressor, transformer=transformer)
self.__name__ = self.clf.regressor.__class__.__name__
self.__class__ = self.clf.regressor.__class__
def fit(self, *args, **kwargs):
"""Fit Model in clf attribute with provided arguments.
Args:
*args: Variable length argument list.
**kwargs: Arbitrary keyword arguments.
Returns:
self
"""
self.clf.fit(*args, **kwargs)
return self
def predict(self, *args, **kwargs):
"""Predict provided arguments with Model in clf attribute.
Args:
*args: Variable length argument list.
**kwargs: Arbitrary keyword arguments.
Returns:
numpy.ndarray: predictions
"""
return self.clf.predict(*args, **kwargs)
def get_params(self, *args, **kwargs):
"""Return params of regressor inside wrapped clf Model.
Args:
*args: Variable length argument list.
**kwargs: Arbitrary keyword arguments.
Returns:
dict: params of regressor
"""
return self.clf.regressor.get_params(*args, **kwargs)
def __str__(self):
"""Return __str__method of regressor inside wrapped clf Model.
Returns:
str: __str__ method of regressor
"""
return self.clf.regressor.__str__()
def __class__(self, *args, **kwargs):
"""Return new object of regressor class instantiated with *args and **kwargs arguments.
Args:
*args: Variable length argument list.
**kwargs: Arbitrary keyword arguments.
Returns:
regressor: new regressor object
"""
return self.clf.regressor.__class__(*args, **kwargs)
class ModelFinder:
"""ModelFinder used to search for Models with best scores.
ModelFinder works in two ways - as a search engine for Models (either with GridSearch or by simply comparing
scores in a way similar to LazyPredict package) and as a container for the chosen Model to be used for fit/predict
methods, etc.
search is bread and butter method used to compare performance of different Models on provided X and y data. As also
explained in search, Models can be either predefined (as already created instances or Model: param_grid pairs) or
not, in which case default collection of Models is used in search.
Train splits are used to fit Models, but assessment of how well they perform given a specific scoring function is
performed on test splits.
Note:
provided X and y needs to be pre-transformed.
Attributes:
X (pandas.DataFrame, numpy.ndarray, scipy.csr_matrix): X feature space (transformed)
y (pandas.Series, numpy.ndarray): target variable
X_train (pandas.DataFrame, numpy.ndarray, scipy.csr_matrix): train split of X features (transformed)
X_test (pandas.DataFrame, numpy.ndarray, scipy.csr_matrix): test split of X features (transformed)
y_train (pandas.Series, numpy.ndarray): train split of target variable
y_test (pandas.Series, numpy.ndarray): test split of target variable
problem (str): string representing type of a problem, one of _classification, _regression or _multiclass
attributes
random_state (int, None): integer for reproducibility on fitting and transformations, defaults to None if not
provided during __init__
scoring_functions (list): scoring functions specific to a given problem type
default_models (dict): dictionary of 'Model class': param_grid dict pairs specific to a given problem type
default_scoring (function): default scoring to be used specific to a given problem type
target_transformer (sklearn.Transformer): Transformer used to transform y target variable in regression
"""
_classification = "classification"
_regression = "regression"
_multiclass = "multiclass"
_quicksearch_limit = 3 # how many Models are chosen in quicksearch
_scoring_classification = [accuracy_score, balanced_accuracy_score, f1_score, roc_auc_score]
_scoring_regression = [mean_squared_error, mean_absolute_error, explained_variance_score, r2_score]
_scoring_multiclass_parametrized = [
(f1_score, {"average": "micro"}, "f1_score_micro"),
(f1_score, {"average": "weighted"}, "f1_score_weighted"),
(precision_score, {"average": "weighted"}, "precision_score_weighted"),
(recall_score, {"average": "weighted"}, "recall_score_weighted"),
]
_scoring_multiclass = [accuracy_score, balanced_accuracy_score]
# hardcoded strings and parameters
_model_name = "model"
_fit_time_name = "fit_time"
_params_name = "params"
_transformed_target_name = "TransformedTargetRegressor__transformer"
_mode_quick = "quick"
_mode_detailed = "detailed"
_modes = [_mode_quick, _mode_detailed]
_target_categorical = "categorical"
_target_numerical = "numerical"
_target_categories = [_target_categorical, _target_numerical]
_probas_functions = ["roc_auc_score"]
def __init__(self, X, y, X_train, X_test, y_train, y_test, target_type, random_state=None):
"""Create ModelFinder object with provided X and y arguments.
Set default values of attributes and create dummy model depending on target_type.
Args:
X (pandas.DataFrame, numpy.ndarray, scipy.csr_matrix): X feature space (transformed)
y (pandas.Series, numpy.ndarray): target variable
X_train (pandas.DataFrame, numpy.ndarray, scipy.csr_matrix): train split of X features (transformed)
X_test (pandas.DataFrame, numpy.ndarray, scipy.csr_matrix): test split of X features (transformed)
y_train (pandas.Series, numpy.ndarray): train split of target variable
y_test (pandas.Series, numpy.ndarray): test split of target variable
target_type (str): string representing type of a problem to which Models should be created
random_state (int, optional): integer for reproducibility on fitting and transformations, defaults to None
Raises:
ValueError: if target_type is not one of _classification, _regression or _multiclass attributes
"""
self.random_state = random_state
self.X = X
self.y = y
self.X_train = X_train
self.X_test = X_test
self.y_train = y_train
self.y_test = y_test
if target_type in self._target_categories:
self._set_problem(target_type)
else:
raise ValueError("Expected one of the categories: {categories}; got {category}".format(
categories=", ".join(self._target_categories), category=target_type
))
self._chosen_model = None
self._chosen_model_params = None
self._chosen_model_scores = None
self._search_results = None
self._search_results_dataframe = None
self._quicksearch_results = None
self._gridsearch_results = None
self._dummy_model, self._dummy_model_scores = self._create_dummy_model()
####################################################################
# =============== Exposed Search and Fit functions =============== #
####################################################################
def search_and_fit(self, models=None, scoring=None, mode=_mode_quick):
"""Search for the Model and set it as the chosen Model.
Refer to specific functions for further details.
Args:
models (list, dict, optional): list or 'model class': param_grid dict pairs, defaults to None
scoring (func, optional): scoring function with which performance assessment will be made, defaults to None
mode (str, optional): mode of search to be done, defaults to _mode_quick class attribute
Returns:
sklearn.Model: Model with best performance, fit to X and y data
"""
model = self.search(models, scoring, mode)
self.set_model(model)
self.fit()
return self._chosen_model
def set_model_and_fit(self, model):
"""Set provided model as a chosen Model of ModelFinder and fit it to X and y data.
Args:
model (sklearn.Model): instantiated Model
"""
self.set_model(model)
self.fit()
def search(self, models=None, scoring=None, mode=_mode_quick):
"""Search for Models that have the best performance with provided arguments.
Models can be:
- list of initialized models
- dict of 'Model Class': param_grid of a given model to do the GridSearch on
- None - default Models collection will be used
scoring should be a sklearn scoring function. If None is provided, default scoring function will be used.
mode can be:
- "quick": search is initially done on all models but with no parameter tuning after which top
_quick_search_limit Models are chosen and GridSearched with their param_grids
- "detailed": GridSearch is done on all default models and their params
Provided mode doesn't matter when models are explicitly provided (not None).
        After the initial quicksearch/gridsearch, the best model from each class is chosen and has its assessment performed.
Results of this are set in _search_results and _search_results_dataframe attributes.
Note:
random_state of Models will be overriden with random_state attribute
Args:
models (list, dict, optional): list of Models or 'Model class': param_grid dict pairs, defaults to None
scoring (func, optional): sklearn scoring function, defaults to None
mode ("quick", "detailed", optional): either "quick" or "detailed" string, defaults to "quick"
Returns:
sklearn.Model: new (not fitted) instance of the best scoring Model
Raises:
ValueError: mode is not "quick" or "detailed"; type of models is not dict, list-like or None
"""
if scoring is None:
scoring = self.default_scoring
if mode not in self._modes:
raise ValueError("Expected one of the modes: {modes}; got {mode}".format(
modes=", ".join(self._modes), mode=mode
))
if isinstance(models, dict) or models is None:
initiated_models = self._search_for_models(models, mode, scoring)
else:
try:
# in case of Str as it doesn't raise TypeError with iter() function
if isinstance(models, str):
raise TypeError
iter(models)
initiated_models = [self._wrap_model(single_model) for single_model in models]
except TypeError:
raise ValueError("models should be Dict, List-like or None, got {models}".format(models=models))
scored_and_fitted_models, search_results = self._assess_models_performance(initiated_models, scoring)
self._search_results = scored_and_fitted_models
self._search_results_dataframe = self._create_search_results_dataframe(search_results, scoring)
sorting_order = reverse_sorting_order(obj_name(scoring))
scored_and_fitted_models.sort(key=lambda x: x[1], reverse=sorting_order)
best_model = scored_and_fitted_models[0][0] # first item in the list, first item in the (model, score) tuple
new_model = best_model.__class__(**best_model.get_params())
return new_model
def set_model(self, model):
"""Set model as a chosen ModelFinder model.
        Create an additional copy of the model and calculate its scores with scoring functions specific to a given
problem.
Args:
model (sklearn.Model): Instantiated Model
"""
model = self._wrap_model(model)
params = model.get_params()
copy_for_scoring = model.__class__(**params).fit(self.X_train, self.y_train)
self._chosen_model = model
self._chosen_model_params = params
self._chosen_model_scores = self._score_model(copy_for_scoring, self.default_scoring)
def best_model(self):
"""Return _chosen_model attribute (chosen Model).
Returns:
sklearn.Model
"""
return self._chosen_model
def fit(self):
"""Fit chosen Model from _chosen_model attribute on X and y data.
Raises:
ModelNotSetError: when no Model was set as a chosen Model
"""
if self._chosen_model is None:
raise ModelNotSetError(
"Model needs to be set before fitting. Call 'set_model' or 'search' for a model before trying to fit."
)
self._chosen_model.fit(self.X, self.y)
def predict(self, X):
"""Predict target variable from provided X features.
Returns:
numpy.ndarray: predicted target values from X
Raises:
ModelNotSetError: when no Model was set as a chosen Model
"""
if self._chosen_model is None:
raise ModelNotSetError(
"Model needs to be set and fitted before prediction. Call 'set_model' or 'search' for a model before."
)
return self._chosen_model.predict(X)
def quicksearch_results(self):
"""Return quicksearch results from _quicksearch_results attribute.
Returns:
pandas.DataFrame
"""
return self._quicksearch_results
def gridsearch_results(self):
"""Return gridsearch results from _gridsearch_results attribute.
Returns:
pandas.DataFrame
"""
return self._gridsearch_results
#########################################################################
# =============== Visualization Data for View functions =============== #
#########################################################################
def search_results(self, model_limit):
"""Return detailed search results DataFrame from _search_results_dataframe.
model_limit restricts the number of Models and their results to be returned. Number of rows in the DataFrame
is always model_limit + 1, as results from Dummy Model are being appended at the end.
Args:
model_limit (int): number of rows to be returned
Returns:
pandas.DataFrame: search results
Raises:
            ModelsNotSearchedError: when no search and performance assessment of Models has happened
"""
if self._search_results_dataframe is None:
raise ModelsNotSearchedError("Search Results is not available. Call 'search' to obtain comparison models.")
# dummy is always included, regardless of model limit
models = self._search_results_dataframe.iloc[:model_limit]
dummy = self._dummy_model_results()
df = | pd.concat([models, dummy], axis=0) | pandas.concat |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Author : Mike
# @Contact : <EMAIL>
# @Time : 2020/1/6 22:46
# @File : cross_feature.py
"""
Force pairwise division crossing of the top-100 features
"""
import pandas as pd
from sklearn.preprocessing import LabelEncoder
from sklearn.model_selection import train_test_split
import lightgbm as lgb
import numpy as np
cross_feature_num = 100
def LGB_test(train_x, train_y, test_x, test_y, cate_col=None):
if cate_col:
data = pd.concat([train_x, test_x])
for fea in cate_col:
data[fea] = data[fea].fillna('-1')
data[fea] = LabelEncoder().fit_transform(data[fea].apply(str))
train_x = data[:len(train_x)]
test_x = data[len(train_x):]
print("LGB test")
clf = lgb.LGBMClassifier(
boosting_type='gbdt', num_leaves=31, reg_alpha=0.0, reg_lambda=1,
max_depth=-1, n_estimators=3000, objective='binary',
subsample=0.7, colsample_bytree=0.7, subsample_freq=1, # colsample_bylevel=0.7,
learning_rate=0.01, min_child_weight=25, random_state=2018, n_jobs=50
)
clf.fit(train_x, train_y, eval_set=[(train_x, train_y), (test_x, test_y)], early_stopping_rounds=100)
feature_importances = sorted(zip(train_x.columns, clf.feature_importances_), key=lambda x: x[1])
return clf.best_score_['valid_1']['binary_logloss'], feature_importances
def off_test_split(org, cate_col=None):
data = org[org.is_trade > -1]
data = data.drop(
['hour48', 'hour', 'user_id', 'query1', 'query',
'instance_id', 'item_property_list', 'context_id', 'context_timestamp', 'predict_category_property'], axis=1)
data['item_category_list'] = LabelEncoder().fit_transform(data['item_category_list'])
y = data.pop('is_trade')
train_x, test_x, train_y, test_y = train_test_split(data, y, test_size=0.15, random_state=2018)
train_x.drop('day', axis=1, inplace=True)
test_x.drop('day', axis=1, inplace=True)
score = LGB_test(train_x, train_y, test_x, test_y, cate_col)
return score[1]
def LGB_predict(data, file):
data = data.drop(['hour48', 'hour', 'user_id', 'shop_id', 'query1', 'query',
'item_property_list', 'context_id', 'context_timestamp', 'predict_category_property'], axis=1)
data['item_category_list'] = LabelEncoder().fit_transform(data['item_category_list'])
train = data[data['is_trade'] > -1]
predict = data[data['is_trade'] == -2]
res = predict[['instance_id']]
train_y = train.pop('is_trade')
train_x = train.drop(['day', 'instance_id'], axis=1)
test_x = predict.drop(['day', 'instance_id', 'is_trade'], axis=1)
clf = lgb.LGBMClassifier(
boosting_type='gbdt', num_leaves=31, reg_alpha=0.0, reg_lambda=1,
max_depth=-1, n_estimators=3000, objective='binary',
subsample=0.7, colsample_bytree=0.7, subsample_freq=1, # colsample_bylevel=0.7,
learning_rate=0.01, min_child_weight=25, random_state=2018, n_jobs=50
)
clf.fit(train_x, train_y, eval_set=[(train_x, train_y)])
res['predicted_score'] = clf.predict_proba(test_x)[:, 1]
testb = pd.read_csv('../data/round2_ijcai_18_test_b_20180510.txt', sep=' ')[['instance_id']]
res = pd.merge(testb, res, on='instance_id', how='left')
res[['instance_id', 'predicted_score']].to_csv('../submit/' + file + '.txt', sep=' ', index=False)
def add(f1, f2):
for i in f2:
f1 = pd.merge(f1, i, on='instance_id', how='left')
return f1
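# A minimal sketch (hypothetical frames, not the competition data; illustrative helper) of
# what add() does: every feature table in the list is left-merged onto the base frame by
# instance_id.
def _demo_add():
    base = pd.DataFrame({'instance_id': [1, 2], 'x': [0.1, 0.2]})
    extra = [pd.DataFrame({'instance_id': [1, 2], 'cvr': [0.01, 0.02]})]
    return add(base, extra)  # columns: instance_id, x, cvr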
if __name__ == '__main__':
org = pd.read_csv('../data/origion_concat.csv')
train = org[org.day == 7]
query = pd.read_csv('../data/query_all.csv')
leak = pd.read_csv('../data/leak_all.csv')
comp = pd.read_csv('../data/compare_all.csv')
day6_cvr = pd.read_csv('../data/6day_cvr_feature.csv')
days7_cvr = pd.read_csv('../data/7days_cvr_feature.csv')
day6_rank = pd.read_csv('../data/rank_feature_6day.csv')
days7_rank = pd.read_csv('../data/rank_feature_7days.csv')
nobuy = pd.read_csv('../data/nobuy_feature.csv')
trend = | pd.read_csv('../data/trend_feature.csv') | pandas.read_csv |
import pandas as pd
from Data_BCG.Download_Data import scraping_Functions as sf
# performance = sf.get_aggregated_season_data(1980)
per_game_data = sf.get_game_data(2009)
birthplaces = sf.get_birthplaces()
high_schools = sf.get_high_school_cities()
player_id = sf.get_players_id()
# Standardizing each database
# Remove the trailing * from every player's name
# performance.player = performance.player.str.replace("\*", "")
player_id = player_id.rename(str.lower, axis="columns")
per_game_data = pd.merge(per_game_data, player_id, how="left", on="id")
names = per_game_data["player"]
per_game_data.drop("player", axis=1, inplace=True)
per_game_data.insert(0, "player", names)
# Eliminated columns that weren't useful to the final database
high_schools = high_schools.iloc[:, [0, 1, 2]].rename(str.lower, axis="columns")
high_schools.player = high_schools.player.str.replace("\*", "")
# Restricted the sample to players that were born in the US
# Eliminated columns that weren't useful to the final database
birthplaces.player = birthplaces.player.str.replace("\*", "")
birthplaces = birthplaces[birthplaces.country_iso2 == "US"]
birthplaces = birthplaces.iloc[:, [0, 1, 3]]
# Merged the databases into one, restricting the data with "inner"
# performance_aggr = pd.merge(performance, birthplaces, how="inner", on="player")
# performance_aggr = pd.merge(performance_aggr, high_schools, how="inner", on=["player", "state"])
per_game_aggr = | pd.merge(per_game_data, birthplaces, how="inner", on="player") | pandas.merge |
# -*- coding: utf-8 -*-
from numpy import where as npWhere
from pandas import DataFrame
from pandas_ta.overlap import hlc3, ma
from pandas_ta.utils import get_drift, get_offset, non_zero_range, verify_series
def kvo(high, low, close, volume, fast=None, slow=None, length_sig=None, mamode=None, drift=None, offset=None, **kwargs):
"""Indicator: Klinger Volume Oscillator (KVO)"""
# Validate arguments
fast = int(fast) if fast and fast > 0 else 34
slow = int(slow) if slow and slow > 0 else 55
length_sig = int(length_sig) if length_sig and length_sig > 0 else 13
mamode = mamode.lower() if mamode and isinstance(mamode, str) else "ema"
_length = max(fast, slow, length_sig)
high = verify_series(high, _length)
low = verify_series(low, _length)
close = verify_series(close, _length)
volume = verify_series(volume, _length)
drift = get_drift(drift)
offset = get_offset(offset)
if high is None or low is None or close is None or volume is None: return
# Calculate Result
mom = hlc3(high, low, close).diff(drift)
trend = npWhere(mom > 0, 1, 0) + npWhere(mom < 0, -1, 0)
dm = non_zero_range(high, low)
m = high.size
cm = [0] * m
for i in range(1, m):
cm[i] = (cm[i - 1] + dm[i]) if trend[i] == trend[i - 1] else (dm[i - 1] + dm[i])
vf = 100 * volume * trend * abs(2 * dm / cm - 1)
kvo = ma(mamode, vf, length=fast) - ma(mamode, vf, length=slow)
kvo_signal = ma(mamode, kvo, length=length_sig)
# Offset
if offset != 0:
kvo = kvo.shift(offset)
kvo_signal = kvo_signal.shift(offset)
# Handle fills
if "fillna" in kwargs:
kvo.fillna(kwargs["fillna"], inplace=True)
kvo_signal.fillna(kwargs["fillna"], inplace=True)
if "fill_method" in kwargs:
kvo.fillna(method=kwargs["fill_method"], inplace=True)
kvo_signal.fillna(method=kwargs["fill_method"], inplace=True)
# Name and Categorize it
kvo.name = f"KVO_{fast}_{slow}"
kvo_signal.name = f"KVOSig_{length_sig}"
kvo.category = kvo_signal.category = "volume"
# Prepare DataFrame to return
data = {kvo.name: kvo, kvo_signal.name: kvo_signal}
kvoandsig = | DataFrame(data) | pandas.DataFrame |
import numpy as np
from copy import copy , deepcopy
from collections.abc import Iterable, Collection
import pandas as pd
import random
DEBUG = False
class InvalidArguments(Exception) :
def __init__(self, err="invalid arguments") :
self.err = err
def __str__(self) :
return self.err
class Individual() :
class KeyValueError(Exception) :
def __init__(self, err_msg) :
self.err_msg = err_msg
        def __str__(self) :
            return str(self.err_msg)
def __init__(self, *args, **kwargs) :
        if len(args) == 1 and isinstance(args[0], Individual):
tmp = args[0]
self.chromosome = tmp.chromosome.copy()
self.generation = tmp.generation
self.age = tmp.age
self.fitness = 0
elif len(args) + len(kwargs) >= 3 :
self.chromosome = args[0] if args[0] is not None else kwargs["chromosome"]
self.generation = args[1] if args[1] is not None else kwargs["generation"]
self.age = args[2] if args[2] is not None else kwargs["age"]
self.fitness = 0
else :
raise Exception("non sufficient arguments")
def mutate(self, t = 0.1, prob=None) :
if prob is None :
prob = np.random.rand(len(self.chromosome))
factor = prob < t
xor = lambda x, y : (x != y).astype(np.int32)
chromosome = xor(np.array(self.chromosome), factor)
generation = self.generation #+ 1
age = 0
self.grow()
return Individual(chromosome.tolist(), generation, age)
def grow(self) :
self.age += 1
def __setitem__(self,k,v) :
if type(k) == int :
self.chromosome[k] = v
elif type(k) == slice :
self.chromosome[k] = v
elif isinstance(k, Collection) :
s = pd.Series(self.chromosome)
s[k] = v
self.chromosome = s.values.tolist()
else :
raise Individual.KeyValueError("Cannot set chromosome with a key type is not int, slice or Collection")
def __getitem__(self,k) :
if type(k) == int :
return Individual(self.chromosome[k],self.generation,self.age)
elif type(k) == slice :
return Individual(self.chromosome[k],self.generation,self.age)
elif isinstance(k, Collection) :
s = pd.Series(self.chromosome)
            return Individual(s[k].values.tolist(), self.generation, self.age)
else :
raise Individual.KeyValueError("Cannot get chromosome with a key type is not int, slice or Collection")
def __str__(self) :
if DEBUG == True :
return str([self.chromosome, self.generation, self.age])
else :
return str(self.chromosome)
# def __eq__(self, obj) :
# return self.chromosome == obj.chromosome
def __add__(self, another) :
# implement concatenation of two chromosome so it is not mutable addition.
chromosome = self.chromosome + another.chromosome
generation = self.generation + 1
age = 0
self.grow()
another.grow()
return Individual(chromosome, generation, age)
def __truediv__(self, div) :
# div : a number or a np.ndarray
chromosome = (np.array(self.chromosome) / div).astype(float).tolist()
generation = self.generation
age = self.age
return Individual(chromosome, generation, age)
def __mul__(self, mul) :
# mul : a number or a np.ndarray
chromosome = (np.array(self.chromosome) * mul).astype(float).tolist()
generation = self.generation
age = self.age
return Individual(chromosome, generation, age)
def indexOf(self, vals) :
if isinstance(vals, Collection) :
indices = []
for v in vals :
indices.append(self.chromosome.index(v))
return indices
else :
indices = []
for i in range(len(self.chromosome)) :
if self.chromosome[i] == vals :
indices.append(i)
return indices
def indexOfPositive(self) :
indices = []
for i in range(len(self.chromosome)) :
if self.chromosome[i] > 0 :
indices.append(i)
return indices
def copy(self) :
return deepcopy(self)
def sum(self) :
return sum(self.chromosome)
def __len__(self) :
return len(self.chromosome)
# def __eq__(self, another) :
# return "".join(self.chromosome) == "".join(another.chromosome)
class WeightedIndividual(Individual) :
def __init__(self, *args, **kwargs) :
Individual.__init__(self, *args, **kwargs)
self.cost = kwargs["cost"]
# self.reweigh()
def weights(self) :
indices = self.indexOfPositive()
w = [self.chromosome[i] for i in indices]
# for i in indices :
# w.append(self.chromosome[i])
return indices, np.array(w)
def reweigh(self) :
self.chromosome = (np.array(self.chromosome) / sum(self.chromosome) * self.cost).tolist()
def mutate(self, t = 0.1, prob=None) :
if prob is None :
prob = np.random.normal(0, 1, len(self.chromosome))
# prob = (prob - np.mean(prob)) / (np.max(prob) - np.min(prob))
prob = 0 + (1 - (0)) * (prob - np.min(prob)) / (np.max(prob) - np.min(prob))
# prob = np.random.rand(len(self.chromosome))
factor = []
for p in prob :
if abs(p) <= t :
factor.append(0)
else :
factor.append(p)
factor = np.array(factor)
# factor = prob if prob < t else 1
action = lambda x, p : (1 - p ) * x + p * ( self.cost - x )
chromosome = action(np.array(self.chromosome), factor)
# Equal Rights: make every gene has the proportion of cost as they own the weights
chromosome = (chromosome / sum(chromosome) * self.cost).tolist()
generation = self.generation + 1
age = 0
self.grow()
return WeightedIndividual(chromosome, generation, age, cost=self.cost)
def __getitem__(self,k) :
if type(k) == int :
return WeightedIndividual(self.chromosome[k],self.generation,self.age, cost=self.cost)
if type(k) == slice :
return WeightedIndividual(self.chromosome[k],self.generation,self.age, cost=self.cost)
elif isinstance(k, Collection) :
s = | pd.Series(self.chromosome) | pandas.Series |
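# --- Usage sketch (not part of the original snippet) -------------------------
# Minimal, illustrative use of the GA classes defined above with synthetic
# chromosome values. The Collection branch of WeightedIndividual.__getitem__ is
# truncated by the snippet boundary, so only whole-object operations are shown.
_ind = Individual([1, 0, 1, 1, 0], 0, 0)
_child = _ind.mutate(t=0.2)
print("parent:", _ind, "child:", _child)

_w = WeightedIndividual([10.0, 30.0, 60.0], 0, 0, cost=100.0)
print("positive weights:", _w.weights())
print("mutated:", _w.mutate(t=0.1))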
import datetime
import pandas as pd
from pandas import DataFrame, Series
from pandas.api.extensions import ExtensionArray, ExtensionDtype
from pandas.api.extensions import register_extension_dtype
from qapandas.base import QAPandasBase
from enum import Enum
import numbers
from collections.abc import Iterable
import numpy as np
class QACode(Enum):
orig = 0
auto = 1
manu = 2
gapf = 3
class QADtype(ExtensionDtype):
type = QACode
name = "qacode"
na_value = np.nan
@classmethod
def construct_from_string(cls, string):
if string == cls.name:
return cls()
else:
raise TypeError(
"Cannot construct a '{}' from '{}'".format(cls.__name__, string)
)
@classmethod
def construct_array_type(cls):
return QAArray
register_extension_dtype(QADtype)
class QAArray(ExtensionArray):
"""
Class wrapping a numpy array of QA bits and
holding the array-based implementations.
"""
_dtype = QADtype()
def __init__(self, data):
if isinstance(data, self.__class__):
data = data.data
elif not isinstance(data, np.ndarray):
raise TypeError(
"'data' should be array of qa objects."
)
elif not data.ndim == 1:
raise ValueError(
"'data' should be a 1-dimensional array of qa objects."
)
self.data = data
@property
def dtype(self):
return self._dtype
def __len__(self):
        return len(self.data)
def __getitem__(self, idx):
if isinstance(idx, numbers.Integral):
return self.data[idx]
# array-like, slice
if pd.api.types.is_list_like(idx):
# for pandas >= 1.0, validate and convert IntegerArray/BooleanArray
# to numpy array
if not pd.api.types.is_array_like(idx):
idx = pd.array(idx)
dtype = idx.dtype
if pd.api.types.is_bool_dtype(dtype):
idx = pd.api.indexers.check_bool_array_indexer(self, idx)
elif pd.api.types.is_integer_dtype(dtype):
idx = np.asarray(idx, dtype="int")
if isinstance(idx, (Iterable, slice)):
            return QAArray(self.data[idx])
else:
raise TypeError("Index type not supported", idx)
def __setitem__(self, key, value):
if isinstance(value, pd.Series):
value = value.values
if isinstance(value, (list, np.ndarray)):
            value = QAArray(np.asarray(value, dtype=object))  # wrap plain sequences in the extension array
if isinstance(value, QAArray):
if isinstance(key, numbers.Integral):
raise ValueError("cannot set a single element with an array")
self.data[key] = value.data
        elif isinstance(value, QACode) or pd.isna(value):
            if pd.isna(value):
# internally only use None as missing value indicator
# but accept others
value = None
if isinstance(key, (list, np.ndarray)):
value_array = np.empty(1, dtype=object)
value_array[:] = [value]
self.data[key] = value_array
else:
self.data[key] = value
else:
raise TypeError(
"Value should be either a QACode or None, got %s" % str(value)
)
def _basic_config(self):
    # Module-level helper shared by the QA classes (it is called as a plain
    # function from QASeries.__init__ below).
    # defaults
    self._raw = None
    self._qa = None
    self._qa_generated = False
    self._history = []
    TSTAMP = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
    self._history.append(f"{TSTAMP} - Initiated QA Series")
class QASeries(QAPandasBase, Series):
"""
    A QASeries object is a pandas.Series that also has a reference to the
    raw original data, a quality flag for values and a history.
"""
_metadata = ["_raw", "_qa", "_history"]
def __init__(self, *args, **kwargs):
super(QASeries, self).__init__(*args, **kwargs)
_basic_config(self)
self._raw = Series(*args, **kwargs)
@property
def _constructor(self):
return QASeries
@property
def _constructor_expanddim(self):
return QADataFrame
# def _wrapped_pandas_method(self, mtd, *args, **kwargs):
# """Wrap a generic pandas method to ensure it returns a QASeries"""
# val = getattr(super(QASeries, self), mtd)(*args, **kwargs)
# if type(val) == Series:
# val.__class__ = QASeries
# val._history = self._history
# val._invalidate_qadata()
# return val
# def __getitem__(self, key):
# return self._wrapped_pandas_method("__getitem__", key)
def __repr__(self):
return(f"{ | Series.__repr__(self) | pandas.Series.__repr__ |
import pandas as pd
import numpy as np
import matplotlib.colors
import matplotlib.pyplot as plt
import seaborn as sns
def get_substrate_info(substrate_string, colname, carbo_df):
"""Get values in a column of the carbohydrates spreadsheet based on a string-list of substrates.
Parameters:
substrate_string (str): list of substrates represented as a string with each value separated by "; "
colname (str): name of the column in the carbohydrates spreadsheet to access
carbo_df: dataframe of carbohydrates
Returns:
str: "; "-separated set (no repeats) of items in the column specified by colname for the rows specified by substrate_string
    float: np.nan is returned if substrate_string itself is missing (NaN)
"""
if not | pd.isna(substrate_string) | pandas.isna |
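# --- Possible completion (a sketch, not the original code) -------------------
# The function body above is cut off right after the pd.isna() check. One way
# it could continue, matching the docstring; the column name "substrate" in
# carbo_df is an assumption and may differ in the real spreadsheet.
def get_substrate_info_sketch(substrate_string, colname, carbo_df):
    if pd.isna(substrate_string):
        # No substrate list for this row -> propagate the missing value.
        return np.nan
    substrates = substrate_string.split("; ")
    # Select the carbohydrate rows named in the substrate list.
    rows = carbo_df[carbo_df["substrate"].isin(substrates)]
    values = set(rows[colname].dropna().astype(str))
    return "; ".join(sorted(values)) if values else np.nan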
# -*- coding: utf-8 -*-
"""Aggregating feed-in time series for the model regions.
SPDX-FileCopyrightText: 2016-2019 <NAME> <<EMAIL>>
SPDX-License-Identifier: MIT
"""
__copyright__ = "<NAME> <<EMAIL>>"
__license__ = "MIT"
# Python libraries
import logging
import os
import datetime
# External libraries
import pandas as pd
from workalendar.europe import Germany
# oemof packages
import oemof.tools.logger as logger
import demandlib.bdew as bdew
# internal modules
from reegis import config as cfg
from reegis import energy_balance
from reegis import demand_heat
import reegis.coastdat
import reegis.energy_balance
import reegis.geometries
import reegis.bmwi
import berlin_hp.my_open_e_quarter
def load_heat_data(filename=None, method='oeq', fill_frac_column=True,
region='berlin'):
if method == 'oeq':
if filename is None:
filename = cfg.get('oeq', 'results').format(region=region)
fn = os.path.join(cfg.get('paths', 'oeq'), filename)
if not os.path.isfile(fn):
berlin_hp.my_open_e_quarter.oeq()
data = pd.read_hdf(fn, method)
elif method == 'wt':
if filename is None:
filename = 'waermetool_berlin.hdf'
data = pd.HDFStore(os.path.join(cfg.get('paths', 'wt'), filename))
else:
logging.warning('No data file found.')
data = None
if fill_frac_column:
data = fill_fraction_column(data)
return data
def fill_fraction_column(data):
# Get the columns with the fraction of the fuel
frac_cols = [x for x in data.columns if 'frac_' in x]
# Divide columns with 100 to get the fraction instead of percentage
data[frac_cols] = data[frac_cols].div(100)
# Sum up columns to check if the sum is 1.
data['check'] = data[frac_cols].sum(axis=1)
# Level columns if sum is between 0.95 and 1.
data.loc[data['check'] > 0.95, frac_cols] = (
data.loc[data['check'] > 0.95, frac_cols].multiply(
(1 / data.loc[data['check'] > 0.95, 'check']), axis=0))
# Update the check column.
data['check'] = data[frac_cols].sum(axis=1)
# Get the average values for each fraction
length = len(data.loc[round(data['check']) == 1, frac_cols])
s = data.loc[data['check'] > 0.95, frac_cols].sum() / length
# add average values if fraction is missing
data.loc[data['check'] < 0.1, frac_cols] = (
data.loc[data['check'] < 0.1, frac_cols] + s)
# Update the check column.
data['check'] = data[frac_cols].sum(axis=1)
length = float(len(data['check']))
check_sum = data['check'].sum()
if check_sum > length + 1 or check_sum < length - 1:
logging.warning("The fraction columns do not equalise 1.")
return data
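# --- Illustration (not part of the original module) --------------------------
# fill_fraction_column() on a tiny synthetic table: row 0 already sums to
# 100 %, row 1 is empty and receives the average split (0.6 / 0.4).
_example = pd.DataFrame({"frac_gas": [60.0, 0.0], "frac_oil": [40.0, 0.0]})
_example_filled = fill_fraction_column(_example)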
def demand_by(data, demand_column, heating_systems=None,
building_types=None, remove_string='', percentage=False):
"""
Adds a new table to the hdf-file where the demand is divided by
building types and/or heating systems.
Parameters
----------
data : pandas.DataFrame
demand_column : string
Name of the column with the overall demand
heating_systems : list of strings
List of column names. The columns should contain the
fraction of each heating system. The sum of all these
columns should be 1 (or 100) for each row. If the sum is
100 the percentage parameter (prz) must be set to True
building_types : dictionary or None
All building types with their condition. If None no distinction
between building types.
remove_string : string
Part of the column names of the heating systems that
should be removed to name the results. If the column is
name "fraction_of_district_heating" the string could be
"fraction_of_" to use just "district_heating" for the name
of the result column.
percentage : boolean
True if the fraction of the heating system columns sums up
to hundred instead of one.
Returns
-------
"""
if percentage:
prz = 100
else:
prz = 1
# if building_types is None all building will be fetched
if building_types is None:
building_types = {'all': '{0} == {0}'.format(demand_column)}
# Create an empty DataFrame with the same index as the data DataFrame
demand_by_building = pd.DataFrame(
index=data.index)
# Loop over the building types and use the condition to filter.
# The value from demand column is written into the new condition
# column if the condition is true
for btype, condition in building_types.items():
demand_by_building.loc[data.query(
condition).index, btype] = (
data[demand_column][data.query(condition).index])
# Create an empty DataFrame with the same index as the data DataFrame
demand = pd.DataFrame(index=data.index)
# Get the columns from the buildings condition
loop_list = demand_by_building.keys()
# If heating system is None do not filter.
if heating_systems is None:
heating_systems = []
loop_list = []
logging.error(
"Demand_by without heating systems is not implemented")
blist = list()
for btype in loop_list:
# Create renaming dictionary
rename_dict = {
col: 'demand_' + btype + '_' + col.replace(
remove_string, '')
for col in heating_systems}
# Multiply each buildings column with the heating system fraction
demand = demand.combine_first(
data[heating_systems].multiply(
demand_by_building[btype], axis='index').div(prz))
# Rename the columns
demand = demand.rename(columns=rename_dict)
# Create a list with name of building columns with
blist.extend(list((btype, )) * len(heating_systems))
hlist = heating_systems * len(set(blist))
multindex = pd.MultiIndex.from_tuples(list(zip(blist, hlist)),
names=['first', 'second'])
    return pd.DataFrame(data=demand.to_numpy(), columns=multindex,
index=data.index)
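# --- Illustration (not part of the original module) --------------------------
# demand_by() on a tiny synthetic table: two buildings, one overall demand
# column and two heating-system fraction columns (fractions sum to 1).
_buildings = pd.DataFrame({
    "my_total": [100.0, 50.0],
    "frac_gas": [0.6, 0.2],
    "frac_oil": [0.4, 0.8],
})
_demand_split = demand_by(_buildings, "my_total",
                          heating_systems=["frac_gas", "frac_oil"],
                          remove_string="frac_")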
def dissolve(data, level, columns=None):
"""
Parameters
----------
data : pandas.DataFrame
level : integer or string
1 = district, 2 = prognoseraum, 3 = bezirksregion, 4 = planungsraum
columns : string or list
Name of the column in the given table.
Returns
-------
pandas.Series
Dissolved Column.
"""
if columns is None:
columns = list(data.columns)
data['lor'] = data['lor'].astype(str).str.zfill(8)
error_level = level
if isinstance(level, str):
trans_dict = {'bezirk': 1,
'prognoseraum': 2,
'bezirksregion': 3,
'planungsraum': 4}
level = trans_dict.get(level)
if level is None:
logging.error("Wrong level: {0}".format(error_level))
level *= 2
results = data.groupby(data['lor'].str[:level])[columns].sum()
# self.annual_demand = results
return results
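# --- Illustration (not part of the original module) --------------------------
# dissolve() with synthetic LOR codes: the first two rows share the district
# prefix '01' and are therefore summed together at level 'bezirk'.
_blocks = pd.DataFrame({"lor": ["01011101", "01011102", "02020201"],
                        "demand": [10.0, 5.0, 7.0]})
_by_district = dissolve(_blocks, "bezirk", columns=["demand"])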
def get_end_energy_data(year, state='BE'):
"""End energy demand from energy balance (reegis)
"""
filename_heat_reference = os.path.join(
cfg.get('paths', 'oeq'), 'heat_reference_TJ_{0}_{1}.csv'.format(
year, state))
if not os.path.isfile(filename_heat_reference):
eb = energy_balance.get_usage_balance(
year=year, grouped=True)
end_energy_table = eb.loc[state]
end_energy_table.to_csv(filename_heat_reference)
else:
end_energy_table = pd.read_csv(filename_heat_reference, index_col=[0])
return end_energy_table
def get_district_heating_areas():
"""TODO: Die Erstellung der Karte ist derzeit 'nur' beschrieben. Die
Erstellung fehlt noch.
Read map with areas of district heating systems in Berlin
This map is the results of the intersection of the block map with the
district heat systems map.
"""
dh_filename = os.path.join(
cfg.get('paths', 'data_berlin'),
cfg.get('district_heating', 'map_district_heating_areas'))
distr_heat_areas = pd.read_csv(dh_filename, index_col=[0])
# Replace alphanumeric code from block id
distr_heat_areas['gml_id'] = distr_heat_areas['gml_id'].str.replace(
's_ISU5_2015_UA.', '')
return distr_heat_areas
def create_standardised_heat_load_profile(shlp, year):
"""
Parameters
----------
shlp : dict
year : int
Returns
-------
pandas.DataFrame
"""
avg_temp_berlin = (reegis.coastdat.federal_state_average_weather(
year, 'temp_air')['BE'])
# Calculate the average temperature in degree Celsius
    temperature = avg_temp_berlin - 273.15
# Fetch the holidays of Germany from the workalendar package
cal = Germany()
holidays = dict(cal.holidays(year))
profile_type = pd.DataFrame()
for shlp_type in shlp.keys():
shlp_name = str(shlp_type)
profile_type[shlp_name] = bdew.HeatBuilding(
temperature.index, holidays=holidays, temperature=temperature,
shlp_type=shlp_type, wind_class=0,
building_class=shlp[shlp_type]['build_class'],
annual_heat_demand=1000,
name=shlp_name, ww_incl=True).get_bdew_profile()
return profile_type
def create_heat_profiles(year, region='berlin'):
"""Create heat_profiles for the basic scenario as time series in MW.
- district heating time series for the different district heating systems
- decentralised heating demand time series for different fuels
Parameters
----------
year : int
The year of the basic scenario.
region : str or int
Region or LOR to load the heat data from.
Returns
-------
pandas.DataFrame
"""
logging.info("Creating heat profiles...")
# allocation of district heating systems (map) to groups (model)
district_heating_groups = cfg.get_dict('district_heating_systems')
# A file with a heat factor for each building type of the alkis
# classification. Buildings like garages etc get the heat-factor 0. It is
# possible to define building factors between 0 and 1.
filename_heat_factor = os.path.join(
cfg.get('paths', 'data_berlin'),
cfg.get('oeq', 'alkis_heat_factor_table'))
heat_factor = pd.read_csv(filename_heat_factor, index_col=[0])
del heat_factor['gebaeude_1']
# heat demand for each building from open_e_quarter
data = load_heat_data()
# Every building has a block id from the block the building is located.
# Every block that touches a district heating area has the STIFT (number)
# of this district heating system. By merging this information every
# building gets the STIFT (number) of the district heating area.
# areas of district heating systems in Berlin
distr_heat_areas = get_district_heating_areas()
data = data.merge(distr_heat_areas[['gml_id', 'STIFT']],
left_on='block', right_on='gml_id', how='left')
# Merge the heat-factor for each building type to the alkis types
data = data.merge(heat_factor, left_on='building_function',
right_index=True)
data = data[['block', 'lor', 'frac_elec', 'frac_district_heating',
'frac_gas', 'frac_oil', 'frac_coal', 'HLAC', 'HLAP', 'AHDC',
'AHDP', 'my_total', 'check', 'gml_id', 'STIFT',
'heat_factor', 'ghd', 'mfh']]
# Multiply the heat demand of the buildings with the heat factor
data['total'] = data['my_total'] * data['heat_factor']
data['lor'] = data.lor.apply(str)
# if region != 'berlin':
# berlin_total = data.total.sum()
# data['lor'] = data.lor.apply(str)
# data = data.loc[data.lor.str.startswith(str(region))]
# region_factor = data.total.sum() / berlin_total
# else:
# region_factor = 1
# Level the overall heat demand with the heat demand from the energy
# balance. Get energy balance first.
end_energy_table = demand_heat.heat_demand(year).loc['BE']
# bmwi_table = reegis.bmwi.read_bmwi_sheet_7()
tab_a = reegis.bmwi.read_bmwi_sheet_7('a')
tab_b = reegis.bmwi.read_bmwi_sheet_7('b')
# calculate the fraction of process energy and building heat (with dhw)
heat_process = {}
p = 'sonstige Prozesswärme'
r = 'Raumwärme'
w = 'Warmwasser'
s = 'private Haushalte'
heat_process['domestic'] = (tab_b.loc[(s, p, p), year] / (
tab_b.loc[(s, p, p), 2014] +
tab_b.loc[(s, r, r), 2014] +
tab_b.loc[(s, w, w), 2014]))
s = '<NAME> '
heat_process['retail'] = (tab_b.loc[(s, p, p), year] / (
tab_b.loc[(s, p, p), 2014] +
tab_b.loc[(s, r, r), 2014] +
tab_b.loc[(s, w, w), 2014]))
s = 'Industrie'
heat_process['industrial'] = (tab_a.loc[(s, p, p), year] / (
tab_a.loc[(s, p, p), 2014] +
tab_a.loc[(s, r, r), 2014] +
tab_a.loc[(s, w, w), 2014]))
# multiply the energy balance with the building/process fraction
profile_type = | pd.DataFrame(columns=end_energy_table.columns) | pandas.DataFrame |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Mar 5 10:15:25 2021
@author: lenakilian
"""
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
import geopandas as gpd
ghg_year = 2015 # 2017
wd = r'/Users/lenakilian/Documents/Ausbildung/UoLeeds/PhD/Analysis/'
years = list(range(2007, 2018, 2))
geog = 'MSOA'
dict_cat = 'category_8'
lookup = pd.read_csv(wd + 'data/raw/Geography/Conversion_Lookups/UK_full_lookup_2001_to_2011.csv')\
[['MSOA11CD', 'MSOA01CD', 'RGN11NM']].drop_duplicates()
ew_shp = gpd.read_file(wd + 'data/raw/Geography/Shapefiles/EnglandWales/msoa_2011_ew.shp')\
.set_index('msoa11cd').join(lookup.set_index('MSOA11CD'), how='left')
lon_shp = ew_shp.loc[ew_shp['RGN11NM'] == 'London']
emissions = {}
for year in years:
year_difference = years[1] - years[0]
year_str = str(year) + '-' + str(year + year_difference - 1)
emissions[year] = pd.read_csv(wd + 'data/processed/GHG_Estimates/' + geog + '_' + year_str + '.csv', index_col=0)
# income data
income = {}
income[2017] = | pd.read_csv(wd + 'data/raw/Income_Data/equivalised_income_2017-18.csv', header=4, encoding='latin1') | pandas.read_csv |
# -*- coding: utf-8 -*-
import pandas as pd
import numpy as np
def main():
df = | pd.read_csv('../../data/complete_df_7.csv') | pandas.read_csv |
# -*- coding: utf-8 -*-
# Read specimen test result data (per patient) and output specimen test result data (per test item)
#  └→ RS_Base_labo files
#
# Input files
#  └→ patient master file          : name.csv
#  └→ specimen test result files   : <patient ID>.txt (e.g. 101.txt, 102.txt, 103.txt, ...)
#
# Create 2017/07/09 : Update 2017/07/09
# Author Katsumi.Oshiro
import csv           # csv module (reading and writing CSV files)
import glob          # glob module (filename pattern matching)
import pandas as pd  # pandas module
import os            # os module
print('# Building specimen test data from RS_Base_labo data (START)')
# Build a dictionary of patient ID -> date of birth
birth = {}
# Read the patient master file (name.csv)
count = 0
with open('../data/name.csv', 'r')as f:
reader = csv.reader(f)
for row in reader:
# print(row[0],row[1],row[2],row[3])
birth.update({row[0]:row[3]})
count += 1
print('# Number of patient master records read --------->', count)
# Dictionary (birth): date-of-birth lookup test (patient ID: 679)
print('# Dictionary test: date of birth of patient ID 679 ->', birth["679"])
# Age calculation test (patient ID: 679)
today = int( | pd.to_datetime('today') | pandas.to_datetime |
__author__ = 'qchasserieau'
import json
import time
import warnings
from random import random
import numpy as np
import pandas as pd
import pyproj
import requests
import shapely
from tqdm import tqdm
try:
from geopy.distance import geodesic # works for geopy version >=2
except ImportError:
warnings.warn('Your geopy version is <2 while the latest available version is >=2', FutureWarning)
from geopy.distance import vincenty as geodesic # works for geopy version <2
from syspy.spatial import spatial
wgs84 = pyproj.Proj("EPSG:4326")
def dist_from_row(row, projection=wgs84):
"""
Uses vincenty formula to calculate the euclidean distances of an origin-destination pair.
:param row: a pd.Series containing the coordinates of the origin and the destination
:type row: pd.Series
:param projection: projection of the zoning
:type projection: pyproj.Proj
:return: euclidean_distance: euclidean distance of the origin-destination
:rtype: int
"""
coordinates_origin = (pyproj.transform(projection, wgs84, row['x_origin'], row['y_origin']))
coordinates_origin = (coordinates_origin[1], coordinates_origin[0])
coordinates_destination = (pyproj.transform(projection, wgs84, row['x_destination'], row['y_destination']))
coordinates_destination = (coordinates_destination[1], coordinates_destination[0])
return geodesic(coordinates_origin, coordinates_destination).m
def euclidean(zones, coordinates_unit='degree', projection=wgs84, epsg=False, method='numpy', origins=False, destinations=False,
latitude=False, longitude=False, intrazonal=False):
"""
Calculates the euclidean distances between each origin-destination pair of a zoning.
If the coordinates are in degree, the Vincenty formula is used.
:param zones: a shape dataframe containing the geometries (polygons) of the zoning
:type zones: pd.DataFrame
:param coordinates_unit: degree or meter
:type coordinates_unit: str
:param origins: a list of id of the zones from which the euclidean distance is needed
:type origins: list
:param destinations: a list of id of the zones to which the euclidean distance is needed
:type destination: list
:param method: 'numpy' or 'vincenty' numpy is faster but only handles wgs84 epsg 4326
:type method: str
:param projection: projection of the zoning
:type projection: pyproj.Proj
:param epsg: epsg code of the projection, if given, the projection arg is overwritten
:type projection: int or str
:param intrazonal: (bool), if True a non-zero intrazonal distance is computed.
In this case an intrazonal projection system must be provided
:return: euclidean_distance_dataframe: a pd.DataFrame with the coordinates of the centroids
and the euclidean distances between the zones
:rtype: pd.DataFrame
"""
    projection = pyproj.Proj("EPSG:" + str(epsg)) if epsg else projection
if 'geometry' in zones.columns:
z = zones[['geometry']].copy()
z['x'] = z['geometry'].apply(lambda g: g.centroid.coords[0][0])
z['y'] = z['geometry'].apply(lambda g: g.centroid.coords[0][1])
z.drop(['geometry'], axis=1, inplace=True)
elif bool(latitude) & bool(longitude):
z = zones[[latitude, longitude]].copy()
z['x'] = z[longitude]
z['y'] = z[latitude]
else:
print('If the DataFrame has no "geometry" field, longitude and latitude should be provided')
# zones_destination = zones_destination if zones_destination
iterables = [zones.index] * 2
od = pd.DataFrame(index=pd.MultiIndex.from_product(iterables, names=['origin', 'destination'])).reset_index()
od = pd.merge(od, z, left_on='origin', right_index=True)
od = pd.merge(od, z, left_on='destination', right_index=True, suffixes=['_origin', '_destination'])
if origins:
od = od[od['origin'].isin(origins)]
if destinations:
od = od[od['destination'].isin(destinations)]
# Compute distance
if coordinates_unit == 'degree':
if method == 'numpy':
columns = ['x_origin', 'y_origin', 'x_destination', 'y_destination']
od['euclidean_distance'] = get_distance_from_lon_lat_in_m(*[od[s] for s in columns])
else:
od['euclidean_distance'] = od.apply(dist_from_row, axis=1, args={projection})
elif coordinates_unit == 'meter':
od['euclidean_distance'] = np.sqrt(
(od['x_origin'] - od['x_destination'])**2
+ (od['y_origin'] - od['y_destination'])**2
)
else:
        raise ValueError('Invalid coordinates_unit.')
if intrazonal:
for i in od.index:
if od['origin'][i] == od['destination'][i]:
                od.loc[i, 'euclidean_distance'] = np.sqrt(zones['area'][od['origin'][i]]) / 2
return od[['origin', 'destination', 'euclidean_distance', 'x_origin', 'y_origin', 'x_destination', 'y_destination']]
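# --- Illustration (not part of the original module) --------------------------
# euclidean() with plain latitude/longitude columns (synthetic centroids).
# method='vincenty' forces the row-wise geodesic formula so the numpy helper
# defined elsewhere in this module is not needed; exact behaviour depends on
# the installed pyproj/geopy versions.
_zones = pd.DataFrame({"lat": [48.85, 45.76], "lon": [2.35, 4.83]},
                      index=["paris", "lyon"])
_distances = euclidean(_zones, coordinates_unit="degree",
                       latitude="lat", longitude="lon", method="vincenty")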
# google maps ################################################
def all_skim_matrix(zones=None, token=None, od_matrix=None, coordinates_unit='degree', **skim_matrix_kwargs):
if od_matrix is not None:
df = od_matrix.copy()
else:
df = euclidean(zones, coordinates_unit=coordinates_unit)
try:
assert token is not None
if isinstance(token, str):
            token = [token, token]  # it seems the stack gets emptied too early
t = token.pop()
print('Building driving skims matrix with Google API.')
skim_lists = []
rows = tqdm(list(df.iterrows()), 'skim matrix')
for index, row in rows:
computed = False
while computed is False and token:
try:
skim_lists.append(
driving_skims_from_row(
row,
t,
**skim_matrix_kwargs
)
)
computed = True
except TokenError as e:
print(e)
try:
t = token.pop()
print('Popped:', t)
except Exception:
print('Could not complete the skim matrix computation: not enough credentials.')
df[['distance', 'duration', 'duration_in_traffic']] = pd.DataFrame(skim_lists)
print('Done')
except IndexError as e:
print('Exception [%s] occured' % e)
print('WARNING: the build of the real skim matrix has failed.')
df[['distance', 'duration']] = df.apply(
pseudo_driving_skims_from_row, args=[token], axis=1)
print('A random one has been generated instead to allow testing of the next steps.')
return df
def skim_matrix(zones, token, n_clusters, coordinates_unit='degree', skim_matrix_kwargs={}):
clusters, cluster_series = spatial.zone_clusters(zones, n_clusters, 1e-9)
cluster_euclidean = all_skim_matrix(
clusters,
token,
coordinates_unit=coordinates_unit,
**skim_matrix_kwargs
)
df = euclidean(zones, coordinates_unit=coordinates_unit)
df = pd.merge(
df,
pd.DataFrame(cluster_series),
left_on='origin',
right_index=True)
df = pd.merge(
df,
| pd.DataFrame(cluster_series) | pandas.DataFrame |
import collections
import pandas as pd
big_list = [[{'автопродление': 1},
{'аккаунт': 1},
{'акция': 2},
{'безумный': 1},
{'бесплатно': 1},
{'бесплатнои': 1},
{'бесплатныи': 1},
{'бесплатный': 1},
{'бесценок': 1},
{'билет': 2},
{'бритва': 1},
{'бритвеныи': 1},
{'важный': 2},
{'вводить': 1},
{'деиствует': 1},
{'забудь': 1},
{'заполнять': 1},
{'заходить': 1},
{'заявка': 1},
{'идти': 1},
{'канал': 1},
{'карта': 1},
{'кино': 2},
{'кинопоиск': 1},
{'ленись': 1},
{'наидете': 1},
{'неделя': 1},
{'новыи': 1},
{'отключить': 1},
{'пара': 1},
{'первый': 1},
{'переходить': 1},
{'подписка': 2},
{'подписываися': 1},
{'покупка': 2},
{'покупке': 1},
{'получать': 1},
{'получение': 1},
{'почту': 1},
{'премиум': 1},
{'привязывать': 1},
{'прийти': 1},
{'промо': 1},
{'промокоду': 1},
{'регистрировать': 1},
{'регистрируемся': 1},
{'саит': 1},
{'сеичас': 1},
{'скидка': 2},
{'совершенно': 1},
{'станок': 1},
{'телеграм': 1},
{'экономить': 1}],
[{'неделя': 1},
{'получать': 1},
{'саит': 1},
{'скидка': 6},
{'автоматически': 1},
{'антивирус': 1},
{'антивирусы': 1},
{'бит': 1},
{'возможность': 1},
{'временной': 1},
{'выбрать': 1},
{'даваите': 1},
{'деиствительно': 1},
{'деиствия': 1},
{'деиствовать': 1},
{'дополнительнои': 1},
{'дополнительный': 1},
{'других': 1},
{'другое': 1},
{'ждать': 1},
{'запись': 1},
{'запустить': 1},
{'защитный': 1},
{'использовать': 1},
{'ключ': 2},
{'код': 3},
{'компьютер': 1},
{'мочь': 1},
{'наиболее': 1},
{'новость': 1},
{'обеспечение': 4},
{'обновить': 1},
{'ограничить': 2},
{'отличный': 1},
{'парк': 1},
{'планировать': 1},
{'полугодовой': 1},
{'получить': 1},
{'популярный': 1},
{'посмотреть': 1},
{'предложение': 1},
{'применение': 1},
{'программный': 4},
{'продукт': 2},
{'распродажа': 2},
{'саите': 1},
{'скидкои': 1},
{'следующии': 1},
{'следующий': 1},
{'снижение': 1},
{'специальный': 1},
{'срок': 1},
{'супер': 2},
{'течение': 1},
{'упустить': 1},
{'устроиств': 1},
{'устроиства': 1},
{'учётный': 1},
{'хотеть': 1},
{'цена': 9}],
[{'наидете': 1},
{'неделя': 1},
{'первый': 2},
{'скидка': 4},
{'деиствительно': 2},
{'других': 1},
{'предложение': 2},
{'распродажа': 2},
{'снижение': 1},
{'цена': 5},
{'instagram': 1},
{'twitter': 1},
{'большинство': 1},
{'бренд': 1},
{'верить': 1},
{'вернее': 1},
{'вид': 1},
{'видео': 2},
{'витрина': 1},
{'витринный': 1},
{'выгодный': 1},
{'гарантию': 1},
{'делать': 1},
{'день': 1},
{'диктофон': 1},
{'другои': 1},
{'жж': 1},
{'закрываться': 2},
{'интересный': 1},
{'каждыи': 1},
{'количество': 1},
{'кстати': 1},
{'купить': 1},
{'логотип': 1},
{'магазин': 2},
{'маркет': 1},
{'медиамаркт': 1},
{'наидется': 1},
{'наидутся': 1},
{'например': 1},
{'находиться': 1},
{'небольшой': 3},
{'недавно': 1},
{'низкий': 2},
{'обещать': 2},
{'обман': 1},
{'общий': 1},
{'остаться': 2},
{'осуществлять': 1},
{'пестреть': 1},
{'писать': 1},
{'повыбирать': 1},
{'позиция': 1},
{'понадобиться': 1},
{'посетителеи': 1},
{'правда': 1},
{'правильно': 1},
{'продавать': 1},
{'производитель': 1},
{'размер': 1},
{'распродажный': 1},
{'рекламировать': 1},
{'связь': 1},
{'сервис': 1},
{'скореи': 1},
{'случай': 4},
{'случиться': 1},
{'сменить': 1},
{'смотреть': 1},
{'событие': 1},
{'сообщение': 1},
{'сообщить': 1},
{'соцсеть': 2},
{'сравниваите': 1},
{'сравнивать': 1},
{'старт': 1},
{'существенно': 1},
{'товар': 2},
{'трансляция': 2},
{'тщательно': 1},
{'увеличивать': 1},
{'уменьшаться': 1},
{'уникальныи': 1},
{'финальный': 1},
{'ходовой': 1},
{'центр': 1},
{'экземпляр': 1}],
[{'покупка': 1},
{'выбрать': 1},
{'продукт': 1},
{'саите': 2},
{'магазин': 1},
{'сервис': 1},
{'товар': 3},
{'уникальныи': 1},
{'брать': 2},
{'выбор': 1},
{'выкуп': 1},
{'груз': 1},
{'днеи': 1},
{'забота': 2},
{'заказ': 2},
{'заниматься': 1},
{'интернет': 3},
{'каталог': 2},
{'категория': 1},
{'мелко': 1},
{'мск': 1},
{'набор': 2},
{'нужный': 1},
{'объединение': 1},
{'оставить': 1},
{'остальные': 1},
{'откроить': 1},
{'оформление': 1},
{'параметр': 1},
{'перепаковке': 1},
{'подарочныи': 1},
{'подарочный': 1},
{'поддержка': 1},
{'полностью': 1},
{'полныи': 1},
{'посылка': 1},
{'праздничный': 1},
{'разный': 1},
{'сделать': 1},
{'служба': 1},
{'соблюдение': 1},
{'собрать': 1},
{'ссылка': 1},
{'таможенный': 1},
{'телефон': 1},
{'требовании': 1},
{'удобныи': 1},
{'указание': 1},
{'шопинг': 1}],
[{'канал': 1},
{'мочь': 1},
{'цена': 1},
{'видео': 1},
{'смотреть': 1},
{'товар': 4},
{'ссылка': 1},
{'безусловно': 1},
{'большои': 1},
{'боцманскии': 1},
{'вариант': 1},
{'внутренний': 1},
{'военнои': 1},
{'возможный': 1},
{'входить': 1},
{'глаз': 1},
{'дерево': 1},
{'довольно': 1},
{'доступный': 1},
{'друг': 1},
{'жми': 1},
{'защёлка': 1},
{'иметь': 2},
{'инструмент': 1},
{'карман': 1},
{'классный': 1},
{'кольцо': 1},
{'комплект': 1},
{'которои': 1},
{'крепление': 1},
{'крутой': 2},
{'лезвие': 1},
{'марлина': 1},
{'металического': 1},
{'металом': 1},
{'модификациеи': 1},
{'молния': 1},
{'морской': 1},
{'мужик': 1},
{'мужчик': 1},
{'наидет': 1},
{'наити': 1},
{'найти': 1},
{'накладка': 1},
{'наличие': 1},
{'настоящий': 1},
{'начать': 1},
{'нежелательный': 1},
{'необходимый': 1},
{'нержавеики': 1},
{'нож': 2},
{'основнои': 1},
{'основный': 1},
{'особенность': 1},
{'отличительнои': 1},
{'палированным': 1},
{'пластик': 1},
{'поддеть': 1},
{'популярнои': 1},
{'потаиным': 1},
{'поэтому': 1},
{'правило': 1},
{'представлять': 1},
{'преимущество': 1},
{'привет': 1},
{'простота': 1},
{'работа': 1},
{'ремень': 6},
{'ремня': 1},
{'рукоятка': 1},
{'самое': 1},
{'связке': 1},
{'складный': 1},
{'слишком': 1},
{'смочь': 1},
{'собои': 1},
{'сокровенный': 1},
{'статья': 1},
{'страховочный': 1},
{'таиника': 1},
{'таиником': 1},
{'такои': 1},
{'твёрдый': 1},
{'тканевыи': 1},
{'толстыи': 1},
{'топчик': 1},
{'увидеть': 1},
{'узел': 1},
{'часть': 1},
{'шип': 1},
{'являться': 2}],
[{'канал': 1},
{'покупка': 1},
{'сеичас': 1},
{'скидка': 5},
{'других': 1},
{'супер': 1},
{'товар': 3},
{'нужный': 1},
{'подарочныи': 1},
{'подарочный': 1},
{'разный': 1},
{'ремень': 1},
{'барсучий': 1},
{'благородный': 1},
{'больший': 1},
{'бритьё': 1},
{'быстрый': 1},
{'восторженный': 1},
{'вставка': 1},
{'выделка': 1},
{'выполнить': 1},
{'высокий': 1},
{'год': 1},
{'двоиными': 1},
{'длина': 1},
{'добавить': 1},
{'документ': 1},
{'доставка': 1},
{'древесина': 1},
{'дужки': 1},
{'зажимами': 1},
{'защитои': 1},
{'зеркальный': 1},
{'изготовить': 1},
{'исполнение': 1},
{'качество': 1},
{'кисть': 2},
{'клапанах': 1},
{'ключеи': 1},
{'кожа': 1},
{'кожаный': 2},
{'комфортный': 1},
{'коричневыи': 1},
{'коробка': 1},
{'кошелёк': 1},
{'красивый': 1},
{'красота': 1},
{'крем': 1},
{'круглый': 1},
{'лаик': 1},
{'линза': 1},
{'лицо': 1},
{'материал': 2},
{'мелочеи': 1},
{'металлическии': 1},
{'металлический': 2},
{'мех': 1},
{'моделеи': 1},
{'модель': 1},
{'модный': 1},
{'молниях': 1},
{'мужской': 1},
{'мужчина': 2},
{'накладками': 1},
{'нанесение': 2},
{'наплечныи': 1},
{'наслаждение': 1},
{'натуральный': 1},
{'нежный': 1},
{'новинка': 1},
{'ноутбук': 1},
{'оправа': 1},
{'отделение': 2},
{'отзыв': 2},
{'отзывы': 1},
{'отличнои': 1},
{'очень': 2},
{'очки': 1},
{'пена': 2},
{'плохой': 1},
{'подписываитесь': 1},
{'подтяжка': 1},
{'покупателеи': 1},
{'покупатель': 1},
{'полный': 1},
{'помазок': 1},
{'понравиться': 1},
{'портфель': 1},
{'превращаться': 1},
{'прекрасныи': 1},
{'прекрасный': 1},
{'признателен': 1},
{'продавец': 1},
{'пружинои': 1},
{'рекомендовать': 2},
{'ретро': 1},
{'решение': 1},
{'ручка': 2},
{'сантиметр': 2},
{'сдержанный': 1},
{'сегодня': 1},
{'спандекс': 1},
{'сплава': 1},
{'стекло': 1},
{'стиль': 1},
{'стильный': 1},
{'сумка': 1},
{'темно': 1},
{'тысяча': 1},
{'удобный': 2},
{'удобство': 1},
{'удовольствие': 1},
{'ультрафиолет': 1},
{'упаковать': 2},
{'фотохромный': 1},
{'футляр': 1},
{'хороший': 1},
{'худой': 1},
{'цвет': 1},
{'цветовой': 1},
{'цинк': 1},
{'черныи': 1},
{'ширина': 1},
{'эластичныи': 1}],
[{'покупка': 4},
{'даваите': 1},
{'использовать': 1},
{'посмотреть': 2},
{'цена': 2},
{'интересный': 1},
{'магазин': 2},
{'товар': 5},
{'набор': 2},
{'разный': 1},
{'самое': 1},
{'складный': 1},
{'статья': 1},
{'качество': 1},
{'кожа': 1},
{'коробка': 1},
{'крем': 1},
{'новинка': 7},
{'подписываитесь': 1},
{'цвет': 4},
{'автомобилист': 1},
{'апрель': 4},
{'аромат': 1},
{'ассортимент': 2},
{'банныи': 1},
{'бельё': 1},
{'блокноты': 1},
{'вакуумный': 1},
{'весёлый': 1},
{'волос': 1},
{'гель': 1},
{'гигиена': 1},
{'горшки': 1},
{'губка': 1},
{'дача': 1},
{'двухъярусная': 1},
{'детеи': 1},
{'детский': 2},
{'дизаинами': 1},
{'дизаины': 1},
{'дом': 2},
{'душе': 1},
{'желать': 1},
{'забываите': 1},
{'завезти': 1},
{'завершить': 1},
{'зеркало': 1},
{'зонт': 1},
{'иванов': 1},
{'игрушка': 4},
{'идея': 1},
{'канцелярия': 1},
{'кинетический': 1},
{'клавиатура': 1},
{'компас': 1},
{'конец': 2},
{'конструктор': 1},
{'копилка': 1},
{'корзина': 1},
{'коробочка': 1},
{'косметика': 2},
{'крышкои': 1},
{'лаванда': 1},
{'лаики': 1},
{'летний': 1},
{'магнитик': 1},
{'март': 6},
{'мочалка': 1},
{'мытьё': 1},
{'надувной': 1},
{'наносить': 1},
{'начало': 1},
{'новинками': 1},
{'новый': 1},
{'обзор': 9},
{'отдел': 1},
{'отделе': 1},
{'отдых': 1},
{'отсек': 1},
{'пакет': 1},
{'песок': 1},
{'песочница': 1},
{'подарок': 1},
{'подготовить': 1},
{'подробныи': 1},
{'полезный': 1},
{'полка': 1},
{'полотенце': 2},
{'полочка': 1},
{'постельный': 1},
{'посуда': 3},
{'появиться': 3},
{'предполагать': 1},
{'представить': 2},
{'приятный': 1},
{'проводной': 1},
{'проидемся': 1},
{'производство': 1},
{'пропустить': 1},
{'просмотр': 1},
{'простынь': 1},
{'прямо': 1},
{'пятёрочка': 3},
{'ремешок': 1},
{'роза': 1},
{'рублеи': 14},
{'светодиодныи': 1},
{'сказать': 1},
{'см': 2},
{'снова': 2},
{'сожаление': 1},
{'состав': 1},
{'спасибо': 1},
{'ставить': 1},
{'страничка': 1},
{'сушка': 1},
{'творчество': 1},
{'тело': 1},
{'трость': 1},
{'удачный': 1},
{'указать': 2},
{'уход': 2},
{'хранение': 2},
{'цветок': 1},
{'цифровой': 1},
{'читаите': 1},
{'щётка': 1}],
[{'покупка': 3},
{'деиствительно': 1},
{'дополнительнои': 1},
{'получить': 1},
{'цена': 4},
{'выгодный': 3},
{'купить': 4},
{'магазин': 5},
{'продавать': 1},
{'товар': 2},
{'заказ': 1},
{'интернет': 2},
{'комплект': 2},
{'смочь': 2},
{'покупатель': 1},
{'желать': 1},
{'приятный': 1},
{'рублеи': 2},
{'база': 1},
{'батарейка': 1},
{'быстро': 1},
{'вагин': 6},
{'вагины': 1},
{'вибрациеи': 5},
{'внимание': 1},
{'волосик': 1},
{'вставляться': 1},
{'выгоднои': 1},
{'выносной': 1},
{'джанин': 8},
{'известнои': 1},
{'интим': 1},
{'качественныи': 1},
{'лицензионныи': 1},
{'лобке': 1},
{'любрикант': 1},
{'максимально': 1},
{'название': 1},
{'недорого': 1},
{'описание': 1},
{'особый': 1},
{'отверстие': 1},
{'оформить': 1},
{'пальчиковый': 1},
{'положить': 1},
{'порнозвезды': 1},
{'пульт': 1},
{'работать': 1},
{'светлый': 1},
{'секс': 2},
{'слепок': 1},
{'совершение': 1},
{'стимуляция': 1},
{'тип': 1},
{'уважаемые': 1},
{'яицо': 1}],
[{'планировать': 1},
{'цена': 2},
{'продавать': 4},
{'экземпляр': 1},
{'модель': 1},
{'очень': 3},
{'рублеи': 1},
{'спасибо': 1},
{'акрил': 1},
{'бахроме': 1},
{'белыи': 1},
{'буклированные': 1},
{'вещь': 1},
{'длинныи': 2},
{'достаточно': 1},
{'единственный': 1},
{'изменю': 1},
{'метр': 1},
{'моеи': 1},
{'мягкий': 1},
{'наматываться': 1},
{'нежныи': 1},
{'неузнаваемость': 1},
{'нитка': 2},
{'огромный': 1},
{'оксана': 1},
{'повтор': 1},
{'повторю': 1},
{'пушистый': 1},
{'радуга': 1},
{'руб': 3},
{'сиреневыи': 1},
{'тонкии': 1},
{'фиолетовый': 1},
{'черно': 1},
{'шарф': 2},
{'шею': 1}],
[{'срок': 1},
{'цена': 1},
{'другои': 1},
{'днеи': 1},
{'заказ': 1},
{'оформление': 1},
{'работа': 1},
{'длина': 1},
{'модель': 1},
{'цвет': 3},
{'рублеи': 1},
{'см': 1},
{'нитка': 1},
{'шарф': 1},
{'белый': 1},
{'выполню': 1},
{'двустороннии': 1},
{'двухслоиныи': 1},
{'красный': 1},
{'крючок': 1},
{'молот': 1},
{'надпись': 1},
{'однои': 1},
{'подарить': 1},
{'пряжи': 1},
{'связать': 1},
{'серп': 1},
{'сторона': 1},
{'шерстянои': 1},
{'шерстяной': 1}],
[{'других': 1},
{'хотеть': 2},
{'цена': 2},
{'купить': 2},
{'размер': 1},
{'товар': 4},
{'брать': 1},
{'полностью': 1},
{'сделать': 1},
{'мех': 1},
{'приятный': 1},
{'рублеи': 1},
{'состав': 1},
{'руб': 1},
{'ангора': 1},
{'вопрос': 1},
{'гольф': 1},
{'дело': 1},
{'засунуть': 1},
{'знать': 1},
{'китае': 1},
{'место': 1},
{'меховой': 1},
{'новогодний': 1},
{'носок': 1},
{'ощупь': 1},
{'полиамид': 1},
{'полиэстер': 2},
{'рассчитать': 1},
{'рука': 1},
{'самом': 1},
{'светофор': 4},
{'тёплый': 1},
{'успеть': 1},
{'эластан': 1}]]
flat_list = [item for sublist in big_list for item in sublist]
# Accumulate word frequencies across all documents; a Counter sums counts for
# repeated words instead of overwriting them as a plain dict.update() would.
result = collections.Counter()
for i in flat_list:
    result.update(i)
counter = result.most_common()
print(counter)
dframe = | pd.DataFrame(counter, columns=["Word", "Count"]) | pandas.DataFrame |
import pandas as pd
import xlsxwriter
with open("authors_qcr.txt", encoding='utf-8') as f:
x = f.readlines()
s = []
for i in x:
s.append(i)
#clean_file.write(j)
print(s)
data = pd.DataFrame(s)
data2excel = | pd.ExcelWriter("wordcloud_test.xlsx", engine='xlsxwriter') | pandas.ExcelWriter |
import os
import shutil
from attrdict import AttrDict
import numpy as np
import pandas as pd
from scipy.stats import gmean
from deepsense import neptune
from sklearn.metrics import roc_auc_score
from sklearn.model_selection import train_test_split, KFold, StratifiedKFold
from . import pipeline_config as cfg
from .pipelines import PIPELINES
from .hyperparameter_tuning import RandomSearchTuner, HyperoptTuner, SkoptTuner, set_params
from .utils import init_logger, read_params, set_seed, create_submission, verify_submission, calculate_rank, \
read_oof_predictions, parameter_eval
set_seed(cfg.RANDOM_SEED)
logger = init_logger()
ctx = neptune.Context()
params = read_params(ctx, fallback_file='./configs/neptune.yaml')
class PipelineManager:
def train(self, pipeline_name, dev_mode):
train(pipeline_name, dev_mode)
def evaluate(self, pipeline_name, dev_mode):
evaluate(pipeline_name, dev_mode)
def predict(self, pipeline_name, dev_mode, submit_predictions):
predict(pipeline_name, dev_mode, submit_predictions)
def train_evaluate_cv(self, pipeline_name, model_level, dev_mode):
train_evaluate_cv(pipeline_name, model_level, dev_mode)
def train_evaluate_predict_cv(self, pipeline_name, model_level, dev_mode, submit_predictions):
train_evaluate_predict_cv(pipeline_name, model_level, dev_mode, submit_predictions)
def train(pipeline_name, dev_mode):
logger.info('TRAINING')
if bool(params.clean_experiment_directory_before_training) and os.path.isdir(params.experiment_directory):
logger.info('Cleaning experiment_directory...')
shutil.rmtree(params.experiment_directory)
tables = _read_data(dev_mode)
logger.info('Shuffling and splitting into train and test...')
train_data_split, valid_data_split = train_test_split(tables.train_set,
test_size=params.validation_size,
random_state=cfg.RANDOM_SEED,
shuffle=params.shuffle)
logger.info('Target mean in train: {}'.format(train_data_split[cfg.TARGET_COLUMNS].mean()))
logger.info('Target mean in valid: {}'.format(valid_data_split[cfg.TARGET_COLUMNS].mean()))
logger.info('Train shape: {}'.format(train_data_split.shape))
logger.info('Valid shape: {}'.format(valid_data_split.shape))
train_data = {'main_table': {'X': train_data_split.drop(cfg.TARGET_COLUMNS, axis=1),
'y': train_data_split[cfg.TARGET_COLUMNS].values.reshape(-1),
                                 'X_valid': valid_data_split.drop(cfg.TARGET_COLUMNS, axis=1),
'y_valid': valid_data_split[cfg.TARGET_COLUMNS].values.reshape(-1),
},
'application': {'X': tables.application},
'bureau_balance': {'X': tables.bureau_balance},
'bureau': {'X': tables.bureau},
'credit_card_balance': {'X': tables.credit_card_balance},
'installments_payments': {'X': tables.installments_payments},
'pos_cash_balance': {'X': tables.pos_cash_balance},
'previous_application': {'X': tables.previous_application},
}
pipeline = PIPELINES[pipeline_name](config=cfg.SOLUTION_CONFIG, train_mode=True)
pipeline.clean_cache()
logger.info('Start pipeline fit and transform')
pipeline.fit_transform(train_data)
pipeline.clean_cache()
def evaluate(pipeline_name, dev_mode):
logger.info('EVALUATION')
logger.info('Reading data...')
tables = _read_data(dev_mode)
logger.info('Shuffling and splitting to get validation split...')
_, valid_data_split = train_test_split(tables.train_set,
test_size=params.validation_size,
random_state=cfg.RANDOM_SEED,
shuffle=params.shuffle)
logger.info('Target mean in valid: {}'.format(valid_data_split[cfg.TARGET_COLUMNS].mean()))
logger.info('Valid shape: {}'.format(valid_data_split.shape))
y_true = valid_data_split[cfg.TARGET_COLUMNS].values
eval_data = {'main_table': {'X': valid_data_split.drop(cfg.TARGET_COLUMNS, axis=1),
'y': None,
},
'application': {'X': tables.application},
'bureau_balance': {'X': tables.bureau_balance},
'bureau': {'X': tables.bureau},
'credit_card_balance': {'X': tables.credit_card_balance},
'installments_payments': {'X': tables.installments_payments},
'pos_cash_balance': {'X': tables.pos_cash_balance},
'previous_application': {'X': tables.previous_application},
}
pipeline = PIPELINES[pipeline_name](config=cfg.SOLUTION_CONFIG, train_mode=False)
pipeline.clean_cache()
logger.info('Start pipeline transform')
output = pipeline.transform(eval_data)
pipeline.clean_cache()
y_pred = output['prediction']
logger.info('Calculating ROC_AUC on validation set')
score = roc_auc_score(y_true, y_pred)
logger.info('ROC_AUC score on validation is {}'.format(score))
ctx.channel_send('ROC_AUC', 0, score)
def predict(pipeline_name, dev_mode, submit_predictions):
logger.info('PREDICTION')
tables = _read_data(dev_mode)
test_data = {'main_table': {'X': tables.test_set,
'y': None,
},
'application': {'X': tables.application},
'bureau_balance': {'X': tables.bureau_balance},
'bureau': {'X': tables.bureau},
'credit_card_balance': {'X': tables.credit_card_balance},
'installments_payments': {'X': tables.installments_payments},
'pos_cash_balance': {'X': tables.pos_cash_balance},
'previous_application': {'X': tables.previous_application},
}
pipeline = PIPELINES[pipeline_name](config=cfg.SOLUTION_CONFIG, train_mode=False)
pipeline.clean_cache()
logger.info('Start pipeline transform')
output = pipeline.transform(test_data)
pipeline.clean_cache()
y_pred = output['prediction']
if not dev_mode:
logger.info('creating submission file...')
submission = create_submission(tables.test_set, y_pred)
logger.info('verifying submission...')
sample_submission = pd.read_csv(params.sample_submission_filepath)
verify_submission(submission, sample_submission)
submission_filepath = os.path.join(params.experiment_directory, 'submission.csv')
submission.to_csv(submission_filepath, index=None, encoding='utf-8')
logger.info('submission persisted to {}'.format(submission_filepath))
logger.info('submission head \n\n{}'.format(submission.head()))
if submit_predictions and params.kaggle_api:
make_submission(submission_filepath)
def train_evaluate_cv(pipeline_name, model_level, dev_mode):
if parameter_eval(params.hyperparameter_search__method) is not None:
score_mean, score_std = train_evaluate_cv_tuning(pipeline_name, model_level, dev_mode)
else:
score_mean, score_std = train_evaluate_cv_one_run(pipeline_name, model_level, cfg.SOLUTION_CONFIG, dev_mode)
logger.info('ROC_AUC mean {}, ROC_AUC std {}'.format(score_mean, score_std))
ctx.channel_send('ROC_AUC', 0, score_mean)
ctx.channel_send('ROC_AUC STD', 0, score_std)
def train_evaluate_cv_tuning(pipeline_name, model_level, dev_mode):
config = cfg.SOLUTION_CONFIG
searchable_config = cfg.SOLUTION_CONFIG.tuner
if params.hyperparameter_search__method == 'random':
tuner = RandomSearchTuner(config=searchable_config,
runs=params.hyperparameter_search__runs)
elif params.hyperparameter_search__method == 'skopt':
tuner = SkoptTuner(config=searchable_config,
runs=params.hyperparameter_search__runs,
maximize=True)
elif params.hyperparameter_search__method == 'hyperopt':
tuner = HyperoptTuner(config=searchable_config,
runs=params.hyperparameter_search__runs,
maximize=True)
else:
raise NotImplementedError
results = []
while tuner.in_progress:
if tuner.run_id == 0:
proposed_config = tuner.next(None)
else:
proposed_config = tuner.next(score_mean)
config = set_params(config, proposed_config)
score_mean, score_std = train_evaluate_cv_one_run(pipeline_name, model_level, config, dev_mode,
tunable_mode=True)
logger.info('Run {} ROC_AUC mean {}, ROC_AUC std {}'.format(tuner.run_id, score_mean, score_std))
ctx.channel_send('Tuning CONFIG', tuner.run_id, proposed_config)
ctx.channel_send('Tuning ROC_AUC', tuner.run_id, score_mean)
ctx.channel_send('Tuning ROC_AUC STD', tuner.run_id, score_std)
results.append((score_mean, score_std, proposed_config))
best_score_mean, best_score_std, best_config = sorted(results, key=lambda x: x[0])[-1]
logger.info('ROC_AUC mean {}, ROC_AUC std {}'.format(best_score_mean, best_score_std))
logger.info('Best Params'.format(best_config))
ctx.channel_send('BEST_CONFIG', str(best_config))
return best_score_mean, best_score_std
def train_evaluate_cv_one_run(pipeline_name, model_level, config, dev_mode, tunable_mode=False):
if bool(params.clean_experiment_directory_before_training) and os.path.isdir(params.experiment_directory):
logger.info('Cleaning experiment_directory...')
shutil.rmtree(params.experiment_directory)
if model_level == 'first':
tables = _read_data(dev_mode)
main_table_train = tables.train_set
elif model_level == 'second':
tables = _read_data(dev_mode=False)
main_table_train, main_table_test = read_oof_predictions(params.first_level_oof_predictions_dir,
params.train_filepath,
id_column=cfg.ID_COLUMNS[0],
target_column=cfg.TARGET_COLUMNS[0])
else:
raise NotImplementedError
target_values = main_table_train[cfg.TARGET_COLUMNS].values.reshape(-1)
fold_generator = _get_fold_generator(target_values)
fold_scores = []
for fold_id, (train_idx, valid_idx) in enumerate(fold_generator):
train_data_split, valid_data_split = main_table_train.iloc[train_idx], main_table_train.iloc[valid_idx]
logger.info('Started fold {}'.format(fold_id))
logger.info('Target mean in train: {}'.format(train_data_split[cfg.TARGET_COLUMNS].mean()))
logger.info('Target mean in valid: {}'.format(valid_data_split[cfg.TARGET_COLUMNS].mean()))
logger.info('Train shape: {}'.format(train_data_split.shape))
logger.info('Valid shape: {}'.format(valid_data_split.shape))
score, _, _ = _fold_fit_evaluate_loop(train_data_split,
valid_data_split,
tables,
fold_id, pipeline_name, config, model_level)
logger.info('Fold {} ROC_AUC {}'.format(fold_id, score))
if not tunable_mode:
ctx.channel_send('Fold {} ROC_AUC'.format(fold_id), 0, score)
fold_scores.append(score)
score_mean, score_std = np.mean(fold_scores), np.std(fold_scores)
return score_mean, score_std
def train_evaluate_predict_cv(pipeline_name, model_level, dev_mode, submit_predictions):
if bool(params.clean_experiment_directory_before_training) and os.path.isdir(params.experiment_directory):
logger.info('Cleaning experiment_directory...')
shutil.rmtree(params.experiment_directory)
if model_level == 'first':
tables = _read_data(dev_mode)
main_table_train = tables.train_set
main_table_test = tables.test_set
elif model_level == 'second':
tables = _read_data(dev_mode=False)
main_table_train, main_table_test = read_oof_predictions(params.first_level_oof_predictions_dir,
params.train_filepath,
id_column=cfg.ID_COLUMNS[0],
target_column=cfg.TARGET_COLUMNS[0])
main_table_test = main_table_test.groupby(cfg.ID_COLUMNS).mean().reset_index()
else:
raise NotImplementedError
target_values = main_table_train[cfg.TARGET_COLUMNS].values.reshape(-1)
fold_generator = _get_fold_generator(target_values)
fold_scores, out_of_fold_train_predictions, out_of_fold_test_predictions = [], [], []
for fold_id, (train_idx, valid_idx) in enumerate(fold_generator):
train_data_split, valid_data_split = main_table_train.iloc[train_idx], main_table_train.iloc[valid_idx]
logger.info('Started fold {}'.format(fold_id))
logger.info('Target mean in train: {}'.format(train_data_split[cfg.TARGET_COLUMNS].mean()))
logger.info('Target mean in valid: {}'.format(valid_data_split[cfg.TARGET_COLUMNS].mean()))
logger.info('Train shape: {}'.format(train_data_split.shape))
logger.info('Valid shape: {}'.format(valid_data_split.shape))
score, out_of_fold_prediction, test_prediction = _fold_fit_evaluate_predict_loop(train_data_split,
valid_data_split,
main_table_test,
tables,
fold_id,
pipeline_name,
model_level)
logger.info('Fold {} ROC_AUC {}'.format(fold_id, score))
ctx.channel_send('Fold {} ROC_AUC'.format(fold_id), 0, score)
out_of_fold_train_predictions.append(out_of_fold_prediction)
out_of_fold_test_predictions.append(test_prediction)
fold_scores.append(score)
out_of_fold_train_predictions = pd.concat(out_of_fold_train_predictions, axis=0)
out_of_fold_test_predictions = pd.concat(out_of_fold_test_predictions, axis=0)
test_prediction_aggregated = _aggregate_test_prediction(out_of_fold_test_predictions)
score_mean, score_std = np.mean(fold_scores), np.std(fold_scores)
logger.info('ROC_AUC mean {}, ROC_AUC std {}'.format(score_mean, score_std))
ctx.channel_send('ROC_AUC', 0, score_mean)
ctx.channel_send('ROC_AUC STD', 0, score_std)
logger.info('Saving predictions')
out_of_fold_train_predictions.to_csv(os.path.join(params.experiment_directory,
'{}_out_of_fold_train_predictions.csv'.format(pipeline_name)),
index=None)
out_of_fold_test_predictions.to_csv(os.path.join(params.experiment_directory,
'{}_out_of_fold_test_predictions.csv'.format(pipeline_name)),
index=None)
test_aggregated_file_path = os.path.join(params.experiment_directory,
'{}_test_predictions_{}.csv'.format(pipeline_name,
params.aggregation_method))
test_prediction_aggregated.to_csv(test_aggregated_file_path, index=None)
if not dev_mode:
logger.info('verifying submission...')
sample_submission = pd.read_csv(params.sample_submission_filepath)
verify_submission(test_prediction_aggregated, sample_submission)
if submit_predictions and params.kaggle_api:
make_submission(test_aggregated_file_path)
def make_submission(submission_filepath):
logger.info('making Kaggle submit...')
os.system('kaggle competitions submit -c home-credit-default-risk -f {} -m {}'
.format(submission_filepath, params.kaggle_message))
def _read_data(dev_mode):
logger.info('Reading data...')
if dev_mode:
nrows = cfg.DEV_SAMPLE_SIZE
logger.info('running in "dev-mode". Sample size is: {}'.format(cfg.DEV_SAMPLE_SIZE))
else:
nrows = None
if any([parameter_eval(params.use_bureau),
parameter_eval(params.use_bureau_aggregations)]):
nrows_bureau = nrows
else:
nrows_bureau = 1
if parameter_eval(params.use_bureau_balance):
nrows_bureau_balance = nrows
else:
nrows_bureau_balance = 1
if any([parameter_eval(params.use_credit_card_balance),
parameter_eval(params.use_credit_card_balance_aggregations)]):
nrows_credit_card_balance = nrows
else:
nrows_credit_card_balance = 1
if any([parameter_eval(params.use_installments_payments),
parameter_eval(params.use_installments_payments_aggregations)]):
nrows_installments_payments = nrows
else:
nrows_installments_payments = 1
if any([parameter_eval(params.use_pos_cash_balance),
parameter_eval(params.use_pos_cash_balance_aggregations)]):
nrows_pos_cash_balance = nrows
else:
nrows_pos_cash_balance = 1
if any([parameter_eval(params.use_previous_applications),
parameter_eval(params.use_previous_applications_aggregations),
parameter_eval(params.use_previous_application_categorical_features),
parameter_eval(params.use_application_previous_application_categorical_features)]):
nrows_previous_applications = nrows
else:
nrows_previous_applications = 1
raw_data = {}
logger.info('Reading application_train ...')
application_train = pd.read_csv(params.train_filepath, nrows=nrows)
logger.info("Reading application_test ...")
application_test = pd.read_csv(params.test_filepath, nrows=nrows)
raw_data['application'] = pd.concat([application_train, application_test],
sort=False).drop(cfg.TARGET_COLUMNS, axis='columns')
raw_data['train_set'] = pd.DataFrame(application_train[cfg.ID_COLUMNS + cfg.TARGET_COLUMNS])
raw_data['test_set'] = pd.DataFrame(application_test[cfg.ID_COLUMNS])
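    # 'application' holds the concatenated train+test features without the target;
    # the target is kept separately in 'train_set' for supervision.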
logger.info("Reading bureau ...")
raw_data['bureau'] = pd.read_csv(params.bureau_filepath, nrows=nrows_bureau)
logger.info("Reading credit_card_balance ...")
raw_data['credit_card_balance'] = pd.read_csv(params.credit_card_balance_filepath, nrows=nrows_credit_card_balance)
logger.info("Reading pos_cash_balance ...")
raw_data['pos_cash_balance'] = | pd.read_csv(params.POS_CASH_balance_filepath, nrows=nrows_pos_cash_balance) | pandas.read_csv |
import streamlit as st
import pandas as pd
import base64
import os
import datetime
import sqlalchemy as sa
from pathlib import Path
import psycopg2
#creating sql alchemy engine
engine = sa.create_engine('postgresql://xiamtznyktfwmk:<EMAIL>:5432/dekfhtva5ndr6b',echo=False)
def check_if_weekend(today):
try:
        if not isinstance(today, datetime.datetime):
            raise ValueError("today must be a datetime.datetime instance")
        # Saturday and Sunday of the current week
        lower_limit = today + datetime.timedelta(days=(5 - today.weekday()))
        upper_limit = today + datetime.timedelta(days=(6 - today.weekday()))
        return lower_limit <= today <= upper_limit

except ValueError:
pass
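# On weekends the weekly availability log is reset: the local week_log.csv and the
# Postgres table 'table2' are dropped and replaced with an empty frame.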
today_date = datetime.datetime.today()
weekend = check_if_weekend(today_date)
if weekend==True:
os.remove('week_log.csv')
try:
engine.execute('DROP TABLE table2')
except:
pass
new_week_log = pd.DataFrame(columns=['Name', 'Time', 'Days', 'Hours', 'Reason', 'Team'],index=None)
new_week_log.to_csv('week_log.csv', mode='w', header=True,index=None)
new_week_log.to_sql('table2',con=engine,index=False,index_label=None,if_exists='replace')
else:
try:
new_week_log=pd.read_sql('table2',con=engine,index_col=None)
except:
new_week_log = pd.DataFrame(columns=['Name', 'Time', 'Days', 'Hours', 'Reason', 'Team'])
new_week_log.to_sql('table2', con=engine, index=False, index_label=None, if_exists='replace')
new_week_log = pd.read_sql('table2', con=engine, index_col=None)
st.title('Work Checkin System')
st.sidebar.image('logo.jpg')
st.sidebar.markdown("""
***XYZ Team***
""")
data=pd.read_csv('data.csv',header=[0])
if os.path.exists('record.csv'):
try:
record=pd.read_sql('table1',con=engine,index_col=None)
except:
record=pd.read_csv('record.csv',index_col=None)
record.to_sql('table1',con=engine,index=False,index_label=None,if_exists='append')
else:
record = pd.DataFrame(columns=['Name', 'Time', 'Days', 'Hours', 'Reason', 'Team'],index=None)
record.to_csv('record.csv', mode='w', header=True,index=None)
record.to_sql('table1',con=engine,index=False,index_label=None,if_exists='replace')
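# 'record.csv' / 'table1' is the append-only continuous log; 'week_log.csv' / 'table2'
# is the weekly log that is cleared by the weekend reset above.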
st.write(record)
#st.write(pd.read_sql('table1',con=engine,index_col=None))
days=['mon','tue','wed','thurs','fri','sat','sun']
teams=['Development','PR','management']
st.warning('Avoid duplication, ignore if not applicable')
st.error("During the weekend the log resets itself and you won't be able to make any changes, so don't check in during the weekend")
def input_values():
data2 = pd.read_csv('data.csv', header=[0])
if st.sidebar.checkbox('Work for this week'):
selected_name = st.sidebar.selectbox('Name', options=data['Members'])
days_selected=st.sidebar.multiselect('Days free to work',options=days)
        hours = st.sidebar.slider('No. of hours per week you will be able to work', 1.0, 8.0, 1.0)  # min 1h, max 8h per week
team_willing=st.sidebar.multiselect('Team willing to work in',options=teams)
password=str(st.sidebar.text_input('enter the passphrase')).lower()
if st.sidebar.button('Submit details'):
y=data2.loc[data2.Members == str(selected_name)]
z=y.iloc[:,-1].values
if password==str(z[0]):
st.balloons()
input_data={
'Name':[str(selected_name)],
'Time':[str(datetime.datetime.today())],
'Days':[str(days_selected)],
'Hours':[str(hours)],
'Reason':['None'],
'Team':[str(team_willing)]
}
input_df=pd.DataFrame(input_data)
input_df.to_csv('record.csv', mode='a', header=False,index=None)
input_df.to_sql('table1',if_exists='append',con=engine,index=False,index_label=None)
record_changed = pd.read_sql('table1',con=engine,index_col=None)
record_reverse = record_changed.iloc[::-1]
st.subheader('Continous Log')
st.write(record_reverse.head())
input_df.to_csv('week_log.csv', mode='a', header=False,index=None)
input_df.to_sql('table2', if_exists='append', con=engine, index=False, index_label=None)
record_changed_wl = pd.read_sql('table2',con=engine,index_col=None)
record_reverse_wl = record_changed_wl.iloc[::-1]
st.subheader('Weekly Log')
st.write(record_reverse_wl.head())
else:
st.sidebar.warning('Wrong passphrase')
elif st.sidebar.checkbox('Cannot Work this week'):
selected_name = st.sidebar.selectbox('Name', options=data['Members'])
reason=st.sidebar.text_input('Reason')
password = str(st.sidebar.text_input('enter the passphrase')).lower()
if st.sidebar.button('Submit details'):
y = data2.loc[data2.Members == str(selected_name)]
z = y.iloc[:, -1].values
if password == str(z[0]):
st.balloons()
input_data={
'Name':[str(selected_name)],
'Time':[str(datetime.datetime.today())],
'Days':['None'],
'Hours':0,
'Reason':[str(reason)],
'Team':['None']
}
input_df= | pd.DataFrame(input_data) | pandas.DataFrame |
import random
import numpy as np
import pytest
import pandas as pd
from pandas import (
Categorical,
DataFrame,
NaT,
Timestamp,
date_range,
)
import pandas._testing as tm
class TestDataFrameSortValues:
def test_sort_values(self):
frame = DataFrame(
[[1, 1, 2], [3, 1, 0], [4, 5, 6]], index=[1, 2, 3], columns=list("ABC")
)
# by column (axis=0)
sorted_df = frame.sort_values(by="A")
indexer = frame["A"].argsort().values
expected = frame.loc[frame.index[indexer]]
tm.assert_frame_equal(sorted_df, expected)
sorted_df = frame.sort_values(by="A", ascending=False)
indexer = indexer[::-1]
expected = frame.loc[frame.index[indexer]]
tm.assert_frame_equal(sorted_df, expected)
sorted_df = frame.sort_values(by="A", ascending=False)
tm.assert_frame_equal(sorted_df, expected)
# GH4839
sorted_df = frame.sort_values(by=["A"], ascending=[False])
tm.assert_frame_equal(sorted_df, expected)
# multiple bys
sorted_df = frame.sort_values(by=["B", "C"])
expected = frame.loc[[2, 1, 3]]
tm.assert_frame_equal(sorted_df, expected)
sorted_df = frame.sort_values(by=["B", "C"], ascending=False)
tm.assert_frame_equal(sorted_df, expected[::-1])
sorted_df = frame.sort_values(by=["B", "A"], ascending=[True, False])
tm.assert_frame_equal(sorted_df, expected)
msg = "No axis named 2 for object type DataFrame"
with pytest.raises(ValueError, match=msg):
frame.sort_values(by=["A", "B"], axis=2, inplace=True)
# by row (axis=1): GH#10806
sorted_df = frame.sort_values(by=3, axis=1)
expected = frame
tm.assert_frame_equal(sorted_df, expected)
sorted_df = frame.sort_values(by=3, axis=1, ascending=False)
expected = frame.reindex(columns=["C", "B", "A"])
tm.assert_frame_equal(sorted_df, expected)
sorted_df = frame.sort_values(by=[1, 2], axis="columns")
expected = frame.reindex(columns=["B", "A", "C"])
tm.assert_frame_equal(sorted_df, expected)
sorted_df = frame.sort_values(by=[1, 3], axis=1, ascending=[True, False])
tm.assert_frame_equal(sorted_df, expected)
sorted_df = frame.sort_values(by=[1, 3], axis=1, ascending=False)
expected = frame.reindex(columns=["C", "B", "A"])
tm.assert_frame_equal(sorted_df, expected)
msg = r"Length of ascending \(5\) != length of by \(2\)"
with pytest.raises(ValueError, match=msg):
frame.sort_values(by=["A", "B"], axis=0, ascending=[True] * 5)
def test_sort_values_by_empty_list(self):
# https://github.com/pandas-dev/pandas/issues/40258
expected = DataFrame({"a": [1, 4, 2, 5, 3, 6]})
result = expected.sort_values(by=[])
tm.assert_frame_equal(result, expected)
assert result is not expected
def test_sort_values_inplace(self):
frame = DataFrame(
np.random.randn(4, 4), index=[1, 2, 3, 4], columns=["A", "B", "C", "D"]
)
sorted_df = frame.copy()
return_value = sorted_df.sort_values(by="A", inplace=True)
assert return_value is None
expected = frame.sort_values(by="A")
tm.assert_frame_equal(sorted_df, expected)
sorted_df = frame.copy()
return_value = sorted_df.sort_values(by=1, axis=1, inplace=True)
assert return_value is None
expected = frame.sort_values(by=1, axis=1)
tm.assert_frame_equal(sorted_df, expected)
sorted_df = frame.copy()
return_value = sorted_df.sort_values(by="A", ascending=False, inplace=True)
assert return_value is None
expected = frame.sort_values(by="A", ascending=False)
tm.assert_frame_equal(sorted_df, expected)
sorted_df = frame.copy()
return_value = sorted_df.sort_values(
by=["A", "B"], ascending=False, inplace=True
)
assert return_value is None
expected = frame.sort_values(by=["A", "B"], ascending=False)
tm.assert_frame_equal(sorted_df, expected)
def test_sort_values_multicolumn(self):
A = np.arange(5).repeat(20)
B = np.tile(np.arange(5), 20)
random.shuffle(A)
random.shuffle(B)
frame = DataFrame({"A": A, "B": B, "C": np.random.randn(100)})
result = frame.sort_values(by=["A", "B"])
indexer = np.lexsort((frame["B"], frame["A"]))
expected = frame.take(indexer)
tm.assert_frame_equal(result, expected)
result = frame.sort_values(by=["A", "B"], ascending=False)
indexer = np.lexsort(
(frame["B"].rank(ascending=False), frame["A"].rank(ascending=False))
)
expected = frame.take(indexer)
tm.assert_frame_equal(result, expected)
result = frame.sort_values(by=["B", "A"])
indexer = np.lexsort((frame["A"], frame["B"]))
expected = frame.take(indexer)
tm.assert_frame_equal(result, expected)
def test_sort_values_multicolumn_uint64(self):
# GH#9918
# uint64 multicolumn sort
df = DataFrame(
{
"a": pd.Series([18446637057563306014, 1162265347240853609]),
"b": pd.Series([1, 2]),
}
)
df["a"] = df["a"].astype(np.uint64)
result = df.sort_values(["a", "b"])
expected = DataFrame(
{
"a": pd.Series([18446637057563306014, 1162265347240853609]),
"b": pd.Series([1, 2]),
},
index=pd.Index([1, 0]),
)
tm.assert_frame_equal(result, expected)
def test_sort_values_nan(self):
# GH#3917
df = DataFrame(
{"A": [1, 2, np.nan, 1, 6, 8, 4], "B": [9, np.nan, 5, 2, 5, 4, 5]}
)
# sort one column only
expected = DataFrame(
{"A": [np.nan, 1, 1, 2, 4, 6, 8], "B": [5, 9, 2, np.nan, 5, 5, 4]},
index=[2, 0, 3, 1, 6, 4, 5],
)
sorted_df = df.sort_values(["A"], na_position="first")
tm.assert_frame_equal(sorted_df, expected)
expected = DataFrame(
{"A": [np.nan, 8, 6, 4, 2, 1, 1], "B": [5, 4, 5, 5, np.nan, 9, 2]},
index=[2, 5, 4, 6, 1, 0, 3],
)
sorted_df = df.sort_values(["A"], na_position="first", ascending=False)
tm.assert_frame_equal(sorted_df, expected)
expected = df.reindex(columns=["B", "A"])
sorted_df = df.sort_values(by=1, axis=1, na_position="first")
tm.assert_frame_equal(sorted_df, expected)
# na_position='last', order
expected = DataFrame(
{"A": [1, 1, 2, 4, 6, 8, np.nan], "B": [2, 9, np.nan, 5, 5, 4, 5]},
index=[3, 0, 1, 6, 4, 5, 2],
)
sorted_df = df.sort_values(["A", "B"])
tm.assert_frame_equal(sorted_df, expected)
# na_position='first', order
expected = DataFrame(
{"A": [np.nan, 1, 1, 2, 4, 6, 8], "B": [5, 2, 9, np.nan, 5, 5, 4]},
index=[2, 3, 0, 1, 6, 4, 5],
)
sorted_df = df.sort_values(["A", "B"], na_position="first")
tm.assert_frame_equal(sorted_df, expected)
# na_position='first', not order
expected = DataFrame(
{"A": [np.nan, 1, 1, 2, 4, 6, 8], "B": [5, 9, 2, np.nan, 5, 5, 4]},
index=[2, 0, 3, 1, 6, 4, 5],
)
sorted_df = df.sort_values(["A", "B"], ascending=[1, 0], na_position="first")
tm.assert_frame_equal(sorted_df, expected)
# na_position='last', not order
expected = DataFrame(
{"A": [8, 6, 4, 2, 1, 1, np.nan], "B": [4, 5, 5, np.nan, 2, 9, 5]},
index=[5, 4, 6, 1, 3, 0, 2],
)
sorted_df = df.sort_values(["A", "B"], ascending=[0, 1], na_position="last")
tm.assert_frame_equal(sorted_df, expected)
def test_sort_values_stable_descending_sort(self):
# GH#6399
df = DataFrame(
[[2, "first"], [2, "second"], [1, "a"], [1, "b"]],
columns=["sort_col", "order"],
)
sorted_df = df.sort_values(by="sort_col", kind="mergesort", ascending=False)
tm.assert_frame_equal(df, sorted_df)
@pytest.mark.parametrize(
"expected_idx_non_na, ascending",
[
[
[3, 4, 5, 0, 1, 8, 6, 9, 7, 10, 13, 14],
[True, True],
],
[
[0, 3, 4, 5, 1, 8, 6, 7, 10, 13, 14, 9],
[True, False],
],
[
[9, 7, 10, 13, 14, 6, 8, 1, 3, 4, 5, 0],
[False, True],
],
[
[7, 10, 13, 14, 9, 6, 8, 1, 0, 3, 4, 5],
[False, False],
],
],
)
@pytest.mark.parametrize("na_position", ["first", "last"])
def test_sort_values_stable_multicolumn_sort(
self, expected_idx_non_na, ascending, na_position
):
# GH#38426 Clarify sort_values with mult. columns / labels is stable
df = DataFrame(
{
"A": [1, 2, np.nan, 1, 1, 1, 6, 8, 4, 8, 8, np.nan, np.nan, 8, 8],
"B": [9, np.nan, 5, 2, 2, 2, 5, 4, 5, 3, 4, np.nan, np.nan, 4, 4],
}
)
# All rows with NaN in col "B" only have unique values in "A", therefore,
# only the rows with NaNs in "A" have to be treated individually:
expected_idx = (
[11, 12, 2] + expected_idx_non_na
if na_position == "first"
else expected_idx_non_na + [2, 11, 12]
)
expected = df.take(expected_idx)
sorted_df = df.sort_values(
["A", "B"], ascending=ascending, na_position=na_position
)
tm.assert_frame_equal(sorted_df, expected)
def test_sort_values_stable_categorial(self):
# GH#16793
df = DataFrame({"x": Categorical(np.repeat([1, 2, 3, 4], 5), ordered=True)})
expected = df.copy()
sorted_df = df.sort_values("x", kind="mergesort")
tm.assert_frame_equal(sorted_df, expected)
def test_sort_values_datetimes(self):
# GH#3461, argsort / lexsort differences for a datetime column
df = DataFrame(
["a", "a", "a", "b", "c", "d", "e", "f", "g"],
columns=["A"],
index=date_range("20130101", periods=9),
)
dts = [
Timestamp(x)
for x in [
"2004-02-11",
"2004-01-21",
"2004-01-26",
"2005-09-20",
"2010-10-04",
"2009-05-12",
"2008-11-12",
"2010-09-28",
"2010-09-28",
]
]
df["B"] = dts[::2] + dts[1::2]
df["C"] = 2.0
df["A1"] = 3.0
df1 = df.sort_values(by="A")
df2 = df.sort_values(by=["A"])
tm.assert_frame_equal(df1, df2)
df1 = df.sort_values(by="B")
df2 = df.sort_values(by=["B"])
tm.assert_frame_equal(df1, df2)
df1 = df.sort_values(by="B")
df2 = df.sort_values(by=["C", "B"])
tm.assert_frame_equal(df1, df2)
def test_sort_values_frame_column_inplace_sort_exception(self, float_frame):
s = float_frame["A"]
with pytest.raises(ValueError, match="This Series is a view"):
s.sort_values(inplace=True)
cp = s.copy()
cp.sort_values() # it works!
def test_sort_values_nat_values_in_int_column(self):
# GH#14922: "sorting with large float and multiple columns incorrect"
# cause was that the int64 value NaT was considered as "na". Which is
# only correct for datetime64 columns.
int_values = (2, int(NaT.value))
float_values = (2.0, -1.797693e308)
df = DataFrame(
{"int": int_values, "float": float_values}, columns=["int", "float"]
)
df_reversed = DataFrame(
{"int": int_values[::-1], "float": float_values[::-1]},
columns=["int", "float"],
index=[1, 0],
)
# NaT is not a "na" for int64 columns, so na_position must not
# influence the result:
df_sorted = df.sort_values(["int", "float"], na_position="last")
tm.assert_frame_equal(df_sorted, df_reversed)
df_sorted = df.sort_values(["int", "float"], na_position="first")
tm.assert_frame_equal(df_sorted, df_reversed)
# reverse sorting order
df_sorted = df.sort_values(["int", "float"], ascending=False)
tm.assert_frame_equal(df_sorted, df)
# and now check if NaT is still considered as "na" for datetime64
# columns:
df = DataFrame(
{"datetime": [Timestamp("2016-01-01"), NaT], "float": float_values},
columns=["datetime", "float"],
)
df_reversed = DataFrame(
{"datetime": [NaT, Timestamp("2016-01-01")], "float": float_values[::-1]},
columns=["datetime", "float"],
index=[1, 0],
)
df_sorted = df.sort_values(["datetime", "float"], na_position="first")
tm.assert_frame_equal(df_sorted, df_reversed)
df_sorted = df.sort_values(["datetime", "float"], na_position="last")
tm.assert_frame_equal(df_sorted, df)
# Ascending should not affect the results.
df_sorted = df.sort_values(["datetime", "float"], ascending=False)
tm.assert_frame_equal(df_sorted, df)
def test_sort_nat(self):
# GH 16836
d1 = [Timestamp(x) for x in ["2016-01-01", "2015-01-01", np.nan, "2016-01-01"]]
d2 = [
Timestamp(x)
for x in ["2017-01-01", "2014-01-01", "2016-01-01", "2015-01-01"]
]
df = DataFrame({"a": d1, "b": d2}, index=[0, 1, 2, 3])
d3 = [Timestamp(x) for x in ["2015-01-01", "2016-01-01", "2016-01-01", np.nan]]
d4 = [
Timestamp(x)
for x in ["2014-01-01", "2015-01-01", "2017-01-01", "2016-01-01"]
]
expected = DataFrame({"a": d3, "b": d4}, index=[1, 3, 0, 2])
sorted_df = df.sort_values(by=["a", "b"])
tm.assert_frame_equal(sorted_df, expected)
def test_sort_values_na_position_with_categories(self):
# GH#22556
# Positioning missing value properly when column is Categorical.
categories = ["A", "B", "C"]
category_indices = [0, 2, 4]
list_of_nans = [np.nan, np.nan]
na_indices = [1, 3]
na_position_first = "first"
na_position_last = "last"
column_name = "c"
reversed_categories = sorted(categories, reverse=True)
reversed_category_indices = sorted(category_indices, reverse=True)
reversed_na_indices = sorted(na_indices)
df = DataFrame(
{
column_name: Categorical(
["A", np.nan, "B", np.nan, "C"], categories=categories, ordered=True
)
}
)
# sort ascending with na first
result = df.sort_values(
by=column_name, ascending=True, na_position=na_position_first
)
expected = DataFrame(
{
column_name: Categorical(
list_of_nans + categories, categories=categories, ordered=True
)
},
index=na_indices + category_indices,
)
tm.assert_frame_equal(result, expected)
# sort ascending with na last
result = df.sort_values(
by=column_name, ascending=True, na_position=na_position_last
)
expected = DataFrame(
{
column_name: Categorical(
categories + list_of_nans, categories=categories, ordered=True
)
},
index=category_indices + na_indices,
)
tm.assert_frame_equal(result, expected)
# sort descending with na first
result = df.sort_values(
by=column_name, ascending=False, na_position=na_position_first
)
expected = DataFrame(
{
column_name: Categorical(
list_of_nans + reversed_categories,
categories=categories,
ordered=True,
)
},
index=reversed_na_indices + reversed_category_indices,
)
tm.assert_frame_equal(result, expected)
# sort descending with na last
result = df.sort_values(
by=column_name, ascending=False, na_position=na_position_last
)
expected = DataFrame(
{
column_name: Categorical(
reversed_categories + list_of_nans,
categories=categories,
ordered=True,
)
},
index=reversed_category_indices + reversed_na_indices,
)
tm.assert_frame_equal(result, expected)
def test_sort_values_nat(self):
# GH#16836
d1 = [Timestamp(x) for x in ["2016-01-01", "2015-01-01", np.nan, "2016-01-01"]]
d2 = [
Timestamp(x)
for x in ["2017-01-01", "2014-01-01", "2016-01-01", "2015-01-01"]
]
df = DataFrame({"a": d1, "b": d2}, index=[0, 1, 2, 3])
d3 = [Timestamp(x) for x in ["2015-01-01", "2016-01-01", "2016-01-01", np.nan]]
d4 = [
Timestamp(x)
for x in ["2014-01-01", "2015-01-01", "2017-01-01", "2016-01-01"]
]
expected = DataFrame({"a": d3, "b": d4}, index=[1, 3, 0, 2])
sorted_df = df.sort_values(by=["a", "b"])
tm.assert_frame_equal(sorted_df, expected)
def test_sort_values_na_position_with_categories_raises(self):
df = DataFrame(
{
"c": Categorical(
["A", np.nan, "B", np.nan, "C"],
categories=["A", "B", "C"],
ordered=True,
)
}
)
with pytest.raises(ValueError, match="invalid na_position: bad_position"):
df.sort_values(by="c", ascending=False, na_position="bad_position")
@pytest.mark.parametrize("inplace", [True, False])
@pytest.mark.parametrize(
"original_dict, sorted_dict, ignore_index, output_index",
[
({"A": [1, 2, 3]}, {"A": [3, 2, 1]}, True, [0, 1, 2]),
({"A": [1, 2, 3]}, {"A": [3, 2, 1]}, False, [2, 1, 0]),
(
{"A": [1, 2, 3], "B": [2, 3, 4]},
{"A": [3, 2, 1], "B": [4, 3, 2]},
True,
[0, 1, 2],
),
(
{"A": [1, 2, 3], "B": [2, 3, 4]},
{"A": [3, 2, 1], "B": [4, 3, 2]},
False,
[2, 1, 0],
),
],
)
def test_sort_values_ignore_index(
self, inplace, original_dict, sorted_dict, ignore_index, output_index
):
# GH 30114
df = DataFrame(original_dict)
expected = DataFrame(sorted_dict, index=output_index)
kwargs = {"ignore_index": ignore_index, "inplace": inplace}
if inplace:
result_df = df.copy()
result_df.sort_values("A", ascending=False, **kwargs)
else:
result_df = df.sort_values("A", ascending=False, **kwargs)
tm.assert_frame_equal(result_df, expected)
tm.assert_frame_equal(df, DataFrame(original_dict))
def test_sort_values_nat_na_position_default(self):
# GH 13230
expected = DataFrame(
{
"A": [1, 2, 3, 4, 4],
"date": pd.DatetimeIndex(
[
"2010-01-01 09:00:00",
"2010-01-01 09:00:01",
"2010-01-01 09:00:02",
"2010-01-01 09:00:03",
"NaT",
]
),
}
)
result = expected.sort_values(["A", "date"])
tm.assert_frame_equal(result, expected)
def test_sort_values_item_cache(self, using_array_manager):
        # previous behavior incorrectly retained an invalid _item_cache entry
df = DataFrame(np.random.randn(4, 3), columns=["A", "B", "C"])
df["D"] = df["A"] * 2
ser = df["A"]
if not using_array_manager:
assert len(df._mgr.blocks) == 2
df.sort_values(by="A")
ser.values[0] = 99
assert df.iloc[0, 0] == df["A"][0]
def test_sort_values_reshaping(self):
# GH 39426
values = list(range(21))
expected = DataFrame([values], columns=values)
df = expected.sort_values(expected.index[0], axis=1, ignore_index=True)
| tm.assert_frame_equal(df, expected) | pandas._testing.assert_frame_equal |
import matplotlib
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
import numpy as np
import os
import pandas as pd
import seaborn as sns
from sklearn.metrics import confusion_matrix
def CM(y, y_pred, labels, save_path=None, verbose=True):
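    """Compute, print, and optionally save a confusion matrix.

    `y` and `y_pred` are label vectors and `labels` are the tick labels used for
    the normalized heatmap written to `save_path`. Returns the raw
    (unnormalized) confusion matrix.
    """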
cm = confusion_matrix(y, y_pred)
if verbose:
print("Confusion matrix for validation data:")
print(cm)
if save_path is not None:
n_digits = int(np.ceil(np.log10(np.max(cm))))
str_fmt = '%-'+str(n_digits)+'.0f'
np.savetxt(save_path+"/confusion_matrix.txt", cm, fmt=str_fmt)
cm_figure = plot_cm(cm, normalize=True, labels=labels)
cm_figure.figure.savefig(save_path+"/confusion_matrix_normalized.png")
if verbose:
print("Confusion matrix saved in", save_path)
return cm
FIGSIZE = None
MARKERSIZE = 100
MAXCOLORS = 8
# Define a LAI Colormap
# CMAP = "tab20b"
# LAI_PALETTE = ["#A60303", "#8DA6F2", "#254F6B", "#613673", "#75BFAA", "#AAAAAA", "#3457BF", "#D9414E" ]
LAI_PALETTE = ["#A60303", "#3457BF", "#75BFAA", "#613673", "#8DA6F2", "#AAAAAA", "#254F6B", "#D9414E" ]
CMAP = ListedColormap(LAI_PALETTE)
def visualize_palette(palette=None):
if palette is None:
palette = LAI_PALETTE
nn = 100
for i, col in enumerate(palette):
plt.plot(np.arange(nn), np.repeat(i,nn), color=col, linewidth=20)
plt.show()
def haplo_tile_plot(haplos, pop_order=None, bbox_to_anchor=[1.2,1.0]):
"""
Tile plot for visualizing haplotypes.
- haplos: array of haplotypes
- pop_order: order of ancestry for figure legend
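    Example (sketch; assumes `window_haplos` is an (n_haplotypes, n_windows) integer
    array of ancestry labels and `pops` an array of ancestry names):
        fig, ax = haplo_tile_plot(window_haplos, pop_order=pops)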
"""
ANI_FIGSIZE = (10,2)
haplos = np.array(haplos, dtype=int)
n_anc = len(np.unique(haplos))
n_haplo, n_wind = haplos.shape
XX = np.array([range(n_wind) for _ in range(n_haplo)]).reshape(-1)
YY = np.array([np.repeat(i, n_wind) for i in range(n_haplo)]).reshape(-1)
CC = haplos.reshape(-1)
fig, ax = plt.subplots(figsize=ANI_FIGSIZE, constrained_layout=True)
normalize = matplotlib.colors.Normalize(vmin=0, vmax=n_anc)
scat = ax.scatter(XX, YY, c = CC, marker="s", cmap=CMAP, norm=normalize, s=MARKERSIZE)
y_ticks_new = ["P" if i%2 else "M" for i in range(n_haplo)]
y_ticks_new = [tick+"'" if t%4>1 else tick for t, tick in enumerate(y_ticks_new)]
plt.setp(ax, yticks=range(n_haplo), yticklabels=y_ticks_new)
if pop_order is not None:
handles, labels = scat.legend_elements()
plt.legend(handles, pop_order[np.unique(CC)], loc="upper right", bbox_to_anchor=bbox_to_anchor, title="Ancestry")
return fig, ax
def plot_cm(cm, normalize=True, labels=None, figsize=(12,10)):
plt.figure(figsize=figsize)
    # normalize each column of the confusion matrix by its total
if normalize:
cm = cm/np.sum(cm, axis=0)
cm = np.nan_to_num(cm, copy=False, nan=0.0)
df_cm = pd.DataFrame(cm, range(cm.shape[0]), range(cm.shape[1]))
sns.set(font_scale=1.4) # for label size
if labels is None:
fig = sns.heatmap(df_cm, annot=False, annot_kws={"size": 16}) # font size
else:
fig = sns.heatmap(df_cm, xticklabels=labels, yticklabels=labels,
annot=False, annot_kws={"size": 16}) # font size
plt.show()
return fig
def plot_chm(sample_id, msp_df, rm_img=False, img_name="chm_img"):
"""
Wrapper function for plotting with Tagore.
Requires an msp dataframe and a sample_id of which to plot the chromosome.
"""
# defining a color palette
palette = sns.color_palette("colorblind").as_hex()
# get the base of the tagore style dataframe
nrows = msp_df.shape[0]
default_params = pd.DataFrame({"feature": [0]*nrows, "size": [1]*nrows})
tagore_base = msp_df[["#chm", "spos", "epos"]].join(default_params)
tagore_base.columns = ["chm", "start", "stop", "feature", "size"]
# adding data from the individual with that sample_id
colors0 = [palette[i] for i in np.array(msp_df[sample_id+".0"])]
colors1 = [palette[i] for i in np.array(msp_df[sample_id+".1"])]
tagore0 = tagore_base.join(pd.DataFrame({"color": colors0, "chrCopy": 1}))
tagore1 = tagore_base.join(pd.DataFrame({"color": colors1, "chrCopy": 2}))
tagore_df = | pd.concat([tagore0, tagore1]) | pandas.concat |
import matplotlib.pyplot as plt
import pandas as pd
def plot_transactions_by_hour(transactions_by_hour):
"""
Generates a bar plot for transactions by hour
"""
plt.figure()
avg_block_sizes_df = | pd.Series(transactions_by_hour) | pandas.Series |
import pandas as pd
import pickle
import argparse
import numpy as np
from plotnine import ggplot,theme_bw,scale_alpha_manual,guides,scale_size_manual,guide_legend,element_rect,element_line, ggsave,scale_color_brewer,annotate,element_blank, element_text, scale_x_discrete,scale_y_continuous, aes,theme, facet_grid, labs, geom_point, facet_wrap, geom_boxplot, geom_hline
import sys
sys.path.insert(0, './')
# parameters
parser = argparse.ArgumentParser(description='Results on DenseNet and VGG')
parser.add_argument('--dataset', default='CIFAR10', type=str, help='Dataset to be used: CIFAR100, CIFAR10')
args = parser.parse_args()
# check parameters
assert args.dataset == 'CIFAR10' or args.dataset == 'CIFAR100', 'Dataset can only be CIFAR10 or CIFAR100.'
lower_quantiles_mean = np.zeros((3, 2))
lower_quantiles_std = np.zeros((3, 2))
base_size = 18
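# For each architecture we collect three variants of the results: vanilla CP
# (sigma = 0), CP with smoothed scores ("CP + SS"), and the corrected "RSCP" scores.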
for k, model_type in enumerate(['ResNet', 'DenseNet', 'VGG']):
alpha = 0.1
epsilon = 0.125
ratio = 2 # ratio between adversarial noise bound to smoothed noise
    sigma_smooth = ratio * epsilon * 0  # sigma used for smoothing
sigma_model = sigma_smooth # sigma used for training the model
n_smooth = 1 # number of samples used for smoothing
My_model = True
normalized = True
dataset = args.dataset
Regularization = False
directory = "./Results/" + str(dataset) + "/epsilon_" + str(epsilon) + "/sigma_model_" + str(
sigma_model) + "/sigma_smooth_" + str(sigma_smooth) + "/n_smooth_" + str(n_smooth)
if normalized:
directory = directory + "/Robust"
if model_type != 'ResNet':
directory = directory + "/" + str(model_type)
if dataset == "CIFAR10" and model_type == 'ResNet':
if My_model:
directory = directory + "/My_Model"
else:
directory = directory + "/Their_Model"
if Regularization:
directory = directory + "/Regularization"
if alpha != 0.1:
directory = directory + "/alpha_" + str(alpha)
path = directory + "/results.csv"
results = pd.read_csv(path)
results = results.loc[:, ~results.columns.str.contains('^Unnamed')]
results = results.drop(columns=['Black box', 'Conditional coverage', 'Size cover'])
results1 = results[(results["Method"] == "SC_simple") | (results["Method"] == "HCC_simple")]
results1["Method"].replace({"SC_simple": "APS", "HCC_simple": "HPS"}, inplace=True)
results1 = results1.rename(columns={'Method': 'Base Score'}, inplace=False)
data1 = results1[results1['noise_L2_norm'] == epsilon].copy()
data1["Type"] = " Vanilla CP"
data1["Model"] = model_type
data1["Position"] = " "
    sigma_smooth = ratio * epsilon  # sigma used for smoothing
sigma_model = sigma_smooth # sigma used for training the model
n_smooth = 256
directory = "./Results/" + str(dataset) + "/epsilon_" + str(epsilon) + "/sigma_model_" + str(
sigma_model) + "/sigma_smooth_" + str(sigma_smooth) + "/n_smooth_" + str(n_smooth)
if normalized:
directory = directory + "/Robust"
if model_type != 'ResNet':
directory = directory + "/" + str(model_type)
if dataset == "CIFAR10" and model_type == 'ResNet':
if My_model:
directory = directory + "/My_Model"
else:
directory = directory + "/Their_Model"
if Regularization:
directory = directory + "/Regularization"
if alpha != 0.1:
directory = directory + "/alpha_" + str(alpha)
path = directory + "/results.csv"
results = pd.read_csv(path)
results = results.loc[:, ~results.columns.str.contains('^Unnamed')]
results = results.drop(columns=['Black box', 'Conditional coverage', 'Size cover'])
results2 = results[(results["Method"] == "SC_smoothed_score") | (results["Method"] == "HCC_smoothed_score")]
results2["Method"].replace({"SC_smoothed_score": "APS", "HCC_smoothed_score": "HPS"}, inplace=True)
results2 = results2.rename(columns={'Method': 'Base Score'}, inplace=False)
data2 = results2[results2["noise_L2_norm"] == epsilon].copy()
data2["Type"] = "CP + SS"
data2["Model"] = model_type
data2["Position"] = " "
results3 = results[
(results["Method"] == "SC_smoothed_score_correction") | (results["Method"] == "HCC_smoothed_score_correction")]
results3["Method"].replace({"SC_smoothed_score_correction": "APS", "HCC_smoothed_score_correction": "HPS"},
inplace=True)
results3 = results3.rename(columns={'Method': 'Base Score'}, inplace=False)
data3 = results3[results3["noise_L2_norm"] == epsilon].copy()
data3["Type"] = "RSCP"
data3["Model"] = model_type
data3["Position"] = " "
current = data1.append(data2)
current = current.append(data3)
#current['Position'] = current['Position'].cat.reorder_categories(['Up', 'Down'])
if k == 0:
final = current
else:
final = final.append(current)
with open(directory + "/quantiles_bounds.pickle", 'rb') as f:
quantiles = np.array(pickle.load(f))[0]
for p in range(2):
lower_quantiles_mean[k, p] = np.mean(quantiles[p, 0, :])
lower_quantiles_std[k, p] = np.std(quantiles[p, 0, :])
nominal = pd.DataFrame({'name': ['Nominal Level'], 'Coverage': [1-alpha], 'Position': [' ']})
lines1 = | pd.DataFrame({'name': ['APS', 'APS', 'APS'], 'Coverage': [lower_quantiles_mean[0, 0], lower_quantiles_mean[1, 0], lower_quantiles_mean[2, 0]], 'Position': [' ', ' ', ' '], 'Model': ['DenseNet', 'ResNet', 'VGG']}) | pandas.DataFrame |
from linescanning.plotting import LazyPlot
import os
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from scipy.interpolate import interp1d
import seaborn as sns
from nilearn.glm.first_level import first_level
from nilearn.glm.first_level import hemodynamic_models
from nilearn import plotting
import warnings
class GenericGLM():
"""GenericGLM
    Main class to perform a simple GLM with python. It does most of the processing internally and allows you to plot the various steps along the way.
Parameters
----------
    onsets: pandas.DataFrame
        Dataframe containing the onset times for all events in an experiment. Specifically designed to work smoothly with :func:`linescanning.utils.ParseExpToolsFile`. You should insert the output from :func:`linescanning.utils.ParseExpToolsFile.get_onset_df()` as `onsets`
data: numpy.ndarray, pandas.DataFrame
<time,voxels> numpy array or pandas DataFrame; required for creating the appropriate length of the stimulus vectors
hrf_pars: dict, optional
dictionary collecting the parameters required for :func:`linescanning.glm.double_gamma` (generally the defaults are fine though!)
>>> pars = {'lag': 6,
>>> 'a2': 12,
>>> 'b1': 12,
>>> 'b2': 12,
>>> 'c': 12,
>>> 'scale': True}
TR: float
repetition time of acquisition
osf: int, optional
        Oversampling factor used to account for decimal onset times, by default None. The larger this factor, the more accurately decimal onset times are represented, but the bigger your upsampled convolved design becomes, which means convolving will take longer.
type: str, optional
Use block design of event-related design, by default 'event'. If set to 'block', `block_length` is required.
block_length: int, optional
Duration of block in seconds, by default None
amplitude: int, list, optional
        Amplitude to be used when creating the stimulus vector, by default None. If nothing is specified, the amplitude will be set to '1', like you would in a regular FSL 1-/3-column file. If you want variable amplitudes for different events in a simulation, you can specify a list with a length equal to the number of events present in `onsets`.
regressors: pandas.DataFrame, numpy.ndarray, optional
Add a bunch of regressors to the design
make_figure: bool, optional
Create overview figure of HRF, stimulus vector, and convolved stimulus vector, by default False
    xkcd: bool, optional
        Plot the figure in XKCD-style (cartoon), by default False
plot_vox: int, optional
Instead of plotting the best-fitting voxel, specify which voxel to plot the timecourse and fit of, by default None
plot_event: str, int, list, optional
If a larger design matrix was inputted with multiple events, you can specify here the name of the event you'd like to plot the betas from. It also accepts a list of indices of events to plot, so you could plot the first to events by specifying `plot_event=[1,2]`. Remember, the 0th index is the intercept! By default we'll plot the event right after the intercept
contrast_matrix: numpy.ndarray, optional
contrast array for the event regressors. If none, we'll create a contrast matrix that estimates the effect of each regressor and the baseline
nilearn: bool, optional
use nilearn implementation of `FirstLevelModel` (True) or bare python (False). The later gives easier access to betas, while the former allows implementation of AR-noise models.
Returns
----------
dict
Dictionary collecting outputs under the following keys
* betas: <n_regressors (+intercept), n_voxels> beta values
* tstats: <n_regressors (+intercept), n_voxels> t-statistics (FSL-way)
* x_conv: <n_timepoints, n_regressors (+intercept)> design matrix
* resids: <n_timepoints, n_voxels> residuals>
matplotlib.pyplot
plots along the process if `make_figure=True`
Example
----------
>>> # import modules
>>> from linescanning.glm import GenericGLM
>>> from linescanning import utils
>>>
>>> # define file with fMRI-data and the output from Exptools2
>>> func_file = "some_func_file.mat"
>>> exp_file = "some_exp_file.tsv"
>>>
>>> # load in functional data
>>> func = utils.ParseFuncFile(func_file,
>>> subject=1,
>>> run=1,
>>> deleted_first_timepoints=200,
>>> deleted_last_timepoints=200,
>>> bp_filter="rolling")
>>>
>>> # fetch HP-filtered, percent-signal changed data
>>> data = func.dct_psc_df.copy()
>>>
>>> # load in exptools-file, use attributes from 'func'
>>> onset = utils.ParseExpToolsFile(exp_file,
>>> subject=func.subject,
>>> run=func.run,
>>> delete_vols=(func.deleted_first_timepoints),
>>> TR=func.TR)
>>>
>>> # fetch the onset times and event names in a dataframe
>>> onsets = onset.get_onset_df()
>>>
>>> # do the fitting
>>> fitting = GenericGLM(onsets, data.values, TR=func.TR, osf=1000)
Notes
----------
For `FirstLevelModel` to work with our type of data, I had to add the following to `https://github.com/nilearn/nilearn/blob/main/nilearn/glm/first_level/first_level.py#L683`:
```python
for output_type_ in output_types:
estimate_ = getattr(contrast, output_type_)()
if return_type == "imgs":
# Prepare the returned images
output = self.masker_.inverse_transform(estimate_)
contrast_name = str(con_vals)
output.header['descrip'] = (
'%s of contrast %s' % (output_type_, contrast_name))
outputs[output_type_] = output
else:
output = estimate_
outputs[output_type_] = output
```
This ensures we're getting an array back, rather than a nifti-image for our statistics
"""
def __init__(self, onsets, data, hrf_pars=None, TR=None, osf=1, contrast_matrix=None, exp_type='event', block_length=None, amplitude=None, regressors=None, make_figure=False, xkcd=False, plot_event=[1, 2], plot_vox=None, verbose=False, nilearn=False, derivative=False, dispersion=False):
# %%
# instantiate
self.onsets = onsets
self.hrf_pars = hrf_pars
self.TR = TR
self.osf = osf
self.exp_type = exp_type
self.block_length = block_length
self.amplitude = amplitude
self.regressors = regressors
self.make_figure = make_figure
self.xkcd = xkcd
self.plot_event = plot_event
self.plot_vox = plot_vox
self.verbose = verbose
self.contrast_matrix = contrast_matrix
self.nilearn_method = nilearn
self.dispersion = dispersion
self.derivative = derivative
if isinstance(data, np.ndarray):
self.data = data.copy()
elif isinstance(data, pd.DataFrame):
self.data = data.values
else:
raise ValueError("Data must be 'np.ndarray' or 'pandas.DataFrame'")
# %%
# make the stimulus vectors
if verbose:
print("Creating stimulus vector(s)")
self.stims = make_stimulus_vector(self.onsets, scan_length=self.data.shape[0], osf=self.osf, type=self.exp_type)
# %%
# define HRF
self.hrf_kernel = []
if verbose:
print("Defining HRF")
self.hrf = glover_hrf(osf=osf, TR=self.TR, dispersion=self.dispersion, derivative=self.derivative)
# %%
# convolve stimulus vectors
if verbose:
print("Convolve stimulus vectors with HRF")
self.stims_convolved = convolve_hrf(self.hrf, self.stims, make_figure=self.make_figure, xkcd=self.xkcd)
if self.osf > 1:
if verbose:
print("Resample convolved stimulus vectors")
self.stims_convolved_resampled = resample_stim_vector(self.stims_convolved, self.data.shape[0])
else:
self.stims_convolved_resampled = self.stims_convolved.copy()
self.condition_names = list(self.stims_convolved_resampled.keys())
# %%
# finalize design matrix (with regressors)
if verbose:
print("Creating design matrix")
self.design = first_level_matrix(self.stims_convolved_resampled, regressors=self.regressors)
if self.make_figure:
self.plot_design_matrix()
# %%
# Fit all
if verbose:
print("Running fit")
if self.nilearn_method:
# we're going to hack Nilearn's FirstLevelModel to be compatible with our line-data. First, we specify the model as usual
self.fmri_glm = first_level.FirstLevelModel(t_r=self.TR,
noise_model='ar1',
standardize=False,
hrf_model='spm',
drift_model='cosine',
high_pass=.01)
# Normally, we'd run `fmri_glm = fmri_glm.fit()`, but because this requires nifti-like inputs, we run `run_glm` outside of that function to get the labels:
if isinstance(data, pd.DataFrame):
data = data.values
elif isinstance(data, np.ndarray):
data = data.copy()
else:
raise ValueError(f"Unknown input type {type(data)} for functional data. Must be pd.DataFrame or np.ndarray [time, voxels]")
self.labels, self.results = first_level.run_glm(data, self.design, noise_model='ar1')
# Then, we inject this into the `fmri_glm`-class so we can compute contrasts
self.fmri_glm.labels_ = [self.labels]
self.fmri_glm.results_ = [self.results]
# insert the design matrix:
self.fmri_glm.design_matrices_ = []
self.fmri_glm.design_matrices_.append(self.design)
# Then we specify our contrast matrix:
if self.contrast_matrix == None:
if self.verbose:
print("Defining standard contrast matrix")
matrix = np.eye(len(self.condition_names))
icept = np.zeros((len(self.condition_names), 1))
matrix = np.hstack((icept, matrix)).astype(int)
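                # Default contrasts: one row per condition against baseline; the
                # leading zero column keeps the intercept out of every contrast.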
self.contrast_matrix = matrix.copy()
self.conditions = {}
for idx, name in enumerate(self.condition_names):
self.conditions[name] = self.contrast_matrix[idx, ...]
if self.verbose:
print("Computing contrasts")
self.tstats = []
for event in self.conditions:
tstat = self.fmri_glm.compute_contrast(self.conditions[event],
stat_type='t',
output_type='stat',
return_type=None)
self.tstats.append(tstat)
self.tstats = np.array(self.tstats)
else:
self.results = fit_first_level(self.design, self.data, make_figure=self.make_figure, xkcd=self.xkcd, plot_vox=self.plot_vox, plot_event=self.plot_event)
def plot_contrast_matrix(self, save_as=None):
if self.nilearn_method:
fig,axs = plt.subplots(figsize=(10,10))
plotting.plot_contrast_matrix(self.contrast_matrix, design_matrix=self.design, ax=axs)
if save_as:
fig.savefig(save_as)
else:
raise NotImplementedError("Can't use this function without nilearn-fitting. Set 'nilearn=True'")
def plot_design_matrix(self, save_as=None):
fig,axs = plt.subplots(figsize=(10,10))
plotting.plot_design_matrix(self.design, ax=axs)
if save_as:
fig.savefig(save_as)
def glover_hrf(osf=1, TR=0.105, dispersion=False, derivative=False, time_length=25):
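    """Return a list of peak-normalized HRF kernels: the canonical Glover HRF plus,
    optionally, its temporal-derivative and dispersion-derivative kernels.

    Example
    ----------
    >>> # canonical HRF only, for TR=0.105 with 10x oversampling (sketch)
    >>> kernels = glover_hrf(osf=10, TR=0.105)
    >>> len(kernels)
    1
    """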
# osf factor is different in `hemodynamic_models`
osf /= 10
# set kernel
hrf_kernel = []
hrf = hemodynamic_models.glover_hrf(TR, oversampling=osf, time_length=time_length)
hrf /= hrf.max()
hrf_kernel.append(hrf)
if derivative:
tderiv_hrf = hemodynamic_models.glover_time_derivative(tr=TR, oversampling=osf, time_length=time_length)
tderiv_hrf /= tderiv_hrf.max()
hrf_kernel.append(tderiv_hrf)
if dispersion:
tdisp_hrf = hemodynamic_models.glover_dispersion_derivative(TR, oversampling=osf, time_length=time_length)
tdisp_hrf /= tdisp_hrf.max()
hrf_kernel.append(tdisp_hrf)
return hrf_kernel
def make_stimulus_vector(onset_df, scan_length=None, TR=0.105, osf=None, type='event', block_length=None, amplitude=None):
"""make_stimulus_vector
Creates a stimulus vector for each of the conditions found in `onset_df`. You can account for onset times being in decimal using the oversampling factor `osf`. This would return an upsampled stimulus vector which should be convolved with an equally upsampled HRF. This can be ensured by using the same `osf` in :func:`linescanning.glm.double_gamma`.
Parameters
----------
onset_df: pandas.DataFrame
onset times as read in with :class:`linescanning.utils.ParseExpToolsFile`
scan_length: float, optional
        length of the scan in volumes (i.e., the number of TRs), by default None
TR: float, optional
Repetition time, by default 0.105. Will be used to calculate the required length of the stimulus vector
    osf: int, optional
Oversampling factor used to account for decimal onset times, by default None
type: str, optional
Use block design of event-related design, by default 'event'. If set to 'block', `block_length` is required.
block_length: int, optional
Duration of block in seconds, by default None
amplitude: int, list, optional
        Amplitude to be used when creating the stimulus vector, by default None. If nothing is specified, the amplitude will be set to '1', like you would in a regular FSL 1-/3-column file. If you want variable amplitudes for different events in a simulation, you can specify a list with a length equal to the number of events present in `onset_df`.
Returns
----------
dict
Dictionary collecting numpy array stimulus vectors for each event present in `onset_df` under the keys <event name>
Raises
----------
ValueError
`onset_df` should contain event names
ValueError
if multiple amplitudes are requested but the length of `amplitude` does not match the number of events
ValueError
`block_length` should be an integer
Example
----------
>>> from linescanning import utils
>>> from linescanning import glm
>>> exp_file = 'path/to/exptools2_file.tsv'
    >>> exp_df = utils.ParseExpToolsFile(exp_file, subject=1, run=1)
>>> times = exp_df.get_onset_df()
>>> # oversample with factor 1000 to get rid of 3 decimals in onset times
>>> osf = 1000
>>> # make stimulus vectors
>>> stims = glm.make_stimulus_vector(times, scan_length=400, osf=osf, type='event')
>>> stims
{'left': array([0., 0., 0., ..., 0., 0., 0.]),
'right': array([0., 0., 0., ..., 0., 0., 0.])}
"""
# check if we should reset or not
try:
onset_df = onset_df.reset_index()
except:
onset_df = onset_df
# check conditions we have
try:
names_cond = onset_df['event_type'].unique()
names_cond.sort()
except:
raise ValueError('Could not extract condition names; are you sure you formatted the dataframe correctly?')
# check if we got multiple amplitudes
if isinstance(amplitude, np.ndarray):
ampl_array = amplitude
elif isinstance(amplitude, list):
ampl_array = np.array(amplitude)
else:
ampl_array = False
# loop through unique conditions
stim_vectors = {}
for idx,condition in enumerate(names_cond):
if isinstance(ampl_array, np.ndarray):
if ampl_array.shape[0] == names_cond.shape[0]:
ampl = amplitude[idx]
print(f"Amplitude for event '{names_cond[idx]}' = {round(ampl,2)}")
else:
raise ValueError(f"Nr of amplitudes ({ampl_array.shape[0]}) does not match number of conditions ({names_cond.shape[0]})")
else:
ampl = 1
Y = np.zeros(int((scan_length*TR)*osf))
if type == "event":
for rr, ii in enumerate(onset_df['onset']):
if onset_df['event_type'][rr] == condition:
try:
Y[int(ii*osf)] = ampl
except:
warnings.warn(f"Warning: could not include event {rr} with t = {ii}. Probably experiment continued after functional acquisition")
elif type == 'block':
if not isinstance(block_length, int):
raise ValueError("Please specify the length of the block in seconds (integer)")
for rr, ii in enumerate(onset_df['onset']):
if onset_df['event_type'][rr] == condition:
Y[int(ii*osf):int((ii+block_length)*osf)] = ampl
stim_vectors[condition] = Y
return stim_vectors
def convolve_hrf(hrf, stim_v, make_figure=False, xkcd=False):
"""convolve_hrf
Convolve :func:`linescanning.glm.double_gamma` with :func:`linescanning.glm.make_stimulus_vector`. There's an option to plot the result in a nice overview figure, though python-wise it's not the prettiest..
Parameters
----------
    hrf: list, numpy.ndarray
        HRF kernel(s) as returned by :func:`linescanning.glm.glover_hrf`; either a single kernel or a list of kernels (e.g., including temporal/dispersion derivatives)
    stim_v: numpy.ndarray, dict
        Stimulus vector(s) as per :func:`linescanning.glm.make_stimulus_vector`, or a numpy array containing one stimulus vector (e.g., a *key* from :func:`linescanning.glm.make_stimulus_vector`)
    make_figure: bool, optional
        Create overview figure of HRF, stimulus vector, and convolved stimulus vector, by default False
    xkcd: bool, optional
        Plot the figure in XKCD-style (cartoon), by default False

    Returns
    ----------
    numpy.ndarray, dict
        Convolved stimulus vector(s) with shape <time, number of HRF kernels>; a dictionary of such arrays (one per event) if `stim_v` is a dictionary
    matplotlib.pyplot
        if `make_figure=True`, a figure will be displayed
Example
----------
>>> from linescanning.glm import convolve_hrf
>>> convolved_stim_vector_left = convolve_hrf(hrf_custom, stims, make_figure=True, xkcd=True) # creates figure too
>>> convolved_stim_vector_left = convolve_hrf(hrf_custom, stims) # no figure
"""
def plot(stim_v, hrf, convolved, xkcd=False):
fig = plt.figure(figsize=(20,6))
gs = fig.add_gridspec(2, 2, width_ratios=[20, 10], hspace=0.7)
ax0 = fig.add_subplot(gs[0,0])
LazyPlot(stim_v,
color="#B1BDBD",
axs=ax0,
title="Events",
y_lim=[-.5, 1.5],
x_label='Time (*osf)',
y_label='Activity (A.U.)',
xkcd=xkcd,
font_size=16)
# check if we got derivatives; if so, select first element (= standard HRF)
if isinstance(convolved, list):
convolved = np.array(convolved)
if convolved.shape[-1] > 1:
convolved = convolved[:,0]
ax1 = fig.add_subplot(gs[1, 0])
LazyPlot(convolved,
axs=ax1,
title="Convolved stimulus-vector",
x_label='Time (*osf)',
y_label='Activity (A.U.)',
xkcd=xkcd,
font_size=16)
ax2 = fig.add_subplot(gs[:, 1])
LazyPlot(hrf,
axs=ax2,
title="HRF",
x_label='Time (*osf)',
xkcd=xkcd,
font_size=16)
# check hrf input
if isinstance(hrf, list):
hrfs = hrf.copy()
elif isinstance(hrf, np.ndarray):
hrfs = [hrf]
else:
raise ValueError(f"Unknown input type '{type(hrf)}' for HRF. Must be list or array")
# convolve stimulus vectors
if isinstance(stim_v, np.ndarray):
if len(hrf) >= 1:
convolved_stim_vector = np.zeros((stim_v.shape[0], len(hrf)))
for ix,rf in enumerate(hrf):
                convolved_stim_vector[:,ix] = np.convolve(stim_v, rf, 'full')[:stim_v.shape[0]]
if make_figure:
plot(stim_v, hrf[0], convolved_stim_vector, xkcd=xkcd)
plt.show()
elif isinstance(stim_v, dict):
if len(hrf) >= 1:
convolved_stim_vector = {}
for event in list(stim_v.keys()):
hrf_conv = np.zeros((stim_v[event].shape[0], len(hrf)))
for ix,rf in enumerate(hrf):
hrf_conv[...,ix] = np.convolve(stim_v[event], rf, 'full')[:stim_v[event].shape[0]]
convolved_stim_vector[event] = hrf_conv
if make_figure:
if xkcd:
with plt.xkcd():
plot(stim_v[event], hrf[0], convolved_stim_vector[event])
else:
plot(stim_v[event], hrf[0], convolved_stim_vector[event])
plt.show()
else:
raise ValueError("Data must be 'np.ndarray' or 'dict'")
return convolved_stim_vector
def resample_stim_vector(convolved_array, scan_length, interpolate='nearest'):
"""resample_stim_vector
    Resample the oversampled stimulus vector back into the functional time domain
Parameters
----------
convolved_array: dict, numpy.ndarray
oversampled convolved stimulus vector as per :func:`linescanning.glm.convolve_hrf`
scan_length: int
number of volumes in `data` (= `scan_length` in :func:`linescanning.glm.make_stimulus_vector`)
interpolate: str, optional
interpolation method, by default 'nearest'
Returns
----------
dict, numpy.ndarray
convolved stimulus vector in time domain that matches the fMRI acquisition
Example
----------
>>> from linescanning.glm import resample_stim_vector
>>> convolved_stim_vector_left_ds = resample_stim_vector(convolved_stim_vector_left, <`scan_length`>)
"""
if isinstance(convolved_array, np.ndarray):
interpolated = interp1d(np.arange(len(convolved_array)), convolved_array, kind=interpolate, axis=0, fill_value='extrapolate')
downsampled = interpolated(np.linspace(0, len(convolved_array), scan_length))
elif isinstance(convolved_array, dict):
downsampled = {}
for event in list(convolved_array.keys()):
event_arr = convolved_array[event]
if event_arr.shape[-1] > 1:
tmp = np.zeros((scan_length, event_arr.shape[-1]))
for elem in range(event_arr.shape[-1]):
data = event_arr[..., elem]
interpolated = interp1d(
np.arange(len(data)), data, kind=interpolate, axis=0, fill_value='extrapolate')
tmp[...,elem] = interpolated(np.linspace(0, len(data), scan_length))
downsampled[event] = tmp
else:
interpolated = interp1d(np.arange(len(convolved_array[event])), convolved_array[event], kind=interpolate, axis=0, fill_value='extrapolate')
downsampled[event] = interpolated(np.linspace(0, len(convolved_array[event]), scan_length))
else:
raise ValueError("Data must be 'np.ndarray' or 'dict'")
return downsampled
def first_level_matrix(stims_dict, regressors=None, add_intercept=True, names=None):
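    """Stack convolved stimulus vectors (dict or array) into a first-level design
    matrix DataFrame, optionally prepending an intercept column and appending
    confound regressors.

    Example
    ----------
    >>> # sketch: two convolved predictors with a single HRF basis function each
    >>> X = first_level_matrix({'left': np.zeros((100, 1)), 'right': np.zeros((100, 1))})
    >>> # X has columns ['intercept', 'left', 'right']
    """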
# make dataframe of stimulus vectors
if isinstance(stims_dict, np.ndarray):
if names:
stims = pd.DataFrame(stims_dict, columns=names)
else:
stims = pd.DataFrame(stims_dict, columns=[f'event {ii}' for ii in range(stims_dict.shape[-1])])
elif isinstance(stims_dict, dict):
# check if we got time/dispersion derivatives
cols = []
data = []
keys = list(stims_dict.keys())
for key in keys:
if stims_dict[key].shape[-1] == 1:
cols.extend([key])
elif stims_dict[key].shape[-1] == 2:
cols.extend([key, f'{key}_1st_derivative'])
elif stims_dict[key].shape[-1] == 3:
cols.extend([key, f'{key}_1st_derivative', f'{key}_2nd_derivative'])
data.append(stims_dict[key])
data = np.concatenate(data, axis=-1)
stims = pd.DataFrame(data, columns=cols)
else:
raise ValueError("Data must be 'np.ndarray' or 'dict'")
# check if we should add intercept
if add_intercept:
intercept = np.ones((stims.shape[0], 1))
intercept_df = pd.DataFrame(intercept, columns=['intercept'])
X_matrix = pd.concat([intercept_df, stims], axis=1)
else:
X_matrix = stims.copy()
# check if we should add regressors
if isinstance(regressors, np.ndarray):
regressors_df = pd.DataFrame(regressors, columns=[f'regressor {ii}' for ii in range(regressors.shape[-1])])
return | pd.concat([X_matrix, regressors_df], axis=1) | pandas.concat |
# -*- coding: utf-8 -*-
"""
These the test the public routines exposed in types/common.py
related to inference and not otherwise tested in types/test_common.py
"""
from warnings import catch_warnings, simplefilter
import collections
import re
from datetime import datetime, date, timedelta, time
from decimal import Decimal
from numbers import Number
from fractions import Fraction
import numpy as np
import pytz
import pytest
import pandas as pd
from pandas._libs import lib, iNaT, missing as libmissing
from pandas import (Series, Index, DataFrame, Timedelta,
DatetimeIndex, TimedeltaIndex, Timestamp,
Panel, Period, Categorical, isna, Interval,
DateOffset)
from pandas import compat
from pandas.compat import u, PY2, StringIO, lrange
from pandas.core.dtypes import inference
from pandas.core.dtypes.common import (
is_timedelta64_dtype,
is_timedelta64_ns_dtype,
is_datetime64_dtype,
is_datetime64_ns_dtype,
is_datetime64_any_dtype,
is_datetime64tz_dtype,
is_number,
is_integer,
is_float,
is_bool,
is_scalar,
is_scipy_sparse,
ensure_int32,
ensure_categorical)
from pandas.util import testing as tm
import pandas.util._test_decorators as td
@pytest.fixture(params=[True, False], ids=str)
def coerce(request):
return request.param
# collect all objects to be tested for list-like-ness; use tuples of objects,
# whether they are list-like or not (special casing for sets), and their ID
ll_params = [
([1], True, 'list'), # noqa: E241
([], True, 'list-empty'), # noqa: E241
((1, ), True, 'tuple'), # noqa: E241
(tuple(), True, 'tuple-empty'), # noqa: E241
({'a': 1}, True, 'dict'), # noqa: E241
(dict(), True, 'dict-empty'), # noqa: E241
({'a', 1}, 'set', 'set'), # noqa: E241
(set(), 'set', 'set-empty'), # noqa: E241
(frozenset({'a', 1}), 'set', 'frozenset'), # noqa: E241
(frozenset(), 'set', 'frozenset-empty'), # noqa: E241
(iter([1, 2]), True, 'iterator'), # noqa: E241
(iter([]), True, 'iterator-empty'), # noqa: E241
((x for x in [1, 2]), True, 'generator'), # noqa: E241
((x for x in []), True, 'generator-empty'), # noqa: E241
(Series([1]), True, 'Series'), # noqa: E241
(Series([]), True, 'Series-empty'), # noqa: E241
(Series(['a']).str, True, 'StringMethods'), # noqa: E241
(Series([], dtype='O').str, True, 'StringMethods-empty'), # noqa: E241
(Index([1]), True, 'Index'), # noqa: E241
(Index([]), True, 'Index-empty'), # noqa: E241
(DataFrame([[1]]), True, 'DataFrame'), # noqa: E241
(DataFrame(), True, 'DataFrame-empty'), # noqa: E241
(np.ndarray((2,) * 1), True, 'ndarray-1d'), # noqa: E241
(np.array([]), True, 'ndarray-1d-empty'), # noqa: E241
(np.ndarray((2,) * 2), True, 'ndarray-2d'), # noqa: E241
(np.array([[]]), True, 'ndarray-2d-empty'), # noqa: E241
(np.ndarray((2,) * 3), True, 'ndarray-3d'), # noqa: E241
(np.array([[[]]]), True, 'ndarray-3d-empty'), # noqa: E241
(np.ndarray((2,) * 4), True, 'ndarray-4d'), # noqa: E241
(np.array([[[[]]]]), True, 'ndarray-4d-empty'), # noqa: E241
(np.array(2), False, 'ndarray-0d'), # noqa: E241
(1, False, 'int'), # noqa: E241
(b'123', False, 'bytes'), # noqa: E241
(b'', False, 'bytes-empty'), # noqa: E241
('123', False, 'string'), # noqa: E241
('', False, 'string-empty'), # noqa: E241
(str, False, 'string-type'), # noqa: E241
(object(), False, 'object'), # noqa: E241
(np.nan, False, 'NaN'), # noqa: E241
(None, False, 'None') # noqa: E241
]
objs, expected, ids = zip(*ll_params)
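# unpack into parallel tuples so each test object can be paired with its
# expected list-like flag and a readable test ID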
@pytest.fixture(params=zip(objs, expected), ids=ids)
def maybe_list_like(request):
return request.param
def test_is_list_like(maybe_list_like):
obj, expected = maybe_list_like
expected = True if expected == 'set' else expected
assert inference.is_list_like(obj) == expected
def test_is_list_like_disallow_sets(maybe_list_like):
obj, expected = maybe_list_like
expected = False if expected == 'set' else expected
assert inference.is_list_like(obj, allow_sets=False) == expected
def test_is_sequence():
is_seq = inference.is_sequence
assert (is_seq((1, 2)))
assert (is_seq([1, 2]))
assert (not is_seq("abcd"))
assert (not is_seq(u("abcd")))
assert (not is_seq(np.int64))
class A(object):
def __getitem__(self):
return 1
assert (not is_seq(A()))
def test_is_array_like():
assert inference.is_array_like(Series([]))
assert inference.is_array_like(Series([1, 2]))
assert inference.is_array_like(np.array(["a", "b"]))
assert inference.is_array_like(Index(["2016-01-01"]))
class DtypeList(list):
dtype = "special"
assert inference.is_array_like(DtypeList())
assert not inference.is_array_like([1, 2, 3])
assert not inference.is_array_like(tuple())
assert not inference.is_array_like("foo")
assert not inference.is_array_like(123)
@pytest.mark.parametrize('inner', [
[], [1], (1, ), (1, 2), {'a': 1}, {1, 'a'}, Series([1]),
Series([]), Series(['a']).str, (x for x in range(5))
])
@pytest.mark.parametrize('outer', [
list, Series, np.array, tuple
])
def test_is_nested_list_like_passes(inner, outer):
result = outer([inner for _ in range(5)])
assert inference.is_list_like(result)
@pytest.mark.parametrize('obj', [
'abc', [], [1], (1,), ['a'], 'a', {'a'},
[1, 2, 3], Series([1]), DataFrame({"A": [1]}),
([1, 2] for _ in range(5)),
])
def test_is_nested_list_like_fails(obj):
assert not inference.is_nested_list_like(obj)
@pytest.mark.parametrize(
"ll", [{}, {'A': 1}, Series([1])])
def test_is_dict_like_passes(ll):
assert inference.is_dict_like(ll)
@pytest.mark.parametrize(
"ll", ['1', 1, [1, 2], (1, 2), range(2), Index([1])])
def test_is_dict_like_fails(ll):
assert not inference.is_dict_like(ll)
@pytest.mark.parametrize("has_keys", [True, False])
@pytest.mark.parametrize("has_getitem", [True, False])
@pytest.mark.parametrize("has_contains", [True, False])
def test_is_dict_like_duck_type(has_keys, has_getitem, has_contains):
class DictLike(object):
def __init__(self, d):
self.d = d
if has_keys:
def keys(self):
return self.d.keys()
if has_getitem:
def __getitem__(self, key):
return self.d.__getitem__(key)
if has_contains:
def __contains__(self, key):
return self.d.__contains__(key)
d = DictLike({1: 2})
result = inference.is_dict_like(d)
expected = has_keys and has_getitem and has_contains
assert result is expected
def test_is_file_like(mock):
class MockFile(object):
pass
is_file = inference.is_file_like
data = StringIO("data")
assert is_file(data)
# No read / write attributes
# No iterator attributes
m = MockFile()
assert not is_file(m)
MockFile.write = lambda self: 0
# Write attribute but not an iterator
m = MockFile()
assert not is_file(m)
# gh-16530: Valid iterator just means we have the
# __iter__ attribute for our purposes.
MockFile.__iter__ = lambda self: self
# Valid write-only file
m = MockFile()
assert is_file(m)
del MockFile.write
MockFile.read = lambda self: 0
# Valid read-only file
m = MockFile()
assert is_file(m)
# Iterator but no read / write attributes
data = [1, 2, 3]
assert not is_file(data)
assert not is_file(mock.Mock())
@pytest.mark.parametrize(
"ll", [collections.namedtuple('Test', list('abc'))(1, 2, 3)])
def test_is_names_tuple_passes(ll):
assert inference.is_named_tuple(ll)
@pytest.mark.parametrize(
"ll", [(1, 2, 3), 'a', Series({'pi': 3.14})])
def test_is_names_tuple_fails(ll):
assert not inference.is_named_tuple(ll)
def test_is_hashable():
# all new-style classes are hashable by default
class HashableClass(object):
pass
class UnhashableClass1(object):
__hash__ = None
class UnhashableClass2(object):
def __hash__(self):
raise TypeError("Not hashable")
hashable = (1,
3.14,
np.float64(3.14),
'a',
tuple(),
(1, ),
HashableClass(), )
not_hashable = ([], UnhashableClass1(), )
abc_hashable_not_really_hashable = (([], ), UnhashableClass2(), )
for i in hashable:
assert inference.is_hashable(i)
for i in not_hashable:
assert not inference.is_hashable(i)
for i in abc_hashable_not_really_hashable:
assert not inference.is_hashable(i)
# numpy.array is no longer collections.Hashable as of
# https://github.com/numpy/numpy/pull/5326, just test
# is_hashable()
assert not inference.is_hashable(np.array([]))
# old-style classes in Python 2 don't appear hashable to
# collections.Hashable but also seem to support hash() by default
if PY2:
class OldStyleClass():
pass
c = OldStyleClass()
assert not isinstance(c, compat.Hashable)
assert inference.is_hashable(c)
hash(c) # this will not raise
@pytest.mark.parametrize(
"ll", [re.compile('ad')])
def test_is_re_passes(ll):
assert inference.is_re(ll)
@pytest.mark.parametrize(
"ll", ['x', 2, 3, object()])
def test_is_re_fails(ll):
assert not inference.is_re(ll)
@pytest.mark.parametrize(
"ll", [r'a', u('x'),
r'asdf',
re.compile('adsf'),
u(r'\u2233\s*'),
re.compile(r'')])
def test_is_recompilable_passes(ll):
assert inference.is_re_compilable(ll)
@pytest.mark.parametrize(
"ll", [1, [], object()])
def test_is_recompilable_fails(ll):
assert not inference.is_re_compilable(ll)
class TestInference(object):
def test_infer_dtype_bytes(self):
compare = 'string' if PY2 else 'bytes'
# string array of bytes
arr = np.array(list('abc'), dtype='S1')
assert lib.infer_dtype(arr) == compare
# object array of bytes
arr = arr.astype(object)
assert lib.infer_dtype(arr) == compare
# object array of bytes with missing values
assert lib.infer_dtype([b'a', np.nan, b'c'], skipna=True) == compare
def test_isinf_scalar(self):
# GH 11352
assert libmissing.isposinf_scalar(float('inf'))
assert libmissing.isposinf_scalar(np.inf)
assert not libmissing.isposinf_scalar(-np.inf)
assert not libmissing.isposinf_scalar(1)
assert not libmissing.isposinf_scalar('a')
assert libmissing.isneginf_scalar(float('-inf'))
assert libmissing.isneginf_scalar(-np.inf)
assert not libmissing.isneginf_scalar(np.inf)
assert not libmissing.isneginf_scalar(1)
assert not libmissing.isneginf_scalar('a')
def test_maybe_convert_numeric_infinities(self):
# see gh-13274
infinities = ['inf', 'inF', 'iNf', 'Inf',
'iNF', 'InF', 'INf', 'INF']
na_values = {'', 'NULL', 'nan'}
pos = np.array(['inf'], dtype=np.float64)
neg = np.array(['-inf'], dtype=np.float64)
msg = "Unable to parse string"
for infinity in infinities:
for maybe_int in (True, False):
out = lib.maybe_convert_numeric(
np.array([infinity], dtype=object),
na_values, maybe_int)
tm.assert_numpy_array_equal(out, pos)
out = lib.maybe_convert_numeric(
np.array(['-' + infinity], dtype=object),
na_values, maybe_int)
tm.assert_numpy_array_equal(out, neg)
out = lib.maybe_convert_numeric(
np.array([u(infinity)], dtype=object),
na_values, maybe_int)
tm.assert_numpy_array_equal(out, pos)
out = lib.maybe_convert_numeric(
np.array(['+' + infinity], dtype=object),
na_values, maybe_int)
tm.assert_numpy_array_equal(out, pos)
# too many characters
with pytest.raises(ValueError, match=msg):
lib.maybe_convert_numeric(
np.array(['foo_' + infinity], dtype=object),
na_values, maybe_int)
def test_maybe_convert_numeric_post_floatify_nan(self, coerce):
# see gh-13314
data = np.array(['1.200', '-999.000', '4.500'], dtype=object)
expected = np.array([1.2, np.nan, 4.5], dtype=np.float64)
nan_values = {-999, -999.0}
out = lib.maybe_convert_numeric(data, nan_values, coerce)
tm.assert_numpy_array_equal(out, expected)
def test_convert_infs(self):
arr = np.array(['inf', 'inf', 'inf'], dtype='O')
result = lib.maybe_convert_numeric(arr, set(), False)
assert result.dtype == np.float64
arr = np.array(['-inf', '-inf', '-inf'], dtype='O')
result = lib.maybe_convert_numeric(arr, set(), False)
assert result.dtype == np.float64
def test_scientific_no_exponent(self):
# See PR 12215
arr = np.array(['42E', '2E', '99e', '6e'], dtype='O')
result = lib.maybe_convert_numeric(arr, set(), False, True)
assert np.all(np.isnan(result))
def test_convert_non_hashable(self):
# GH13324
# make sure that we are handing non-hashables
arr = np.array([[10.0, 2], 1.0, 'apple'])
result = lib.maybe_convert_numeric(arr, set(), False, True)
tm.assert_numpy_array_equal(result, np.array([np.nan, 1.0, np.nan]))
def test_convert_numeric_uint64(self):
arr = np.array([2**63], dtype=object)
exp = np.array([2**63], dtype=np.uint64)
tm.assert_numpy_array_equal(lib.maybe_convert_numeric(arr, set()), exp)
arr = np.array([str(2**63)], dtype=object)
exp = np.array([2**63], dtype=np.uint64)
tm.assert_numpy_array_equal(lib.maybe_convert_numeric(arr, set()), exp)
arr = np.array([np.uint64(2**63)], dtype=object)
exp = np.array([2**63], dtype=np.uint64)
tm.assert_numpy_array_equal(lib.maybe_convert_numeric(arr, set()), exp)
@pytest.mark.parametrize("arr", [
np.array([2**63, np.nan], dtype=object),
np.array([str(2**63), np.nan], dtype=object),
np.array([np.nan, 2**63], dtype=object),
np.array([np.nan, str(2**63)], dtype=object)])
def test_convert_numeric_uint64_nan(self, coerce, arr):
expected = arr.astype(float) if coerce else arr.copy()
result = lib.maybe_convert_numeric(arr, set(),
coerce_numeric=coerce)
tm.assert_almost_equal(result, expected)
def test_convert_numeric_uint64_nan_values(self, coerce):
arr = np.array([2**63, 2**63 + 1], dtype=object)
na_values = {2**63}
expected = (np.array([np.nan, 2**63 + 1], dtype=float)
if coerce else arr.copy())
result = lib.maybe_convert_numeric(arr, na_values,
coerce_numeric=coerce)
tm.assert_almost_equal(result, expected)
@pytest.mark.parametrize("case", [
np.array([2**63, -1], dtype=object),
np.array([str(2**63), -1], dtype=object),
np.array([str(2**63), str(-1)], dtype=object),
np.array([-1, 2**63], dtype=object),
np.array([-1, str(2**63)], dtype=object),
np.array([str(-1), str(2**63)], dtype=object)])
def test_convert_numeric_int64_uint64(self, case, coerce):
expected = case.astype(float) if coerce else case.copy()
result = lib.maybe_convert_numeric(case, set(), coerce_numeric=coerce)
tm.assert_almost_equal(result, expected)
@pytest.mark.parametrize("value", [-2**63 - 1, 2**64])
def test_convert_int_overflow(self, value):
# see gh-18584
arr = np.array([value], dtype=object)
result = lib.maybe_convert_objects(arr)
tm.assert_numpy_array_equal(arr, result)
def test_maybe_convert_objects_uint64(self):
# see gh-4471
arr = np.array([2**63], dtype=object)
exp = np.array([2**63], dtype=np.uint64)
tm.assert_numpy_array_equal(lib.maybe_convert_objects(arr), exp)
# NumPy bug: can't compare uint64 to int64, as that
# results in both casting to float64, so we should
# make sure that this function is robust against it
arr = np.array([np.uint64(2**63)], dtype=object)
exp = np.array([2**63], dtype=np.uint64)
tm.assert_numpy_array_equal(lib.maybe_convert_objects(arr), exp)
arr = np.array([2, -1], dtype=object)
exp = np.array([2, -1], dtype=np.int64)
tm.assert_numpy_array_equal(lib.maybe_convert_objects(arr), exp)
arr = np.array([2**63, -1], dtype=object)
exp = np.array([2**63, -1], dtype=object)
tm.assert_numpy_array_equal(lib.maybe_convert_objects(arr), exp)
def test_mixed_dtypes_remain_object_array(self):
# GH14956
array = np.array([datetime(2015, 1, 1, tzinfo=pytz.utc), 1],
dtype=object)
result = lib.maybe_convert_objects(array, convert_datetime=1)
tm.assert_numpy_array_equal(result, array)
class TestTypeInference(object):
# Dummy class used for testing with Python objects
class Dummy():
pass
def test_inferred_dtype_fixture(self, any_skipna_inferred_dtype):
# see pandas/conftest.py
inferred_dtype, values = any_skipna_inferred_dtype
# make sure the inferred dtype of the fixture is as requested
assert inferred_dtype == lib.infer_dtype(values, skipna=True)
def test_length_zero(self):
result = lib.infer_dtype(np.array([], dtype='i4'))
assert result == 'integer'
result = lib.infer_dtype([])
assert result == 'empty'
# GH 18004
arr = np.array([np.array([], dtype=object),
np.array([], dtype=object)])
result = lib.infer_dtype(arr)
assert result == 'empty'
def test_integers(self):
arr = np.array([1, 2, 3, np.int64(4), np.int32(5)], dtype='O')
result = lib.infer_dtype(arr)
assert result == 'integer'
arr = np.array([1, 2, 3, np.int64(4), np.int32(5), 'foo'], dtype='O')
result = lib.infer_dtype(arr)
assert result == 'mixed-integer'
arr = np.array([1, 2, 3, 4, 5], dtype='i4')
result = lib.infer_dtype(arr)
assert result == 'integer'
def test_bools(self):
arr = np.array([True, False, True, True, True], dtype='O')
result = lib.infer_dtype(arr)
assert result == 'boolean'
arr = np.array([np.bool_(True), np.bool_(False)], dtype='O')
result = lib.infer_dtype(arr)
assert result == 'boolean'
arr = np.array([True, False, True, 'foo'], dtype='O')
result = lib.infer_dtype(arr)
assert result == 'mixed'
arr = np.array([True, False, True], dtype=bool)
result = lib.infer_dtype(arr)
assert result == 'boolean'
arr = np.array([True, np.nan, False], dtype='O')
result = lib.infer_dtype(arr, skipna=True)
assert result == 'boolean'
def test_floats(self):
arr = np.array([1., 2., 3., np.float64(4), np.float32(5)], dtype='O')
result = lib.infer_dtype(arr)
assert result == 'floating'
arr = np.array([1, 2, 3, np.float64(4), np.float32(5), 'foo'],
dtype='O')
result = lib.infer_dtype(arr)
assert result == 'mixed-integer'
arr = np.array([1, 2, 3, 4, 5], dtype='f4')
result = lib.infer_dtype(arr)
assert result == 'floating'
arr = np.array([1, 2, 3, 4, 5], dtype='f8')
result = lib.infer_dtype(arr)
assert result == 'floating'
def test_decimals(self):
# GH15690
arr = np.array([Decimal(1), Decimal(2), Decimal(3)])
result = lib.infer_dtype(arr)
assert result == 'decimal'
arr = np.array([1.0, 2.0, Decimal(3)])
result = lib.infer_dtype(arr)
assert result == 'mixed'
arr = np.array([Decimal(1), Decimal('NaN'), Decimal(3)])
result = lib.infer_dtype(arr)
assert result == 'decimal'
arr = np.array([Decimal(1), np.nan, Decimal(3)], dtype='O')
result = lib.infer_dtype(arr)
assert result == 'decimal'
def test_string(self):
pass
def test_unicode(self):
arr = [u'a', np.nan, u'c']
result = lib.infer_dtype(arr)
assert result == 'mixed'
arr = [u'a', np.nan, u'c']
result = lib.infer_dtype(arr, skipna=True)
expected = 'unicode' if PY2 else 'string'
assert result == expected
@pytest.mark.parametrize('dtype, missing, skipna, expected', [
(float, np.nan, False, 'floating'),
(float, np.nan, True, 'floating'),
(object, np.nan, False, 'floating'),
(object, np.nan, True, 'empty'),
(object, None, False, 'mixed'),
(object, None, True, 'empty')
])
@pytest.mark.parametrize('box', [pd.Series, np.array])
def test_object_empty(self, box, missing, dtype, skipna, expected):
# GH 23421
arr = box([missing, missing], dtype=dtype)
result = lib.infer_dtype(arr, skipna=skipna)
assert result == expected
def test_datetime(self):
dates = [datetime(2012, 1, x) for x in range(1, 20)]
index = Index(dates)
assert index.inferred_type == 'datetime64'
def test_infer_dtype_datetime(self):
arr = np.array([Timestamp('2011-01-01'),
Timestamp('2011-01-02')])
assert lib.infer_dtype(arr) == 'datetime'
arr = np.array([np.datetime64('2011-01-01'),
np.datetime64('2011-01-01')], dtype=object)
assert lib.infer_dtype(arr) == 'datetime64'
arr = np.array([datetime(2011, 1, 1), datetime(2012, 2, 1)])
assert lib.infer_dtype(arr) == 'datetime'
# starts with nan
for n in [pd.NaT, np.nan]:
arr = np.array([n, pd.Timestamp('2011-01-02')])
assert lib.infer_dtype(arr) == 'datetime'
arr = np.array([n, np.datetime64('2011-01-02')])
assert lib.infer_dtype(arr) == 'datetime64'
arr = np.array([n, datetime(2011, 1, 1)])
assert lib.infer_dtype(arr) == 'datetime'
arr = np.array([n, pd.Timestamp('2011-01-02'), n])
assert lib.infer_dtype(arr) == 'datetime'
arr = np.array([n, np.datetime64('2011-01-02'), n])
assert lib.infer_dtype(arr) == 'datetime64'
arr = np.array([n, datetime(2011, 1, 1), n])
assert lib.infer_dtype(arr) == 'datetime'
# different type of nat
arr = np.array([np.timedelta64('nat'),
np.datetime64('2011-01-02')], dtype=object)
assert lib.infer_dtype(arr) == 'mixed'
arr = np.array([np.datetime64('2011-01-02'),
np.timedelta64('nat')], dtype=object)
assert lib.infer_dtype(arr) == 'mixed'
# mixed datetime
arr = np.array([datetime(2011, 1, 1),
pd.Timestamp('2011-01-02')])
assert lib.infer_dtype(arr) == 'datetime'
# should be datetime?
arr = np.array([np.datetime64('2011-01-01'),
pd.Timestamp('2011-01-02')])
assert lib.infer_dtype(arr) == 'mixed'
arr = np.array([pd.Timestamp('2011-01-02'),
np.datetime64('2011-01-01')])
assert lib.infer_dtype(arr) == 'mixed'
arr = np.array([np.nan, pd.Timestamp('2011-01-02'), 1])
assert lib.infer_dtype(arr) == 'mixed-integer'
arr = np.array([np.nan, pd.Timestamp('2011-01-02'), 1.1])
assert lib.infer_dtype(arr) == 'mixed'
arr = np.array([np.nan, '2011-01-01', pd.Timestamp('2011-01-02')])
assert lib.infer_dtype(arr) == 'mixed'
def test_infer_dtype_timedelta(self):
arr = np.array([pd.Timedelta('1 days'),
pd.Timedelta('2 days')])
assert lib.infer_dtype(arr) == 'timedelta'
arr = np.array([np.timedelta64(1, 'D'),
np.timedelta64(2, 'D')], dtype=object)
assert lib.infer_dtype(arr) == 'timedelta'
arr = np.array([timedelta(1), timedelta(2)])
assert lib.infer_dtype(arr) == 'timedelta'
# starts with nan
for n in [pd.NaT, np.nan]:
arr = np.array([n, Timedelta('1 days')])
assert lib.infer_dtype(arr) == 'timedelta'
arr = np.array([n, np.timedelta64(1, 'D')])
assert lib.infer_dtype(arr) == 'timedelta'
arr = np.array([n, timedelta(1)])
assert lib.infer_dtype(arr) == 'timedelta'
arr = np.array([n, pd.Timedelta('1 days'), n])
assert lib.infer_dtype(arr) == 'timedelta'
arr = np.array([n, np.timedelta64(1, 'D'), n])
assert lib.infer_dtype(arr) == 'timedelta'
arr = np.array([n, timedelta(1), n])
assert lib.infer_dtype(arr) == 'timedelta'
# different type of nat
arr = np.array([np.datetime64('nat'), np.timedelta64(1, 'D')],
dtype=object)
assert lib.infer_dtype(arr) == 'mixed'
arr = np.array([np.timedelta64(1, 'D'), np.datetime64('nat')],
dtype=object)
assert lib.infer_dtype(arr) == 'mixed'
def test_infer_dtype_period(self):
# GH 13664
arr = np.array([pd.Period('2011-01', freq='D'),
pd.Period('2011-02', freq='D')])
assert lib.infer_dtype(arr) == 'period'
arr = np.array([pd.Period('2011-01', freq='D'),
pd.Period('2011-02', freq='M')])
assert lib.infer_dtype(arr) == 'period'
# starts with nan
for n in [pd.NaT, np.nan]:
arr = np.array([n, pd.Period('2011-01', freq='D')])
assert lib.infer_dtype(arr) == 'period'
arr = np.array([n, pd.Period('2011-01', freq='D'), n])
assert lib.infer_dtype(arr) == 'period'
# different type of nat
arr = np.array([np.datetime64('nat'), pd.Period('2011-01', freq='M')],
dtype=object)
assert lib.infer_dtype(arr) == 'mixed'
arr = np.array([pd.Period('2011-01', freq='M'), np.datetime64('nat')],
dtype=object)
assert lib.infer_dtype(arr) == 'mixed'
@pytest.mark.parametrize(
"data",
[
[datetime(2017, 6, 12, 19, 30), datetime(2017, 3, 11, 1, 15)],
[Timestamp("20170612"), Timestamp("20170311")],
[Timestamp("20170612", tz='US/Eastern'),
Timestamp("20170311", tz='US/Eastern')],
[date(2017, 6, 12),
Timestamp("20170311", tz='US/Eastern')],
[np.datetime64("2017-06-12"), np.datetime64("2017-03-11")],
[np.datetime64("2017-06-12"), datetime(2017, 3, 11, 1, 15)]
]
)
def test_infer_datetimelike_array_datetime(self, data):
assert lib.infer_datetimelike_array(data) == "datetime"
@pytest.mark.parametrize(
"data",
[
[timedelta(2017, 6, 12), timedelta(2017, 3, 11)],
[timedelta(2017, 6, 12), date(2017, 3, 11)],
[np.timedelta64(2017, "D"), np.timedelta64(6, "s")],
[np.timedelta64(2017, "D"), timedelta(2017, 3, 11)]
]
)
def test_infer_datetimelike_array_timedelta(self, data):
assert lib.infer_datetimelike_array(data) == "timedelta"
def test_infer_datetimelike_array_date(self):
arr = [date(2017, 6, 12), date(2017, 3, 11)]
assert lib.infer_datetimelike_array(arr) == "date"
@pytest.mark.parametrize(
"data",
[
["2017-06-12", "2017-03-11"],
[20170612, 20170311],
[20170612.5, 20170311.8],
[Dummy(), Dummy()],
[Timestamp("20170612"), Timestamp("20170311", tz='US/Eastern')],
[Timestamp("20170612"), 20170311],
[timedelta(2017, 6, 12), Timestamp("20170311", tz='US/Eastern')]
]
)
def test_infer_datetimelike_array_mixed(self, data):
assert lib.infer_datetimelike_array(data) == "mixed"
@pytest.mark.parametrize(
"first, expected",
[
[[None], "mixed"],
[[np.nan], "mixed"],
[[pd.NaT], "nat"],
[[datetime(2017, 6, 12, 19, 30), pd.NaT], "datetime"],
[[np.datetime64("2017-06-12"), pd.NaT], "datetime"],
[[date(2017, 6, 12), pd.NaT], "date"],
[[timedelta(2017, 6, 12), pd.NaT], "timedelta"],
[[np.timedelta64(2017, "D"), pd.NaT], "timedelta"]
]
)
@pytest.mark.parametrize("second", [None, np.nan])
def test_infer_datetimelike_array_nan_nat_like(self, first, second,
expected):
first.append(second)
assert lib.infer_datetimelike_array(first) == expected
def test_infer_dtype_all_nan_nat_like(self):
arr = np.array([np.nan, np.nan])
assert lib.infer_dtype(arr) == 'floating'
# nan and None mix are result in mixed
arr = np.array([np.nan, np.nan, None])
assert lib.infer_dtype(arr) == 'mixed'
arr = np.array([None, np.nan, np.nan])
assert lib.infer_dtype(arr) == 'mixed'
# pd.NaT
arr = np.array([pd.NaT])
assert lib.infer_dtype(arr) == 'datetime'
arr = np.array([pd.NaT, np.nan])
assert lib.infer_dtype(arr) == 'datetime'
arr = np.array([np.nan, pd.NaT])
assert lib.infer_dtype(arr) == 'datetime'
arr = np.array([np.nan, pd.NaT, np.nan])
assert lib.infer_dtype(arr) == 'datetime'
arr = np.array([None, pd.NaT, None])
assert lib.infer_dtype(arr) == 'datetime'
# np.datetime64(nat)
arr = np.array([np.datetime64('nat')])
assert lib.infer_dtype(arr) == 'datetime64'
for n in [np.nan, pd.NaT, None]:
arr = np.array([n, np.datetime64('nat'), n])
assert lib.infer_dtype(arr) == 'datetime64'
arr = np.array([pd.NaT, n, np.datetime64('nat'), n])
assert lib.infer_dtype(arr) == 'datetime64'
arr = np.array([np.timedelta64('nat')], dtype=object)
assert lib.infer_dtype(arr) == 'timedelta'
for n in [np.nan, pd.NaT, None]:
arr = np.array([n, np.timedelta64('nat'), n])
assert lib.infer_dtype(arr) == 'timedelta'
arr = np.array([pd.NaT, n, np.timedelta64('nat'), n])
assert lib.infer_dtype(arr) == 'timedelta'
# datetime / timedelta mixed
arr = np.array([pd.NaT, np.datetime64('nat'),
np.timedelta64('nat'), np.nan])
assert lib.infer_dtype(arr) == 'mixed'
arr = np.array([np.timedelta64('nat'), np.datetime64('nat')],
dtype=object)
assert lib.infer_dtype(arr) == 'mixed'
def test_is_datetimelike_array_all_nan_nat_like(self):
arr = np.array([np.nan, pd.NaT, np.datetime64('nat')])
assert lib.is_datetime_array(arr)
assert lib.is_datetime64_array(arr)
assert not lib.is_timedelta_or_timedelta64_array(arr)
arr = np.array([np.nan, pd.NaT, np.timedelta64('nat')])
assert not lib.is_datetime_array(arr)
assert not lib.is_datetime64_array(arr)
assert lib.is_timedelta_or_timedelta64_array(arr)
arr = np.array([np.nan, pd.NaT, np.datetime64('nat'),
np.timedelta64('nat')])
assert not lib.is_datetime_array(arr)
assert not lib.is_datetime64_array(arr)
assert not lib.is_timedelta_or_timedelta64_array(arr)
arr = np.array([np.nan, pd.NaT])
assert lib.is_datetime_array(arr)
assert lib.is_datetime64_array(arr)
assert lib.is_timedelta_or_timedelta64_array(arr)
arr = np.array([np.nan, np.nan], dtype=object)
assert not lib.is_datetime_array(arr)
assert not lib.is_datetime64_array(arr)
assert not lib.is_timedelta_or_timedelta64_array(arr)
assert lib.is_datetime_with_singletz_array(
np.array([pd.Timestamp('20130101', tz='US/Eastern'),
pd.Timestamp('20130102', tz='US/Eastern')],
dtype=object))
assert not lib.is_datetime_with_singletz_array(
np.array([pd.Timestamp('20130101', tz='US/Eastern'),
pd.Timestamp('20130102', tz='CET')],
dtype=object))
@pytest.mark.parametrize(
"func",
[
'is_datetime_array',
'is_datetime64_array',
'is_bool_array',
'is_timedelta_or_timedelta64_array',
'is_date_array',
'is_time_array',
'is_interval_array',
'is_period_array'])
def test_other_dtypes_for_array(self, func):
func = getattr(lib, func)
arr = np.array(['foo', 'bar'])
assert not func(arr)
arr = np.array([1, 2])
assert not func(arr)
def test_date(self):
dates = [date(2012, 1, day) for day in range(1, 20)]
index = Index(dates)
assert index.inferred_type == 'date'
dates = [date(2012, 1, day) for day in range(1, 20)] + [np.nan]
result = lib.infer_dtype(dates)
assert result == 'mixed'
result = lib.infer_dtype(dates, skipna=True)
assert result == 'date'
def test_is_numeric_array(self):
assert lib.is_float_array(np.array([1, 2.0]))
assert lib.is_float_array(np.array([1, 2.0, np.nan]))
assert not lib.is_float_array(np.array([1, 2]))
assert lib.is_integer_array(np.array([1, 2]))
assert not lib.is_integer_array(np.array([1, 2.0]))
def test_is_string_array(self):
assert lib.is_string_array(np.array(['foo', 'bar']))
assert not lib.is_string_array(
np.array(['foo', 'bar', np.nan], dtype=object), skipna=False)
assert lib.is_string_array(
np.array(['foo', 'bar', np.nan], dtype=object), skipna=True)
assert not lib.is_string_array(np.array([1, 2]))
def test_to_object_array_tuples(self):
r = (5, 6)
values = [r]
result = lib.to_object_array_tuples(values)
try:
# make sure record array works
from collections import namedtuple
record = namedtuple('record', 'x y')
r = record(5, 6)
values = [r]
result = lib.to_object_array_tuples(values) # noqa
except ImportError:
pass
def test_object(self):
# GH 7431
# cannot infer more than this as only a single element
arr = np.array([None], dtype='O')
result = lib.infer_dtype(arr)
assert result == 'mixed'
def test_to_object_array_width(self):
# see gh-13320
rows = [[1, 2, 3], [4, 5, 6]]
expected = np.array(rows, dtype=object)
out = lib.to_object_array(rows)
tm.assert_numpy_array_equal(out, expected)
expected = np.array(rows, dtype=object)
out = lib.to_object_array(rows, min_width=1)
tm.assert_numpy_array_equal(out, expected)
expected = np.array([[1, 2, 3, None, None],
[4, 5, 6, None, None]], dtype=object)
out = lib.to_object_array(rows, min_width=5)
tm.assert_numpy_array_equal(out, expected)
def test_is_period(self):
assert lib.is_period(pd.Period('2011-01', freq='M'))
assert not lib.is_period(pd.PeriodIndex(['2011-01'], freq='M'))
assert not lib.is_period(pd.Timestamp('2011-01'))
assert not lib.is_period(1)
assert not lib.is_period(np.nan)
def test_categorical(self):
# GH 8974
from pandas import Categorical, Series
arr = Categorical(list('abc'))
result = lib.infer_dtype(arr)
assert result == 'categorical'
result = lib.infer_dtype(Series(arr))
assert result == 'categorical'
arr = Categorical(list('abc'), categories=['cegfab'], ordered=True)
result = lib.infer_dtype(arr)
assert result == 'categorical'
result = lib.infer_dtype(Series(arr))
assert result == 'categorical'
class TestNumberScalar(object):
def test_is_number(self):
assert is_number(True)
assert is_number(1)
assert is_number(1.1)
assert is_number(1 + 3j)
assert is_number(np.bool(False))
assert is_number(np.int64(1))
assert is_number(np.float64(1.1))
assert is_number(np.complex128(1 + 3j))
assert is_number(np.nan)
assert not is_number(None)
assert not is_number('x')
assert not is_number(datetime(2011, 1, 1))
assert not is_number(np.datetime64('2011-01-01'))
assert not is_number(Timestamp('2011-01-01'))
assert not is_number(Timestamp('2011-01-01', tz='US/Eastern'))
assert not is_number(timedelta(1000))
assert not is_number(Timedelta('1 days'))
# questionable
assert not is_number(np.bool_(False))
assert is_number(np.timedelta64(1, 'D'))
def test_is_bool(self):
assert is_bool(True)
assert is_bool(np.bool(False))
assert is_bool(np.bool_(False))
assert not is_bool(1)
assert not is_bool(1.1)
assert not is_bool(1 + 3j)
assert not is_bool(np.int64(1))
assert not is_bool(np.float64(1.1))
assert not is_bool(np.complex128(1 + 3j))
assert not is_bool(np.nan)
assert not is_bool(None)
assert not is_bool('x')
assert not is_bool(datetime(2011, 1, 1))
assert not is_bool(np.datetime64('2011-01-01'))
assert not is_bool(Timestamp('2011-01-01'))
assert not is_bool(Timestamp('2011-01-01', tz='US/Eastern'))
assert not is_bool(timedelta(1000))
assert not is_bool(np.timedelta64(1, 'D'))
assert not is_bool(Timedelta('1 days'))
def test_is_integer(self):
assert is_integer(1)
assert is_integer(np.int64(1))
assert not is_integer(True)
assert not is_integer(1.1)
assert not is_integer(1 + 3j)
assert not is_integer(np.bool(False))
assert not is_integer(np.bool_(False))
assert not is_integer(np.float64(1.1))
assert not is_integer(np.complex128(1 + 3j))
assert not is_integer(np.nan)
assert not is_integer(None)
assert not is_integer('x')
assert not is_integer(datetime(2011, 1, 1))
assert not is_integer(np.datetime64('2011-01-01'))
assert not is_integer(Timestamp('2011-01-01'))
assert not is_integer(Timestamp('2011-01-01', tz='US/Eastern'))
assert not is_integer(timedelta(1000))
assert not is_integer(Timedelta('1 days'))
# questionable
assert is_integer(np.timedelta64(1, 'D'))
def test_is_float(self):
assert is_float(1.1)
assert is_float(np.float64(1.1))
assert is_float(np.nan)
assert not is_float(True)
assert not is_float(1)
assert not is_float(1 + 3j)
assert not is_float(np.bool(False))
assert not is_float(np.bool_(False))
assert not is_float(np.int64(1))
assert not is_float(np.complex128(1 + 3j))
assert not is_float(None)
assert not is_float('x')
assert not is_float(datetime(2011, 1, 1))
assert not is_float(np.datetime64('2011-01-01'))
assert not is_float(Timestamp('2011-01-01'))
assert not is_float(Timestamp('2011-01-01', tz='US/Eastern'))
assert not is_float(timedelta(1000))
assert not is_float(np.timedelta64(1, 'D'))
assert not is_float(Timedelta('1 days'))
def test_is_datetime_dtypes(self):
ts = pd.date_range('20130101', periods=3)
tsa = pd.date_range('20130101', periods=3, tz='US/Eastern')
assert is_datetime64_dtype('datetime64')
assert is_datetime64_dtype('datetime64[ns]')
assert is_datetime64_dtype(ts)
assert not is_datetime64_dtype(tsa)
assert not is_datetime64_ns_dtype('datetime64')
assert is_datetime64_ns_dtype('datetime64[ns]')
assert is_datetime64_ns_dtype(ts)
assert is_datetime64_ns_dtype(tsa)
assert is_datetime64_any_dtype('datetime64')
assert is_datetime64_any_dtype('datetime64[ns]')
assert is_datetime64_any_dtype(ts)
assert is_datetime64_any_dtype(tsa)
assert not is_datetime64tz_dtype('datetime64')
assert not is_datetime64tz_dtype('datetime64[ns]')
assert not is_datetime64tz_dtype(ts)
assert is_datetime64tz_dtype(tsa)
for tz in ['US/Eastern', 'UTC']:
dtype = 'datetime64[ns, {}]'.format(tz)
assert not is_datetime64_dtype(dtype)
assert is_datetime64tz_dtype(dtype)
assert is_datetime64_ns_dtype(dtype)
assert is_datetime64_any_dtype(dtype)
def test_is_timedelta(self):
assert is_timedelta64_dtype('timedelta64')
assert is_timedelta64_dtype('timedelta64[ns]')
assert not is_timedelta64_ns_dtype('timedelta64')
assert is_timedelta64_ns_dtype('timedelta64[ns]')
tdi = TimedeltaIndex([1e14, 2e14], dtype='timedelta64[ns]')
assert is_timedelta64_dtype(tdi)
assert is_timedelta64_ns_dtype(tdi)
assert is_timedelta64_ns_dtype(tdi.astype('timedelta64[ns]'))
# Conversion to Int64Index:
assert not is_timedelta64_ns_dtype(tdi.astype('timedelta64'))
assert not is_timedelta64_ns_dtype(tdi.astype('timedelta64[h]'))
class TestIsScalar(object):
def test_is_scalar_builtin_scalars(self):
assert is_scalar(None)
assert is_scalar(True)
assert is_scalar(False)
assert is_scalar(Number())
assert is_scalar(Fraction())
assert is_scalar(0.)
assert is_scalar(np.nan)
assert is_scalar('foobar')
assert is_scalar(b'foobar')
assert is_scalar(u('efoobar'))
assert is_scalar(datetime(2014, 1, 1))
assert is_scalar(date(2014, 1, 1))
assert is_scalar(time(12, 0))
assert is_scalar(timedelta(hours=1))
assert is_scalar(pd.NaT)
def test_is_scalar_builtin_nonscalars(self):
assert not is_scalar({})
assert not is_scalar([])
assert not is_scalar([1])
assert not is_scalar(())
assert not is_scalar((1, ))
assert not is_scalar(slice(None))
assert not is_scalar(Ellipsis)
def test_is_scalar_numpy_array_scalars(self):
assert is_scalar(np.int64(1))
assert is_scalar(np.float64(1.))
assert is_scalar(np.int32(1))
assert is_scalar(np.object_('foobar'))
assert is_scalar(np.str_('foobar'))
assert is_scalar(np.unicode_(u('foobar')))
assert is_scalar(np.bytes_(b'foobar'))
assert is_scalar(np.datetime64('2014-01-01'))
assert is_scalar(np.timedelta64(1, 'h'))
def test_is_scalar_numpy_zerodim_arrays(self):
for zerodim in [np.array(1), np.array('foobar'),
np.array(np.datetime64('2014-01-01')),
np.array(np.timedelta64(1, 'h')),
np.array(np.datetime64('NaT'))]:
            assert not is_scalar(zerodim)
import pandas as pd
import numpy as np
import datetime
import calendar
from math import e
from brightwind.analyse import plot as plt
# noinspection PyProtectedMember
from brightwind.analyse.analyse import dist_by_dir_sector, dist_12x24, coverage, _convert_df_to_series
from ipywidgets import FloatProgress
from IPython.display import display
from IPython.display import clear_output
import re
import warnings
pd.options.mode.chained_assignment = None
__all__ = ['Shear']
class Shear:
class TimeSeries:
def __init__(self, wspds, heights, min_speed=3, calc_method='power_law', max_plot_height=None,
maximise_data=False):
"""
Calculates alpha, using the power law, or the roughness coefficient, using the log law, for each timestamp
of a wind series.
:param wspds: pandas DataFrame, list of pandas.Series or list of wind speeds to be used for calculating shear.
:type wspds: pandas.DataFrame, list of pandas.Series or list.
:param heights: List of anemometer heights.
:type heights: list
:param min_speed: Only speeds higher than this would be considered for calculating shear, default is 3.
:type min_speed: float
:param calc_method: method to use for calculation, either 'power_law' (returns alpha) or 'log_law'
(returns the roughness coefficient).
:type calc_method: str
:param max_plot_height: height to which the wind profile plot is extended.
:type max_plot_height: float
:param maximise_data: If maximise_data is True, calculations will be carried out on all data where two or
more anemometers readings exist for a timestamp. If False, calculations will only be
carried out on timestamps where readings exist for all anemometers.
:type maximise_data: Boolean
            :return: TimeSeries object containing calculated alpha/roughness coefficient values, a plot
                and other data.
            :rtype: TimeSeries object
**Example usage**
::
import brightwind as bw
import pprint
# Load anemometer data to calculate exponents
                data = bw.load_csv(r'C:\\Users\\Stephen\\Documents\\Analysis\\demo_data')
anemometers = data[['Spd80mS', 'Spd60mS','Spd40mS']]
heights = [80, 60, 40]
# Using with a DataFrame of wind speeds
timeseries_power_law = bw.Shear.TimeSeries(anemometers, heights, maximise_data=True)
timeseries_log_law = bw.Shear.TimeSeries(anemometers, heights, calc_method='log_law',
max_plot_height=120)
# Get the alpha or roughness values calculated
timeseries_power_law.alpha
timeseries_log_law.roughness
# View plot
timeseries_power_law.plot
timeseries_log_law.plot
# View input anemometer data
timeseries_power_law.wspds
timeseries_log_law.wspds
# View other information
pprint.pprint(timeseries_power_law.info)
pprint.pprint(timeseries_log_law.info)
"""
print('This may take a while...')
wspds, cvg = Shear._data_prep(wspds=wspds, heights=heights, min_speed=min_speed, maximise_data=maximise_data)
if calc_method == 'power_law':
alpha_c = (wspds[(wspds > min_speed).all(axis=1)].apply(Shear._calc_power_law, heights=heights,
return_coeff=True,
maximise_data=maximise_data, axis=1))
alpha = pd.Series(alpha_c.iloc[:, 0], name='alpha')
self._alpha = alpha
elif calc_method == 'log_law':
slope_intercept = (wspds[(wspds > min_speed).all(axis=1)].apply(Shear._calc_log_law, heights=heights,
return_coeff=True,
maximise_data=maximise_data, axis=1))
slope = slope_intercept.iloc[:, 0]
intercept = slope_intercept.iloc[:, 1]
roughness_coefficient = pd.Series(Shear._calc_roughness(slope=slope, intercept=intercept),
name='roughness_coefficient')
self._roughness = roughness_coefficient
clear_output()
avg_plot = Shear.Average(wspds=wspds, heights=heights, calc_method=calc_method,
max_plot_height=max_plot_height)
self.origin = 'TimeSeries'
self.calc_method = calc_method
self.wspds = wspds
self.plot = avg_plot.plot
self.info = Shear._create_info(self, heights=heights, cvg=cvg, min_speed=min_speed)
@property
def alpha(self):
return self._alpha
@property
def roughness(self):
return self._roughness
        def apply(self, wspds, height, shear_to):
            """
Applies shear calculated to a wind speed time series and scales wind speed from one height to
another for each matching timestamp.
:param self: TimeSeries object to use when applying shear to the data.
:type self: TimeSeries object
:param wspds: Wind speed time series to apply shear to.
:type wspds: pandas.Series
:param height: height of above wspds.
:type height: float
:param shear_to: height to which wspds should be scaled to.
:type shear_to: float
:return: a pandas.Series of the scaled wind speeds.
:rtype: pandas.Series
**Example Usage**
::
import brightwind as bw
# Load anemometer data to calculate exponents
                data = bw.load_csv(r'C:\\Users\\Stephen\\Documents\\Analysis\\demo_data')
anemometers = data[['Spd80mS', 'Spd60mS','Spd40mS']]
heights = [80, 60, 40]
# Get power law object
timeseries_power_law = bw.Shear.TimeSeries(anemometers, heights)
timeseries_log_law = bw.Shear.TimeSeries(anemometers, heights, calc_method='log_law')
# Scale wind speeds using calculated exponents
timeseries_power_law.apply(data['Spd40mN'], height=40, shear_to=70)
timeseries_log_law.apply(data['Spd40mN'], height=40, shear_to=70)
"""
return Shear._apply(self, wspds, height, shear_to)
class TimeOfDay:
def __init__(self, wspds, heights, min_speed=3, calc_method='power_law', by_month=True, segment_start_time=7,
segments_per_day=24, plot_type='line'):
"""
Calculates alpha, using the power law, or the roughness coefficient, using the log law, for a wind series
binned by time of the day and (optionally by) month, depending on the user's inputs. The alpha/roughness
coefficient values are calculated based on the average wind speeds at each measurement height in each bin.
:param wspds: pandas.DataFrame, list of pandas.Series or list of wind speeds to be used for calculating
shear.
:type wspds: pandas.DataFrame, list of pandas.Series or list.
            :param heights: List of anemometer heights.
:type heights: list
:param min_speed: Only speeds higher than this would be considered for calculating shear, default is 3
:type min_speed: float
:param calc_method: method to use for calculation, either 'power_law' (returns alpha) or 'log_law'
(returns the roughness coefficient).
:type calc_method: str
:param by_month: If True, calculate alpha or roughness coefficient values for each daily segment and month.
If False, average alpha or roughness coefficient values are calculated for each daily
segment across all months.
:type by_month: Boolean
:param segment_start_time: Starting time for first segment.
:type segment_start_time: int
            :param segments_per_day: Number of segments into which each 24 hour period is split. Must be a divisor
                                     of 24.
:type segments_per_day: int
:param plot_type: Type of plot to be generated. Options include 'line', 'step' and '12x24'.
:type plot_type: str
:return: TimeOfDay object containing calculated alpha/roughness coefficient values, a plot
and other data.
:rtype: TimeOfDay object
**Example usage**
::
import brightwind as bw
import pprint
# Load anemometer data to calculate exponents
                data = bw.load_csv(r'C:\\Users\\Stephen\\Documents\\Analysis\\demo_data')
anemometers = data[['Spd80mS', 'Spd60mS','Spd40mS']]
heights = [80, 60, 40]
# Using with a DataFrame of wind speeds
timeofday_power_law = bw.Shear.TimeOfDay(anemometers, heights, daily_segments=2, segment_start_time=7)
timeofday_log_law = bw.Shear.TimeOfDay(anemometers, heights, calc_method='log_law', by_month=False)
# Get alpha or roughness values calculated
timeofday_power_law.alpha
timeofday_log_law.roughness
# View plot
timeofday_power_law.plot
timeofday_log_law.plot
# View input data
timeofday_power_law.wspds
timeofday_log_law.wspds
# View other information
pprint.pprint(timeofday_power_law.info)
pprint.pprint(timeofday_log_law.info)
"""
wspds, cvg = Shear._data_prep(wspds=wspds, heights=heights, min_speed=min_speed)
# initialise empty series for later use
start_times = pd.Series([])
time_wspds = pd.Series([])
mean_time_wspds = pd.Series([])
c = pd.Series([])
slope = pd.Series([])
intercept = pd.Series([])
alpha = pd.Series([])
roughness = pd.Series([])
slope_df = pd.DataFrame([])
intercept_df = pd.DataFrame([])
roughness_df = pd.DataFrame([])
alpha_df = pd.DataFrame([])
# time of day shear calculations
interval = int(24 / segments_per_day)
if by_month is False and plot_type == '12x24':
raise ValueError("12x24 plot is only possible when 'by_month=True'")
            if not segment_start_time % 1 == 0:
                raise ValueError("'segment_start_time' must be an integer between 0 and 24.")
            if not (24 % segments_per_day == 0 or segments_per_day == 1):
                raise ValueError("'segments_per_day' must be a divisor of 24.")
segment_start_time = str(segment_start_time)
start_times[0] = datetime.datetime.strptime(segment_start_time, '%H')
dt = datetime.timedelta(hours=interval)
# extract wind speeds for each daily segment
for i in range(1, segments_per_day):
start_times[i] = start_times[i - 1] + dt
# extract wind speeds for each month
months_tot = pd.unique(wspds.index.month.values)
for j in months_tot:
anemometers_df = wspds[wspds.index.month == j]
for i in range(0, segments_per_day):
if segments_per_day == 1:
mean_time_wspds[i] = anemometers_df.mean().dropna()
elif i == segments_per_day - 1:
start_times[i] = start_times[i].strftime("%H:%M:%S")
start = str(start_times[i].time())
end = str(start_times[0].time())
time_wspds[i] = pd.DataFrame(anemometers_df).between_time(start, end, include_end=False)
mean_time_wspds[i] = time_wspds[i][(time_wspds[i] > min_speed).all(axis=1)].mean().dropna()
else:
start_times[i] = start_times[i].strftime("%H:%M:%S")
start = str(start_times[i].time())
end = str(start_times[i + 1].time())
time_wspds[i] = pd.DataFrame(anemometers_df).between_time(start, end, include_end=False)
mean_time_wspds[i] = time_wspds[i][(time_wspds[i] > min_speed).all(axis=1)].mean().dropna()
# calculate shear
if calc_method == 'power_law':
for i in range(0, len(mean_time_wspds)):
alpha[i], c[i] = Shear._calc_power_law(mean_time_wspds[i].values, heights, return_coeff=True)
alpha_df = pd.concat([alpha_df, alpha], axis=1)
if calc_method == 'log_law':
for i in range(0, len(mean_time_wspds)):
slope[i], intercept[i] = Shear._calc_log_law(mean_time_wspds[i].values, heights,
return_coeff=True)
roughness[i] = Shear._calc_roughness(slope=slope[i], intercept=intercept[i])
roughness_df = pd.concat([roughness_df, roughness], axis=1)
slope_df = pd.concat([slope_df, slope], axis=1)
intercept_df = pd.concat([intercept_df, intercept], axis=1)
# error check
if mean_time_wspds.shape[0] == 0:
raise ValueError('None of the input wind speeds are greater than the min_speed, cannot calculate shear')
if calc_method == 'power_law':
alpha_df.index = start_times
alpha_df.index = alpha_df.index.time
alpha_df.sort_index(inplace=True)
if by_month is True:
alpha_df.columns = [calendar.month_abbr[month] for month in months_tot]
self.plot = plt.plot_shear_time_of_day(Shear._fill_df_12x24(alpha_df), calc_method=calc_method,
plot_type=plot_type)
else:
n_months = len(alpha_df.columns.values)
alpha_df = pd.DataFrame(alpha_df.mean(axis=1))
alpha_df.columns = [str(n_months) + ' Month Average']
df_in = pd.DataFrame((Shear._fill_df_12x24(alpha_df)).iloc[:, 0])
df_in.columns = [str(n_months) + ' Month Average']
self.plot = plt.plot_shear_time_of_day(df_in, calc_method=calc_method, plot_type=plot_type)
alpha_df.index.name = 'segment_start_time'
self._alpha = alpha_df
if calc_method == 'log_law':
roughness_df.index = slope_df.index = intercept_df.index = start_times
roughness_df.index = slope_df.index = intercept_df.index = roughness_df.index.time
roughness_df.sort_index(inplace=True)
slope_df.sort_index(inplace=True)
intercept_df.sort_index(inplace=True)
if by_month is True:
roughness_df.columns = slope_df.columns = intercept_df.columns = \
[calendar.month_abbr[month] for month in months_tot]
self.plot = plt.plot_shear_time_of_day(Shear._fill_df_12x24(roughness_df),
calc_method=calc_method, plot_type=plot_type)
else:
n_months = len(slope_df.columns.values)
slope_df = pd.DataFrame(slope_df.mean(axis=1))
intercept_df = pd.DataFrame(intercept_df.mean(axis=1))
roughness_df = pd.DataFrame(roughness_df.mean(axis=1))
roughness_df.columns = slope_df.columns = intercept_df.columns = \
[str(len(months_tot)) + '_month_average']
df_in = pd.DataFrame(Shear._fill_df_12x24(roughness_df).iloc[:, 0])
df_in.columns = [str(n_months) + ' Month Average']
self.plot = plt.plot_shear_time_of_day(df_in, calc_method=calc_method, plot_type=plot_type)
roughness_df.index.name = 'segment_start_time'
self._roughness = roughness_df
self.calc_method = calc_method
self.wspds = wspds
self.origin = 'TimeOfDay'
self.info = Shear._create_info(self, heights=heights, cvg=cvg, min_speed=min_speed,
segment_start_time=segment_start_time, segments_per_day=segments_per_day)
@property
def alpha(self):
return self._alpha
@property
def roughness(self):
return self._roughness
def apply(self, wspds, height, shear_to):
"""
Applies shear calculated to a wind speed time series by time of day (and optionally by month) to scale
wind speed from one height to another.
:param self: TimeOfDay object to use when applying shear to the data.
:type self: TimeOfDay object
:param wspds: Wind speed time series to apply shear to.
:type wspds: pandas.Series
:param height: height of above wspds.
:type height: float
:param shear_to: height to which wspds should be scaled.
:type shear_to: float
:return: a pandas.Series of the scaled wind speeds.
:rtype: pandas.Series
**Example Usage**
::
import brightwind as bw
# Load anemometer data to calculate exponents
                data = bw.load_csv(r'C:\\Users\\Stephen\\Documents\\Analysis\\demo_data')
anemometers = data[['Spd80mS', 'Spd60mS','Spd40mS']]
heights = [80, 60, 40]
# Get power law object
timeofday_power_law = bw.Shear.TimeOfDay(anemometers, heights)
timeofday_log_law = bw.Shear.TimeOfDay(anemometers, heights, calc_method='log_law')
# Scale wind speeds using calculated exponents
timeofday_power_law.apply(data['Spd40mN'], height=40, shear_to=70)
timeofday_log_law.apply(data['Spd40mN'], height=40, shear_to=70)
"""
return Shear._apply(self, wspds, height, shear_to)
class Average:
def __init__(self, wspds, heights, min_speed=3, calc_method='power_law', plot_both=False, max_plot_height=None):
"""
Calculates alpha, using the power law, or the roughness coefficient, using the log law, based on the
average wind speeds of each supplied time series.
:param wspds: pandas.DataFrame, list of pandas.Series or list of wind speeds to be used for calculating
shear.
:type wspds: pandas.DataFrame, list of pandas.Series or list.
:param heights: List of anemometer heights
:type heights: list
:param min_speed: Only speeds higher than this would be considered for calculating shear, default is 3.
:type min_speed: float
:param calc_method: method to use for calculation, either 'power_law' (returns alpha) or 'log_law'
(returns the roughness coefficient).
:type calc_method: str
:param max_plot_height: height to which the wind profile plot is extended.
:type max_plot_height: float
:return: Average object containing calculated alpha/roughness coefficient values, a plot and other data.
:rtype: Average object
**Example usage**
::
import brightwind as bw
import pprint
# Load anemometer data to calculate exponents
                data = bw.load_csv(r'C:\\Users\\Stephen\\Documents\\Analysis\\demo_data')
anemometers = data[['Spd80mS', 'Spd60mS','Spd40mS']]
heights = [80, 60, 40]
# Using with a DataFrame of wind speeds
average_power_law = bw.Shear.Average(anemometers, heights)
average_log_law = bw.Shear.Average(anemometers, heights, calc_method='log_law', max_plot_height=120)
# Get the alpha or roughness values calculated
average_power_law.alpha
average_log_law.roughness
# View plot
average_power_law.plot
average_log_law.plot
# View input data
average_power_law.wspds
average_log_law.wspds
# View other information
pprint.pprint(average_power_law.info)
pprint.pprint(average_log_law.info)
"""
wspds, cvg = Shear._data_prep(wspds=wspds, heights=heights, min_speed=min_speed)
mean_wspds = wspds.mean(axis=0)
if mean_wspds.shape[0] == 0:
raise ValueError('None of the input wind speeds are greater than the min_speed, cannot calculate shear')
if calc_method == 'power_law':
alpha, c = Shear._calc_power_law(mean_wspds.values, heights, return_coeff=True)
if plot_both is True:
slope, intercept = Shear._calc_log_law(mean_wspds.values, heights, return_coeff=True)
self.plot = plt.plot_power_law(plot_both=True, avg_alpha=alpha, avg_c=c, avg_slope=slope,
avg_intercept=intercept,
wspds=mean_wspds.values, heights=heights,
max_plot_height=max_plot_height)
else:
self.plot = plt.plot_power_law(alpha, c, mean_wspds.values, heights,
max_plot_height=max_plot_height)
self._alpha = alpha
elif calc_method == 'log_law':
slope, intercept = Shear._calc_log_law(mean_wspds.values, heights, return_coeff=True)
roughness = Shear._calc_roughness(slope=slope, intercept=intercept)
self._roughness = roughness
if plot_both is True:
alpha, c = Shear._calc_power_law(mean_wspds.values, heights, return_coeff=True)
self.plot = plt.plot_power_law(avg_alpha=alpha, avg_c=c, avg_slope=slope, avg_intercept=intercept,
wspds=mean_wspds.values, heights=heights,
max_plot_height=max_plot_height)
else:
self.plot = plt.plot_log_law(slope, intercept, mean_wspds.values, heights,
max_plot_height=max_plot_height)
else:
raise ValueError("Please enter a valid calculation method, either 'power_law' or 'log_law'.")
self.wspds = wspds
self.origin = 'Average'
self.calc_method = calc_method
self.info = Shear._create_info(self, heights=heights, min_speed=min_speed, cvg=cvg)
@property
def alpha(self):
return self._alpha
@property
def roughness(self):
return self._roughness
def apply(self, wspds, height, shear_to):
"""
Applies average shear calculated to a wind speed time series to scale wind speed from one height to another.
:param self: Average object to use when applying shear to the data.
:type self: Average object
:param wspds: Wind speed time series to apply shear to.
:type wspds: pandas.Series
:param height: height of above wspds.
:type height: float
:param shear_to: height to which wspds should be scaled to.
:type shear_to: float
:return: a pandas.Series of the scaled wind speeds.
:rtype: pandas.Series
**Example Usage**
::
import brightwind as bw
# Load anemometer data to calculate exponents
                data = bw.load_csv(r'C:\\Users\\Stephen\\Documents\\Analysis\\demo_data')
anemometers = data[['Spd80mS', 'Spd60mS','Spd40mS']]
heights = [80, 60, 40]
# Get power law object
average_power_law = bw.Shear.Average(anemometers, heights)
average_log_law = bw.Shear.Average(anemometers, heights, calc_method='log_law', max_plot_height=120)
# Scale wind speeds using exponents
average_power_law.apply(data['Spd40mN'], height=40, shear_to=70)
average_log_law.apply(data['Spd40mN'], height=40, shear_to=70)
"""
return Shear._apply(self, wspds, height, shear_to)
class BySector:
def __init__(self, wspds, heights, wdir, min_speed=3, calc_method='power_law', sectors=12,
direction_bin_array=None, direction_bin_labels=None):
"""
Calculates alpha, using the power law, or the roughness coefficient, using the log law, for a wind series
binned by direction. The alpha/roughness coefficient values are calculated based on the average wind speeds
at each measurement height in each bin.
:param wspds: pandas.DataFrame, list of pandas.Series or list of wind speeds to be used for calculating
shear.
:type wspds: pandas.DataFrame, list of pandas.Series or list.
:param heights: List of anemometer heights
:type heights: list
:param wdir: Wind direction measurements
:type wdir: pandas.DataFrame or Series
            :param min_speed: Only speeds higher than this would be considered for calculating shear, default is 3.
            :type min_speed: float
:param calc_method: Method to use for calculation, either 'power_law' (returns alpha) or 'log_law'
(returns the roughness coefficient).
:type calc_method: str
:param sectors: Number of sectors for the shear to be calculated for.
:type sectors: int
:param direction_bin_array: Specific array of directional bins to be used. If None, bins are calculated
by 360/sectors.
:type direction_bin_array: list or array
:param direction_bin_labels: Labels to be given to the above direction_bin array.
:type direction_bin_labels: list or array
:return: BySector object containing calculated alpha/roughness coefficient values, a plot and other data.
:rtype: BySector object
**Example usage**
::
import brightwind as bw
import pprint
# Load anemometer data to calculate exponents
                data = bw.load_csv(r'C:\\Users\\Stephen\\Documents\\Analysis\\demo_data')
anemometers = data[['Spd80mS', 'Spd60mS','Spd40mS']]
heights = [80, 60, 40]
directions = data['Dir78mS']
# Calculate shear exponents using default bins ([345,15,45,75,105,135,165,195,225,255,285,315,345])
by_sector_power_law= bw.Shear.BySector(anemometers, heights, directions)
by_sector_log_law= bw.Shear.BySector(anemometers, heights, directions, calc_method='log_law')
# Calculate shear exponents using custom bins
custom_bins = [0,30,60,90,120,150,180,210,240,270,300,330,360]
by_sector_power_law_custom_bins = bw.Shear.BySector(anemometers, heights, directions,
direction_bin_array=custom_bins)
# Get alpha or roughness values calculated
by_sector_power_law.alpha
by_sector_log_law.roughness
# View plot
by_sector_power_law.plot
by_sector_log_law.plot
# View input data
by_sector_power_law.wspds
by_sector_log_law.wspds
# View other information
pprint.pprint(by_sector_power_law.info)
pprint.pprint(by_sector_log_law.info)
"""
print('This may take a while...')
wspds, cvg = Shear._data_prep(wspds=wspds, heights=heights, min_speed=min_speed)
if direction_bin_array is not None:
sectors = len(direction_bin_array) - 1
wdir = _convert_df_to_series(wdir)
mean_wspds = pd.Series([])
mean_wspds_df = pd.DataFrame([])
count_df = pd.DataFrame([])
count = pd.Series([])
for i in range(len(wspds.columns)):
w = wspds.iloc[:, i]
plot, mean_wspds[i] = dist_by_dir_sector(w, wdir, direction_bin_array=direction_bin_array,
sectors=sectors,
aggregation_method='mean', return_data=True)
plot, count[i] = dist_by_dir_sector(w, wdir, direction_bin_array=direction_bin_array,
sectors=sectors,
aggregation_method='count', return_data=True)
if i == 0:
mean_wspds_df = mean_wspds[i].copy()
count_df = count[i].copy()
else:
mean_wspds_df = pd.concat([mean_wspds_df, mean_wspds[i]], axis=1)
count_df = pd.concat([count_df, count[i]], axis=1)
count_df = count_df.mean(axis=1)
wind_rose_plot, wind_rose_dist = dist_by_dir_sector(wspds.iloc[:, 0], wdir,
direction_bin_array=direction_bin_array,
sectors=sectors,
direction_bin_labels=direction_bin_labels,
return_data=True)
if calc_method == 'power_law':
alpha = mean_wspds_df.apply(Shear._calc_power_law, heights=heights, return_coeff=False, axis=1)
wind_rose_plot, wind_rose_dist = dist_by_dir_sector(wspds.iloc[:, 0], wdir,
direction_bin_array=direction_bin_array,
sectors=sectors,
direction_bin_labels=direction_bin_labels,
return_data=True)
self.alpha_count = count_df
self._alpha = pd.Series(alpha, name='alpha')
clear_output()
self.plot = plt.plot_shear_by_sector(scale_variable=alpha, wind_rose_data=wind_rose_dist,
calc_method=calc_method)
elif calc_method == 'log_law':
slope_intercept = mean_wspds_df.apply(Shear._calc_log_law, heights=heights, return_coeff=True, axis=1)
slope = slope_intercept.iloc[:, 0]
intercept = slope_intercept.iloc[:, 1]
roughness = Shear._calc_roughness(slope=slope, intercept=intercept)
self.roughness_count = count_df
self._roughness = pd.Series(roughness, name='roughness_coefficient')
clear_output()
self.plot = plt.plot_shear_by_sector(scale_variable=roughness, wind_rose_data=wind_rose_dist,
calc_method=calc_method)
else:
raise ValueError("Please enter a valid calculation method, either 'power_law' or 'log_law'.")
self.wspds = wspds
self.wdir = wdir
self.origin = 'BySector'
self.sectors = sectors
self.calc_method = calc_method
self.info = Shear._create_info(self, heights=heights, cvg=cvg, min_speed=min_speed,
direction_bin_array=direction_bin_array)
@property
def alpha(self):
return self._alpha
@property
def roughness(self):
return self._roughness
def apply(self, wspds, wdir, height, shear_to):
"""
Applies shear calculated to a wind speed time series by wind direction to scale
wind speed from one height to another.
:param self: BySector object to use when applying shear to the data
:type self: BySector object
:param wspds: Wind speed time series to apply shear to.
:type wspds: pandas.Series
:param wdir: Wind direction measurements of wspds, only required if shear is to be applied by direction
sector.
:type wdir: pandas.Series
:param height: Height of wspds.
:type height: float
:param shear_to: Height to which wspds should be scaled to.
:type shear_to: float
:return: A pandas.Series of the scaled wind speeds.
:rtype: pandas.Series
**Example Usage**
::
import brightwind as bw
# Load anemometer data to calculate exponents
                data = bw.load_csv(r'C:\\Users\\Stephen\\Documents\\Analysis\\demo_data')
anemometers = data[['Spd80mS', 'Spd60mS']]
heights = [80, 60]
directions = data[['Dir78mS']]
# Calculate shear exponents
by_sector_power_law = bw.Shear.BySector(anemometers, heights, directions)
by_sector_log_law = bw.Shear.BySector(anemometers, heights, directions, calc_method='log_law')
# Scale wind speeds using exponents
by_sector_power_law.apply(data['Spd40mN'], data['Dir38mS'], height=40, shear_to=70)
by_sector_log_law.apply(data['Spd40mN'], data['Dir38mS'], height=40, shear_to=70)
"""
return Shear._apply(self, wspds=wspds, height=height, shear_to=shear_to, wdir=wdir)
@staticmethod
def _log_roughness_scale(wspds, height, shear_to, roughness):
"""
Scale wind speeds using the logarithmic wind shear law.
:param wspds: wind speeds at height z1, U1
:param height: z1
:param shear_to: z2
:param roughness: z0
:return: Scaled wind speeds, U2
:rtype: pandas.Series or float
METHODOLOGY:
U2 = (ln(z2/z0)/ln(z1/z0))U1
Where:
- U2 is the wind speed at height z2
- U1 is the wind speed at height z1
- z1 is the lower height
- z2 is the upper height
- zo is the roughness coefficient
"""
scale_factor = np.log(shear_to / roughness) / (np.log(height / roughness))
scaled_wspds = wspds * scale_factor
return scaled_wspds
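# Worked example (added for clarity; values are illustrative): scaling from
# z1 = 40 m to z2 = 80 m over roughness z0 = 0.03 m gives
# scale_factor = ln(80 / 0.03) / ln(40 / 0.03) ≈ 7.889 / 7.196 ≈ 1.096,
# so an 8 m/s reading at 40 m scales to roughly 8.8 m/s at 80 m.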
@staticmethod
def _calc_log_law(wspds, heights, return_coeff=False, maximise_data=False) -> (np.array, float):
"""
Derive the best fit logarithmic law line from a given time-step of speed data at 2 or more elevations
:param wspds: List of wind speeds [m/s]
:param heights: List of heights [m above ground]. The position of the height in the list must be the same
position in the list as its corresponding wind speed value.
:return: The slope and intercept of the best fit line, as defined above
:rtype: pandas.Series or float
METHODOLOGY:
Derive natural log of elevation data sets
Derive coefficients of linear best fit along ln(heights)- wspds distribution
Characterise new distribution of speed values based on linear best fit
Return the slope and the intercept of this linear best fit.
The slope and intercept can then be used to find the corresponding roughness coefficient, using the
equivalent laws:
1) $U(z) = (v/k)*ln(z/zo)$
which can be rewritten as:
$U(z) = (v/k)*ln(z) - (v/k)ln(zo)$
where zo = e**(-c/m), with m and c the slope and intercept of this line
Where:
- U(z) is the wind speed at height z
- v is the friction velocity at the location
- k is the von Kármán constant, taken as 0.4
- z is the height
- zo is the roughness coefficient
2) $U2 = (ln(z2/z0)/ln(z1/z0))U1$
"""
if maximise_data:
log_heights = np.log(
pd.Series(heights).drop(wspds[wspds == 0].index.values.astype(int))) # take log of elevations
wspds = wspds.drop(wspds[wspds == 0].index.values.astype(int))
else:
log_heights = np.log(heights) # take log of elevations
coeffs = np.polyfit(log_heights, wspds, deg=1)
if return_coeff:
return pd.Series([coeffs[0], coeffs[1]])
return coeffs[0]
@staticmethod
def _calc_power_law(wspds, heights, return_coeff=False, maximise_data=False) -> (np.array, float):
"""
Derive the best fit power law exponent (alpha) from a given time-step of speed data at 2 or more elevations
:param wspds: pandas.Series or list of wind speeds [m/s]
:param heights: List of heights [m above ground]. The position of the height in the list must be the same
position in the list as its corresponding wind speed value.
:return: The shear exponent (alpha) of the best fit power law, based on the form:
$(v2/v1) = (z2/z1)^(alpha)$
:rtype: pandas.Series or float
METHODOLOGY:
Derive natural log of elevation and speed data sets
Derive coefficients of linear best fit along log-log distribution
Characterise new distribution of speed values based on linear best fit
Derive 'alpha' as the slope of this linear best fit (function works for 2 or more points)
Return alpha value
"""
if maximise_data:
log_heights = np.log(pd.Series(heights).drop(wspds[wspds == 0].index.values.astype(int)))
log_wspds = np.log(wspds.drop(wspds[wspds == 0].index.values.astype(int)))
else:
log_heights = np.log(heights) # take log of elevations
log_wspds = np.log(wspds) # take log of speeds
coeffs = np.polyfit(log_heights, log_wspds, deg=1) # get coefficients of linear best fit to log distribution
if return_coeff:
return pd.Series([coeffs[0], np.exp(coeffs[1])])
return coeffs[0]
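# Worked example (added for clarity; values are illustrative): with
# wspds = [5.0, 4.6] m/s measured at heights = [80, 60] m, the fitted slope is
# alpha = ln(5.0 / 4.6) / ln(80 / 60) ≈ 0.083 / 0.288 ≈ 0.29.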
@staticmethod
def _calc_roughness(slope, intercept):
return e**(-intercept/slope)
@staticmethod
def _by_12x24(wspds, heights, min_speed=3, return_data=False, var_name='Shear'):
tab_12x24 = dist_12x24(wspds[(wspds > min_speed).all(axis=1)].apply(Shear._calc_power_law, heights=heights,
axis=1), return_data=True)[1]
if return_data:
return plt.plot_12x24_contours(tab_12x24, label=(var_name, 'mean')), tab_12x24
return plt.plot_12x24_contours(tab_12x24, label=(var_name, 'mean'))
@staticmethod
def scale(wspd, height, shear_to, alpha=None, roughness=None, calc_method='power_law'):
"""
Scales wind speeds from one height to another given a value of alpha or roughness coefficient (zo)
:param wspd: Wind speed time series to apply shear to.
:type wspd: pandas.Series
:param height: Height above ground at which wspd was measured.
:type height: float
:param shear_to: Height to which wspd should be scaled.
:type shear_to: float
:param alpha: Shear exponent to be used when scaling wind speeds.
:type alpha: float
:param roughness: Roughness coefficient to be used when scaling wind speeds.
:type roughness: float
:param calc_method: Calculation method used to scale the wind speed.
Using either: 1) 'power_law': $v2 = ((z2/z1)^(alpha))v1$
2) 'log_law': $v2 = (ln(z2/z0)/ln(z1/z0))v1$
:type calc_method: string
:return: A pandas.Series of the scaled wind speeds.
:rtype: pandas.Series or float
**Example Usage**
::
# Scale wind speeds using exponents
# Specify alpha to use
alpha_value = .2
# Specify roughness coefficient to use
zo = .03
height = 40
shear_to = 80
scaled_by_power_law = bw.Shear.scale(data['Spd40mN'], height, shear_to, alpha=alpha_value)
scaled_by_log_law = bw.Shear.scale(data['Spd40mN'], height, shear_to, roughness=zo, calc_method='log_law')
"""
return Shear._scale(wspds=wspd, height=height, shear_to=shear_to, calc_method=calc_method,
alpha=alpha, roughness=roughness)
@staticmethod
def _scale(wspds, height, shear_to, calc_method='power_law', alpha=None, roughness=None, origin=None):
"""
Private function for execution of scale()
"""
if not isinstance(wspds, pd.Series):
wspds = pd.Series(wspds)
if calc_method == 'power_law':
scale_factor = (shear_to / height) ** alpha
scaled_wspds = wspds * scale_factor
elif calc_method == 'log_law':
if origin == 'TimeSeries':
scaled_wspds = Shear._log_roughness_scale(wspds=wspds, height=height,
shear_to=shear_to, roughness=roughness)
else:
scaled_wspds = wspds.apply(Shear._log_roughness_scale, args=(height, shear_to, roughness))
else:
raise ValueError("Please enter a valid calculation method, either 'power_law' or 'log_law'.")
return scaled_wspds
@staticmethod
def _apply(self, wspds, height, shear_to, wdir=None):
scaled_wspds = pd.Series([])
result = | pd.Series([]) | pandas.Series |
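# Minimal standalone sketch (added for illustration; not part of the brightwind code
# above) contrasting the two scaling laws used by Shear.scale / Shear._scale. The
# heights, shear exponent and roughness length below are assumptions chosen only to
# make the comparison concrete.
import numpy as np
import pandas as pd

speeds_40m = pd.Series([6.0, 8.0, 10.0])  # hypothetical wind speeds measured at 40 m [m/s]
z1, z2 = 40.0, 80.0                       # measurement height and target height [m]
alpha, z0 = 0.2, 0.03                     # assumed shear exponent and roughness length [m]

# Power law: v2 = v1 * (z2/z1)^alpha
scaled_power = speeds_40m * (z2 / z1) ** alpha
# Log law: v2 = v1 * ln(z2/z0) / ln(z1/z0)
scaled_log = speeds_40m * np.log(z2 / z0) / np.log(z1 / z0)
print(pd.DataFrame({'power_law': scaled_power, 'log_law': scaled_log}))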
import pandas as pd
import matplotlib.pyplot as plt
from datetime import datetime
plt.rcParams['font.size'] = 6
import os
root_path = os.path.dirname(os.path.abspath('__file__'))
graphs_path = root_path+'/boundary_effect/graph/'
if not os.path.exists(graphs_path):
os.makedirs(graphs_path)
time = pd.read_csv(root_path+'/time_series/MonthlyRunoffWeiRiver.csv')['Time']
time = time.values
time = [datetime.strptime(t,'%Y/%m') for t in time]
time = [t.strftime('%b %Y') for t in time]
print(time)
# CHECK 1: is SSA shift-invariant?
# If yes, decomposing a shifted copy of the original time series should yield
# components (IMFs) that are simply shifted copies of the original components.
# For example, given the sunspot time series x (of length 792) we can
# generate a 1-step advanced copy of the original time series as follows:
# x0=(1:791)
# x1=(2:792) this is a 1-step advanced version of x0
# Obviously, shift-invariancy is preserved between x0 and x1 since
# x0(2:791)=x1(1:790)
# For shift-invariancy to be preserved for SSA, we would observe, for
# example, that the SSA Trend components for x0 (imf1 of x0) and x1 (imf1 of
# x1) should be exact copies of one another, advanced by a single step.
# i.e., x0_imf(2:791,1) should equal x1_imf(1:790,1) if shift-invariancy
# is preserved.
# As in the SSA case shown below, we can see that x0_imf(2:791,1) is basically
# equal to x1_imf(1:790,1) except for a few samples close to the beginning and
# end of x0 and x1. Interestingly, we see a low level of error close to the
# beginning of the time series and a high level of error close to the end of
# the time series, which is of high importance in operational forecasting tasks.
# The errors along the middle range are zero, indicating SSA is
# shift-invariant.
# We argue that the errors close to the boundaries are
# caused by the boundary effect, which is the exact problem this study is designed
# to solve.
# CHECK 2: The impact of appending data points to a time series and then
# performing SSA, analogous to the case in operational forecasting when new
# data becomes available and an updated forecast is made using the newly
# arrived data.
# Ideally, for forecasting situations, when new data is appended to a time
# series and some preprocessing is performed, it should not have an impact
# on previous measurements of the pre-processed time series.
# For example, if Trend_1:N represents the Trend, which has N total
# measurements and was derived by applying SSA to x_1:N, then we would expect
# that when we perform SSA after x is appended with another measurement,
# i.e., x_1:N+1, the first 1:N measurements of the resulting Trend_1:N+1
# are equal to Trend_1:N. In other words,
# Trend_1:N+1[1:N]=Trend_1:N[1:N].
# We see that this is not the case. Appending an additional observation to the
# time series results in updated SSA components that are entirely
# different from the original (not yet updated) SSA components.
# Interestingly, we see a high level of error at the boundaries of the time
# series, which is of high importance in operational forecasting tasks.
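# Tiny self-contained sketch (added for illustration only): it uses a centred rolling
# mean as a stand-in decomposition, NOT the actual SSA applied below, to show the
# shift-invariance check described above. Away from the boundaries, the component of
# the shifted series matches the shifted component exactly (the printed error is 0.0);
# only the edge samples differ.
_demo = pd.Series(range(12), dtype=float)
_t0 = _demo[:-1].reset_index(drop=True).rolling(3, center=True).mean()  # "trend" of x0
_t1 = _demo[1:].reset_index(drop=True).rolling(3, center=True).mean()   # "trend" of x1 (1-step advanced)
print((_t0.iloc[2:9].reset_index(drop=True) - _t1.iloc[1:8].reset_index(drop=True)).abs().max())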
x0_imf = pd.read_csv(root_path+'/boundary_effect/ssa-decompositions-huaxian/x0_dec.csv')
x1_imf = pd.read_csv(root_path+'/boundary_effect/ssa-decompositions-huaxian/x1_dec.csv')
x_1_552_imf = pd.read_csv(root_path+"/boundary_effect/ssa-decompositions-huaxian/x_1_552_dec.csv")
x_1_791_imf = pd.read_csv(root_path+'/boundary_effect/ssa-decompositions-huaxian/x_1_791_dec.csv')
x_1_792_imf = pd.read_csv(root_path+'/boundary_effect/ssa-decompositions-huaxian/x_1_792_dec.csv')
x0_imf1_2_791 = x0_imf['Trend'][1:790]
x0_imf1_2_791 = x0_imf1_2_791.reset_index(drop=True)
x1_imf1_1_790 = x1_imf['Trend'][0:789]
x1_imf1_1_790 = x1_imf1_1_790.reset_index(drop=True)
err = x0_imf1_2_791-x1_imf1_1_790
# err_df = pd.DataFrame(err.values,columns=['err'])
# print(err)
err.to_csv(root_path+'/results_analysis/results/shift_variance_err.csv')
x_1_552_imf1 = x_1_552_imf['Trend']
x_1_791_imf1 = x_1_791_imf['Trend']
x_1_792_imf1 = x_1_792_imf['Trend']
err_append_one = x_1_792_imf1[0:790]-x_1_791_imf1[0:790]
err_append_several = x_1_792_imf1[0:551]-x_1_552_imf1[0:551]
err_append_one_df = pd.DataFrame(err_append_one,columns=['err'])
err_append_several_df = pd.DataFrame(err_append_several,columns=['err'])
print(err_append_one_df)
print(err_append_several_df)
err_append_one.to_csv(root_path+'/results_analysis/results/err_append_one.csv')
err_append_several.to_csv(root_path+'/results_analysis/results/err_append_several.csv')
xx = -6
aceg_y = 13.5
bdf_y = 2.4
y_min = 0
y_max = 16
ye_min = -1.3
ye_max = 3.1
plt.figure(figsize=(7.48,6))
plt.subplot(4,2,1)
plt.text(xx,aceg_y,'(a)',fontsize=7,fontweight='bold',bbox=dict(facecolor='thistle', alpha=0.25))
plt.plot(x0_imf1_2_791,c='b',label=r'$S_{1}(2:791)$ of $x_{0}$')
plt.plot(x1_imf1_1_790,c='g',label=r'$S_{1}(1:790)$ of $x_{1}$')
plt.xlabel('Time (From '+time[1]+' to '+time[790]+')')
plt.ylabel(r"Runoff($10^8m^3$)")
plt.ylim(y_min,y_max)
plt.legend(ncol=2)
plt.subplot(4,2,2)
plt.text(760,-0.105,'(b)',fontsize=7,fontweight='bold',bbox=dict(facecolor='thistle', alpha=0.25))
shift_var=plt.plot(err,'o',markerfacecolor='w',markeredgecolor='r',markersize=4.5,
label=R'''Error between $S_{1}(2:791)$
of $x_{0}$ and $S_{1}(1:790)$ of $x_{1}$''')
plt.xlabel('Time (From '+time[1]+' to '+time[790]+')')
plt.ylabel(r"Runoff($10^8m^3$)")
plt.legend(loc='lower center')
plt.subplot(4,2,3)
plt.text(xx,aceg_y,'(c)',fontsize=7,fontweight='bold',bbox=dict(facecolor='thistle', alpha=0.25))
plt.plot(x_1_791_imf1,c='b',label=r'$S_{1}$ of $x_{1-791}$')
plt.plot(x_1_792_imf1,c='g',label=r'$S_{1}$ of $x_{1-792}$')
plt.xlabel('Time (From '+time[0]+' to '+time[791]+')')
plt.ylabel(r"Runoff($10^8m^3$)")
plt.ylim(y_min,y_max)
plt.legend(ncol=2)
plt.subplot(4,2,4)
plt.text(xx,0.04,'(d)',fontsize=7,fontweight='bold',bbox=dict(facecolor='thistle', alpha=0.25))
plt.plot(err_append_one,'o',markerfacecolor='w',markeredgecolor='r',markersize=4.5,
label=R'''Error between $S_{1}(1:791)$ of
$x_{1-791}$ and $S_{1}(1:791)$ of $x_{1-792}$''')
plt.xlabel('Time (From '+time[0]+' to '+time[790]+')')
plt.ylabel(r"Runoff($10^8m^3$)")
plt.legend(loc='lower left')
plt.subplot(4,2,5)
plt.text(xx,aceg_y,'(e)',fontsize=7,fontweight='bold',bbox=dict(facecolor='thistle', alpha=0.25))
plt.plot(x_1_552_imf1,c='b',label=r'$S_{1}$ of $x_{1-552}$')
plt.plot(x_1_792_imf1,c='g',label=r'$S_{1}$ of $x_{1-792}$')
plt.xlabel('Time (From '+time[0]+' to '+time[791]+')')
plt.ylabel(r"Runoff($10^8m^3$)")
plt.ylim(y_min,y_max)
plt.legend(ncol=2)
plt.subplot(4,2,6)
plt.text(xx,0.18,'(f)',fontsize=7,fontweight='bold',bbox=dict(facecolor='thistle', alpha=0.25))
plt.plot(err_append_several,'o',markerfacecolor='w',markeredgecolor='r',markersize=4.5,
label=R'''Error between $S_{1}(1:552)$ of
$x_{1-552}$ and $S_{1}(1:552)$ of $x_{1-792}$''')
plt.xlabel('Time (From '+time[0]+' to '+time[551]+')')
plt.ylabel(r"Runoff($10^8m^3$)")
plt.legend(loc=9,ncol=2)
ssa_train = pd.read_csv(root_path+"/Huaxian_ssa/data/SSA_TRAIN.csv")
ssa_full = | pd.read_csv(root_path+"/Huaxian_ssa/data/SSA_FULL.csv") | pandas.read_csv |
# -*- coding: utf-8 -*-
from warnings import catch_warnings
import numpy as np
from datetime import datetime
from pandas.util import testing as tm
import pandas as pd
from pandas.core import config as cf
from pandas.compat import u
from pandas._libs.tslib import iNaT
from pandas import (NaT, Float64Index, Series,
DatetimeIndex, TimedeltaIndex, date_range)
from pandas.core.dtypes.dtypes import DatetimeTZDtype
from pandas.core.dtypes.missing import (
array_equivalent, isnull, notnull,
na_value_for_dtype)
def test_notnull():
assert notnull(1.)
assert not notnull(None)
assert not notnull(np.NaN)
with cf.option_context("mode.use_inf_as_null", False):
assert notnull(np.inf)
assert notnull(-np.inf)
arr = np.array([1.5, np.inf, 3.5, -np.inf])
result = notnull(arr)
assert result.all()
with cf.option_context("mode.use_inf_as_null", True):
assert not notnull(np.inf)
assert not notnull(-np.inf)
arr = np.array([1.5, np.inf, 3.5, -np.inf])
result = notnull(arr)
assert result.sum() == 2
with cf.option_context("mode.use_inf_as_null", False):
for s in [tm.makeFloatSeries(), tm.makeStringSeries(),
tm.makeObjectSeries(), tm.makeTimeSeries(),
tm.makePeriodSeries()]:
assert (isinstance(isnull(s), Series))
class TestIsNull(object):
def test_0d_array(self):
assert isnull(np.array(np.nan))
assert not isnull(np.array(0.0))
assert not isnull(np.array(0))
# test object dtype
assert isnull(np.array(np.nan, dtype=object))
assert not isnull(np.array(0.0, dtype=object))
assert not isnull(np.array(0, dtype=object))
def test_empty_object(self):
for shape in [(4, 0), (4,)]:
arr = np.empty(shape=shape, dtype=object)
result = isnull(arr)
expected = np.ones(shape=shape, dtype=bool)
tm.assert_numpy_array_equal(result, expected)
def test_isnull(self):
assert not isnull(1.)
assert isnull(None)
assert isnull(np.NaN)
assert isnull(float('nan'))
assert not isnull(np.inf)
assert not isnull(-np.inf)
# series
for s in [tm.makeFloatSeries(), tm.makeStringSeries(),
tm.makeObjectSeries(), tm.makeTimeSeries(),
tm.makePeriodSeries()]:
assert isinstance(isnull(s), Series)
# frame
for df in [tm.makeTimeDataFrame(), tm.makePeriodFrame(),
tm.makeMixedDataFrame()]:
result = isnull(df)
expected = df.apply(isnull)
tm.assert_frame_equal(result, expected)
# panel
with catch_warnings(record=True):
for p in [tm.makePanel(), tm.makePeriodPanel(),
tm.add_nans(tm.makePanel())]:
result = isnull(p)
expected = p.apply(isnull)
tm.assert_panel_equal(result, expected)
# panel 4d
with catch_warnings(record=True):
for p in [tm.makePanel4D(), tm.add_nans_panel4d(tm.makePanel4D())]:
result = isnull(p)
expected = p.apply(isnull)
tm.assert_panel4d_equal(result, expected)
def test_isnull_lists(self):
result = isnull([[False]])
exp = np.array([[False]])
tm.assert_numpy_array_equal(result, exp)
result = isnull([[1], [2]])
exp = np.array([[False], [False]])
tm.assert_numpy_array_equal(result, exp)
# list of strings / unicode
result = isnull(['foo', 'bar'])
exp = np.array([False, False])
tm.assert_numpy_array_equal(result, exp)
result = isnull([u('foo'), u('bar')])
exp = np.array([False, False])
tm.assert_numpy_array_equal(result, exp)
def test_isnull_nat(self):
result = isnull([NaT])
exp = np.array([True])
tm.assert_numpy_array_equal(result, exp)
result = isnull(np.array([NaT], dtype=object))
exp = np.array([True])
tm.assert_numpy_array_equal(result, exp)
def test_isnull_numpy_nat(self):
arr = np.array([NaT, np.datetime64('NaT'), np.timedelta64('NaT'),
np.datetime64('NaT', 's')])
result = isnull(arr)
expected = np.array([True] * 4)
tm.assert_numpy_array_equal(result, expected)
def test_isnull_datetime(self):
assert not isnull(datetime.now())
assert notnull(datetime.now())
idx = date_range('1/1/1990', periods=20)
exp = np.ones(len(idx), dtype=bool)
tm.assert_numpy_array_equal(notnull(idx), exp)
idx = np.asarray(idx)
idx[0] = iNaT
idx = DatetimeIndex(idx)
mask = isnull(idx)
assert mask[0]
exp = np.array([True] + [False] * (len(idx) - 1), dtype=bool)
tm.assert_numpy_array_equal(mask, exp)
# GH 9129
pidx = idx.to_period(freq='M')
mask = isnull(pidx)
assert mask[0]
exp = np.array([True] + [False] * (len(idx) - 1), dtype=bool)
tm.assert_numpy_array_equal(mask, exp)
mask = isnull(pidx[1:])
exp = np.zeros(len(mask), dtype=bool)
tm.assert_numpy_array_equal(mask, exp)
def test_datetime_other_units(self):
idx = pd.DatetimeIndex(['2011-01-01', 'NaT', '2011-01-02'])
exp = np.array([False, True, False])
tm.assert_numpy_array_equal(isnull(idx), exp)
tm.assert_numpy_array_equal(notnull(idx), ~exp)
tm.assert_numpy_array_equal(isnull(idx.values), exp)
tm.assert_numpy_array_equal(notnull(idx.values), ~exp)
for dtype in ['datetime64[D]', 'datetime64[h]', 'datetime64[m]',
'datetime64[s]', 'datetime64[ms]', 'datetime64[us]',
'datetime64[ns]']:
values = idx.values.astype(dtype)
exp = np.array([False, True, False])
tm.assert_numpy_array_equal(isnull(values), exp)
tm.assert_numpy_array_equal(notnull(values), ~exp)
exp = pd.Series([False, True, False])
s = pd.Series(values)
tm.assert_series_equal(isnull(s), exp)
tm.assert_series_equal(notnull(s), ~exp)
s = pd.Series(values, dtype=object)
tm.assert_series_equal(isnull(s), exp)
tm.assert_series_equal(notnull(s), ~exp)
def test_timedelta_other_units(self):
idx = pd.TimedeltaIndex(['1 days', 'NaT', '2 days'])
exp = np.array([False, True, False])
tm.assert_numpy_array_equal( | isnull(idx) | pandas.core.dtypes.missing.isnull |
import pandas as pd
from abc import ABC
from geopandas import GeoDataFrame
from carto.do_dataset import DODataset
from . import subscriptions
from ....utils.geom_utils import set_geometry
from ....utils.logger import log
_DATASET_READ_MSG = '''To load it as a DataFrame you can do:
df = pandas.read_csv('{}')
'''
_GEOGRAPHY_READ_MSG = '''To load it as a GeoDataFrame you can do:
from cartoframes.utils import decode_geometry
df = pandas.read_csv('{}')
gdf = GeoDataFrame(df, geometry=decode_geometry(df['geom']))
'''
GEOM_COL = 'geom'
class CatalogEntity(ABC):
"""This is an internal class the rest of the classes related to the catalog discovery extend.
It contains:
- Properties: `id`, `slug` (a shorter ID).
- Static methods: `get`, `get_all`, `get_list` to retrieve elements or lists of objects in the catalog such as
datasets, categories, variables, etc.
- Instance methods to convert to pandas Series, Python dict, compare instances, etc.
As a rule of thumb you don't directly use this class, it is documented for inheritance purposes.
"""
id_field = 'id'
_entity_repo = None
export_excluded_fields = ['summary_json', 'geom_coverage']
def __init__(self, data):
self.data = data
@property
def id(self):
"""The ID of the entity."""
return self.data[self.id_field]
@property
def slug(self):
"""The slug (short ID) of the entity."""
try:
return self.data['slug']
except KeyError:
return None
@classmethod
def get(cls, id_):
"""Get an instance of an entity by ID or slug.
Args:
id_ (str):
ID or slug of a catalog entity.
Raises:
CatalogError: if there's a problem when connecting to the catalog or no entities are found.
"""
return cls._entity_repo.get_by_id(id_)
@classmethod
def get_all(cls, filters=None):
"""List all instances of an entity.
Args:
filters (dict, optional):
Dict containing pairs of entity properties and its value to be used as filters to query the available
entities. If none is provided, no filters will be applied to the query.
"""
return cls._entity_repo.get_all(filters)
@classmethod
def get_list(cls, id_list):
"""Get a list of instance of an entity by a list of IDs or slugs.
Args:
id_list (list):
List of ID or slugs of entities in the catalog to retrieve instances.
Raises:
CatalogError: if there's a problem when connecting to the catalog or no entities are found.
"""
return cls._entity_repo.get_by_id_list(id_list)
def to_series(self):
"""Converts the entity instance to a pandas Series."""
return pd.Series(self.data)
def to_dict(self):
"""Converts the entity instance to a Python dict."""
return {key: value for key, value in self.data.items() if key not in self.export_excluded_fields}
def is_subscribed(self, credentials, entity_type):
"""Check if the entity is subscribed"""
return self.is_public_data or self.id in subscriptions.get_subscription_ids(credentials, entity_type)
def __eq__(self, other):
return self.data == other.data
def __ne__(self, other):
return not self == other
def __str__(self):
return '{classname}({data})'.format(classname=self.__class__.__name__, data=self.data.__str__())
def __repr__(self):
id = self._get_print_id()
return "<{classname}.get('{entity_id}')>".format(classname=self.__class__.__name__, entity_id=id)
def _get_print_id(self):
if 'slug' in self.data.keys():
return self.data['slug']
return self.id
def _download(self, credentials, file_path=None, limit=None, order_by=None, sql_query=None, add_geom=None):
auth_client = credentials.get_api_key_auth_client()
is_geography = None
if sql_query is not None:
is_geography = self.__class__.__name__ == 'Geography'
rows = DODataset(auth_client=auth_client).name(self.id).download_stream(limit=limit,
order_by=order_by,
sql_query=sql_query,
add_geom=add_geom,
is_geography=is_geography)
if file_path:
with open(file_path, 'w') as csvfile:
for row in rows:
csvfile.write(row.decode('utf-8'))
log.info('Data saved: {}'.format(file_path))
if self.__class__.__name__ == 'Dataset':
log.info(_DATASET_READ_MSG.format(file_path))
elif self.__class__.__name__ == 'Geography':
log.info(_GEOGRAPHY_READ_MSG.format(file_path))
else:
dataframe = pd.read_csv(rows)
gdf = GeoDataFrame(dataframe)
if GEOM_COL in gdf:
set_geometry(gdf, GEOM_COL, inplace=True)
return gdf
def _get_remote_full_table_name(self, user_project, user_dataset, public_project):
project, dataset, table = self.id.split('.')
if project != public_project:
return '{project}.{dataset}.{table_name}'.format(
project=user_project,
dataset=user_dataset,
table_name='view_{}_{}'.format(dataset, table)
)
else:
return self.id
def is_slug_value(id_value):
return len(id_value.split('.')) == 1
class CatalogList(list):
"""This is an internal class that represents a list of entities in the catalog of the same type.
It contains:
- Instance methods to convert to get an instance of the entity by ID and to convert the list to a pandas
DataFrame for further filtering and exploration.
As a rule of thumb you don't directly use this class, it is documented for inheritance purposes.
"""
def __init__(self, data):
super(CatalogList, self).__init__(data)
def to_dataframe(self):
"""Converts a list to a pandas DataFrame.
Examples:
>>> catalog = Catalog()
>>> catalog.categories.to_dataframe()
"""
df = | pd.DataFrame([item.data for item in self]) | pandas.DataFrame |
import math
import queue
from datetime import datetime, timedelta, timezone
import pandas as pd
from storey import build_flow, SyncEmitSource, Reduce, Table, AggregateByKey, FieldAggregator, NoopDriver, \
DataframeSource
from storey.dtypes import SlidingWindows, FixedWindows, EmitAfterMaxEvent, EmitEveryEvent
test_base_time = datetime.fromisoformat("2020-07-21T21:40:00+00:00")
def append_return(lst, x):
lst.append(x)
return lst
def test_sliding_window_simple_aggregation_flow():
controller = build_flow([
SyncEmitSource(),
AggregateByKey([FieldAggregator("number_of_stuff", "col1", ["sum", "avg", "min", "max"],
SlidingWindows(['1h', '2h', '24h'], '10m'))],
Table("test", NoopDriver())),
Reduce([], lambda acc, x: append_return(acc, x)),
]).run()
for i in range(10):
data = {'col1': i}
controller.emit(data, 'tal', test_base_time + timedelta(minutes=25 * i))
controller.terminate()
actual = controller.await_termination()
expected_results = [
{'col1': 0, 'number_of_stuff_sum_1h': 0, 'number_of_stuff_sum_2h': 0, 'number_of_stuff_sum_24h': 0, 'number_of_stuff_min_1h': 0,
'number_of_stuff_min_2h': 0, 'number_of_stuff_min_24h': 0, 'number_of_stuff_max_1h': 0, 'number_of_stuff_max_2h': 0,
'number_of_stuff_max_24h': 0, 'number_of_stuff_avg_1h': 0.0, 'number_of_stuff_avg_2h': 0.0, 'number_of_stuff_avg_24h': 0.0},
{'col1': 1, 'number_of_stuff_sum_1h': 1, 'number_of_stuff_sum_2h': 1, 'number_of_stuff_sum_24h': 1, 'number_of_stuff_min_1h': 0,
'number_of_stuff_min_2h': 0, 'number_of_stuff_min_24h': 0, 'number_of_stuff_max_1h': 1, 'number_of_stuff_max_2h': 1,
'number_of_stuff_max_24h': 1, 'number_of_stuff_avg_1h': 0.5, 'number_of_stuff_avg_2h': 0.5, 'number_of_stuff_avg_24h': 0.5},
{'col1': 2, 'number_of_stuff_sum_1h': 3, 'number_of_stuff_sum_2h': 3, 'number_of_stuff_sum_24h': 3, 'number_of_stuff_min_1h': 0,
'number_of_stuff_min_2h': 0, 'number_of_stuff_min_24h': 0, 'number_of_stuff_max_1h': 2, 'number_of_stuff_max_2h': 2,
'number_of_stuff_max_24h': 2, 'number_of_stuff_avg_1h': 1.0, 'number_of_stuff_avg_2h': 1.0, 'number_of_stuff_avg_24h': 1.0},
{'col1': 3, 'number_of_stuff_sum_1h': 6, 'number_of_stuff_sum_2h': 6, 'number_of_stuff_sum_24h': 6, 'number_of_stuff_min_1h': 1,
'number_of_stuff_min_2h': 0, 'number_of_stuff_min_24h': 0, 'number_of_stuff_max_1h': 3, 'number_of_stuff_max_2h': 3,
'number_of_stuff_max_24h': 3, 'number_of_stuff_avg_1h': 2.0, 'number_of_stuff_avg_2h': 1.5, 'number_of_stuff_avg_24h': 1.5},
{'col1': 4, 'number_of_stuff_sum_1h': 9, 'number_of_stuff_sum_2h': 10, 'number_of_stuff_sum_24h': 10, 'number_of_stuff_min_1h': 2,
'number_of_stuff_min_2h': 0, 'number_of_stuff_min_24h': 0, 'number_of_stuff_max_1h': 4, 'number_of_stuff_max_2h': 4,
'number_of_stuff_max_24h': 4, 'number_of_stuff_avg_1h': 3.0, 'number_of_stuff_avg_2h': 2.0, 'number_of_stuff_avg_24h': 2.0},
{'col1': 5, 'number_of_stuff_sum_1h': 12, 'number_of_stuff_sum_2h': 15, 'number_of_stuff_sum_24h': 15, 'number_of_stuff_min_1h': 3,
'number_of_stuff_min_2h': 1, 'number_of_stuff_min_24h': 0, 'number_of_stuff_max_1h': 5, 'number_of_stuff_max_2h': 5,
'number_of_stuff_max_24h': 5, 'number_of_stuff_avg_1h': 4.0, 'number_of_stuff_avg_2h': 3.0, 'number_of_stuff_avg_24h': 2.5},
{'col1': 6, 'number_of_stuff_sum_1h': 15, 'number_of_stuff_sum_2h': 20, 'number_of_stuff_sum_24h': 21, 'number_of_stuff_min_1h': 4,
'number_of_stuff_min_2h': 2, 'number_of_stuff_min_24h': 0, 'number_of_stuff_max_1h': 6, 'number_of_stuff_max_2h': 6,
'number_of_stuff_max_24h': 6, 'number_of_stuff_avg_1h': 5.0, 'number_of_stuff_avg_2h': 4.0, 'number_of_stuff_avg_24h': 3.0},
{'col1': 7, 'number_of_stuff_sum_1h': 18, 'number_of_stuff_sum_2h': 25, 'number_of_stuff_sum_24h': 28, 'number_of_stuff_min_1h': 5,
'number_of_stuff_min_2h': 3, 'number_of_stuff_min_24h': 0, 'number_of_stuff_max_1h': 7, 'number_of_stuff_max_2h': 7,
'number_of_stuff_max_24h': 7, 'number_of_stuff_avg_1h': 6.0, 'number_of_stuff_avg_2h': 5.0, 'number_of_stuff_avg_24h': 3.5},
{'col1': 8, 'number_of_stuff_sum_1h': 21, 'number_of_stuff_sum_2h': 30, 'number_of_stuff_sum_24h': 36, 'number_of_stuff_min_1h': 6,
'number_of_stuff_min_2h': 4, 'number_of_stuff_min_24h': 0, 'number_of_stuff_max_1h': 8, 'number_of_stuff_max_2h': 8,
'number_of_stuff_max_24h': 8, 'number_of_stuff_avg_1h': 7.0, 'number_of_stuff_avg_2h': 6.0, 'number_of_stuff_avg_24h': 4.0},
{'col1': 9, 'number_of_stuff_sum_1h': 24, 'number_of_stuff_sum_2h': 35, 'number_of_stuff_sum_24h': 45, 'number_of_stuff_min_1h': 7,
'number_of_stuff_min_2h': 5, 'number_of_stuff_min_24h': 0, 'number_of_stuff_max_1h': 9, 'number_of_stuff_max_2h': 9,
'number_of_stuff_max_24h': 9, 'number_of_stuff_avg_1h': 8.0, 'number_of_stuff_avg_2h': 7.0, 'number_of_stuff_avg_24h': 4.5}
]
assert actual == expected_results, \
f'actual did not match expected. \n actual: {actual} \n expected: {expected_results}'
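# Worked check of the expected values above (added for clarity): events arrive every
# 25 minutes, so at the sixth event (col1=5, 125 minutes in) only the events emitted
# 75, 100 and 125 minutes in still fall inside the trailing 1h window, giving
# number_of_stuff_sum_1h = 3 + 4 + 5 = 12 and number_of_stuff_avg_1h = 12 / 3 = 4.0.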
def test_sliding_window_sparse_data():
controller = build_flow([
SyncEmitSource(),
AggregateByKey(
[FieldAggregator("number_of_stuff1", "col1", ["sum", "avg", "min", "max"], SlidingWindows(['1h', '2h', '24h'], '10m')),
FieldAggregator("number_of_stuff2", "col2", ["sum", "avg", "min", "max"], SlidingWindows(['1h', '2h', '24h'], '10m'))],
Table("test", NoopDriver())),
Reduce([], lambda acc, x: append_return(acc, x)),
]).run()
for i in range(10):
controller.emit({'col1': i}, 'tal', test_base_time + timedelta(minutes=25 * i))
controller.emit({'col2': i}, 'tal', test_base_time + timedelta(minutes=25 * i))
controller.terminate()
actual = controller.await_termination()
expected_results = [{'col1': 0, 'number_of_stuff1_avg_1h': 0.0, 'number_of_stuff1_avg_24h': 0.0, 'number_of_stuff1_avg_2h': 0.0,
'number_of_stuff1_max_1h': 0, 'number_of_stuff1_max_24h': 0, 'number_of_stuff1_max_2h': 0,
'number_of_stuff1_min_1h': 0, 'number_of_stuff1_min_24h': 0, 'number_of_stuff1_min_2h': 0,
'number_of_stuff1_sum_1h': 0, 'number_of_stuff1_sum_24h': 0, 'number_of_stuff1_sum_2h': 0,
'number_of_stuff2_avg_1h': math.nan, 'number_of_stuff2_avg_24h': math.nan, 'number_of_stuff2_avg_2h': math.nan,
'number_of_stuff2_max_1h': math.nan, 'number_of_stuff2_max_24h': math.nan, 'number_of_stuff2_max_2h': math.nan,
'number_of_stuff2_min_1h': math.nan, 'number_of_stuff2_min_24h': math.nan, 'number_of_stuff2_min_2h': math.nan,
'number_of_stuff2_sum_1h': 0, 'number_of_stuff2_sum_24h': 0, 'number_of_stuff2_sum_2h': 0},
{'col2': 0, 'number_of_stuff1_avg_1h': 0.0, 'number_of_stuff1_avg_24h': 0.0, 'number_of_stuff1_avg_2h': 0.0,
'number_of_stuff1_max_1h': 0, 'number_of_stuff1_max_24h': 0, 'number_of_stuff1_max_2h': 0,
'number_of_stuff1_min_1h': 0, 'number_of_stuff1_min_24h': 0, 'number_of_stuff1_min_2h': 0,
'number_of_stuff1_sum_1h': 0, 'number_of_stuff1_sum_24h': 0, 'number_of_stuff1_sum_2h': 0,
'number_of_stuff2_avg_1h': 0.0, 'number_of_stuff2_avg_24h': 0.0, 'number_of_stuff2_avg_2h': 0.0,
'number_of_stuff2_max_1h': 0, 'number_of_stuff2_max_24h': 0, 'number_of_stuff2_max_2h': 0,
'number_of_stuff2_min_1h': 0, 'number_of_stuff2_min_24h': 0, 'number_of_stuff2_min_2h': 0,
'number_of_stuff2_sum_1h': 0, 'number_of_stuff2_sum_24h': 0, 'number_of_stuff2_sum_2h': 0},
{'col1': 1, 'number_of_stuff1_avg_1h': 0.5, 'number_of_stuff1_avg_24h': 0.5, 'number_of_stuff1_avg_2h': 0.5,
'number_of_stuff1_max_1h': 1, 'number_of_stuff1_max_24h': 1, 'number_of_stuff1_max_2h': 1,
'number_of_stuff1_min_1h': 0, 'number_of_stuff1_min_24h': 0, 'number_of_stuff1_min_2h': 0,
'number_of_stuff1_sum_1h': 1, 'number_of_stuff1_sum_24h': 1, 'number_of_stuff1_sum_2h': 1,
'number_of_stuff2_avg_1h': 0.0, 'number_of_stuff2_avg_24h': 0.0, 'number_of_stuff2_avg_2h': 0.0,
'number_of_stuff2_max_1h': 0, 'number_of_stuff2_max_24h': 0, 'number_of_stuff2_max_2h': 0,
'number_of_stuff2_min_1h': 0, 'number_of_stuff2_min_24h': 0, 'number_of_stuff2_min_2h': 0,
'number_of_stuff2_sum_1h': 0, 'number_of_stuff2_sum_24h': 0, 'number_of_stuff2_sum_2h': 0},
{'col2': 1, 'number_of_stuff1_avg_1h': 0.5, 'number_of_stuff1_avg_24h': 0.5, 'number_of_stuff1_avg_2h': 0.5,
'number_of_stuff1_max_1h': 1, 'number_of_stuff1_max_24h': 1, 'number_of_stuff1_max_2h': 1,
'number_of_stuff1_min_1h': 0, 'number_of_stuff1_min_24h': 0, 'number_of_stuff1_min_2h': 0,
'number_of_stuff1_sum_1h': 1, 'number_of_stuff1_sum_24h': 1, 'number_of_stuff1_sum_2h': 1,
'number_of_stuff2_avg_1h': 0.5, 'number_of_stuff2_avg_24h': 0.5, 'number_of_stuff2_avg_2h': 0.5,
'number_of_stuff2_max_1h': 1, 'number_of_stuff2_max_24h': 1, 'number_of_stuff2_max_2h': 1,
'number_of_stuff2_min_1h': 0, 'number_of_stuff2_min_24h': 0, 'number_of_stuff2_min_2h': 0,
'number_of_stuff2_sum_1h': 1, 'number_of_stuff2_sum_24h': 1, 'number_of_stuff2_sum_2h': 1},
{'col1': 2, 'number_of_stuff1_avg_1h': 1.0, 'number_of_stuff1_avg_24h': 1.0, 'number_of_stuff1_avg_2h': 1.0,
'number_of_stuff1_max_1h': 2, 'number_of_stuff1_max_24h': 2, 'number_of_stuff1_max_2h': 2,
'number_of_stuff1_min_1h': 0, 'number_of_stuff1_min_24h': 0, 'number_of_stuff1_min_2h': 0,
'number_of_stuff1_sum_1h': 3, 'number_of_stuff1_sum_24h': 3, 'number_of_stuff1_sum_2h': 3,
'number_of_stuff2_avg_1h': 0.5, 'number_of_stuff2_avg_24h': 0.5, 'number_of_stuff2_avg_2h': 0.5,
'number_of_stuff2_max_1h': 1, 'number_of_stuff2_max_24h': 1, 'number_of_stuff2_max_2h': 1,
'number_of_stuff2_min_1h': 0, 'number_of_stuff2_min_24h': 0, 'number_of_stuff2_min_2h': 0,
'number_of_stuff2_sum_1h': 1, 'number_of_stuff2_sum_24h': 1, 'number_of_stuff2_sum_2h': 1},
{'col2': 2, 'number_of_stuff1_avg_1h': 1.0, 'number_of_stuff1_avg_24h': 1.0, 'number_of_stuff1_avg_2h': 1.0,
'number_of_stuff1_max_1h': 2, 'number_of_stuff1_max_24h': 2, 'number_of_stuff1_max_2h': 2,
'number_of_stuff1_min_1h': 0, 'number_of_stuff1_min_24h': 0, 'number_of_stuff1_min_2h': 0,
'number_of_stuff1_sum_1h': 3, 'number_of_stuff1_sum_24h': 3, 'number_of_stuff1_sum_2h': 3,
'number_of_stuff2_avg_1h': 1.0, 'number_of_stuff2_avg_24h': 1.0, 'number_of_stuff2_avg_2h': 1.0,
'number_of_stuff2_max_1h': 2, 'number_of_stuff2_max_24h': 2, 'number_of_stuff2_max_2h': 2,
'number_of_stuff2_min_1h': 0, 'number_of_stuff2_min_24h': 0, 'number_of_stuff2_min_2h': 0,
'number_of_stuff2_sum_1h': 3, 'number_of_stuff2_sum_24h': 3, 'number_of_stuff2_sum_2h': 3},
{'col1': 3, 'number_of_stuff1_avg_1h': 2.0, 'number_of_stuff1_avg_24h': 1.5, 'number_of_stuff1_avg_2h': 1.5,
'number_of_stuff1_max_1h': 3, 'number_of_stuff1_max_24h': 3, 'number_of_stuff1_max_2h': 3,
'number_of_stuff1_min_1h': 1, 'number_of_stuff1_min_24h': 0, 'number_of_stuff1_min_2h': 0,
'number_of_stuff1_sum_1h': 6, 'number_of_stuff1_sum_24h': 6, 'number_of_stuff1_sum_2h': 6,
'number_of_stuff2_avg_1h': 1.0, 'number_of_stuff2_avg_24h': 1.0, 'number_of_stuff2_avg_2h': 1.0,
'number_of_stuff2_max_1h': 2, 'number_of_stuff2_max_24h': 2, 'number_of_stuff2_max_2h': 2,
'number_of_stuff2_min_1h': 0, 'number_of_stuff2_min_24h': 0, 'number_of_stuff2_min_2h': 0,
'number_of_stuff2_sum_1h': 3, 'number_of_stuff2_sum_24h': 3, 'number_of_stuff2_sum_2h': 3},
{'col2': 3, 'number_of_stuff1_avg_1h': 2.0, 'number_of_stuff1_avg_24h': 1.5, 'number_of_stuff1_avg_2h': 1.5,
'number_of_stuff1_max_1h': 3, 'number_of_stuff1_max_24h': 3, 'number_of_stuff1_max_2h': 3,
'number_of_stuff1_min_1h': 1, 'number_of_stuff1_min_24h': 0, 'number_of_stuff1_min_2h': 0,
'number_of_stuff1_sum_1h': 6, 'number_of_stuff1_sum_24h': 6, 'number_of_stuff1_sum_2h': 6,
'number_of_stuff2_avg_1h': 2.0, 'number_of_stuff2_avg_24h': 1.5, 'number_of_stuff2_avg_2h': 1.5,
'number_of_stuff2_max_1h': 3, 'number_of_stuff2_max_24h': 3, 'number_of_stuff2_max_2h': 3,
'number_of_stuff2_min_1h': 1, 'number_of_stuff2_min_24h': 0, 'number_of_stuff2_min_2h': 0,
'number_of_stuff2_sum_1h': 6, 'number_of_stuff2_sum_24h': 6, 'number_of_stuff2_sum_2h': 6},
{'col1': 4, 'number_of_stuff1_avg_1h': 3.0, 'number_of_stuff1_avg_24h': 2.0, 'number_of_stuff1_avg_2h': 2.0,
'number_of_stuff1_max_1h': 4, 'number_of_stuff1_max_24h': 4, 'number_of_stuff1_max_2h': 4,
'number_of_stuff1_min_1h': 2, 'number_of_stuff1_min_24h': 0, 'number_of_stuff1_min_2h': 0,
'number_of_stuff1_sum_1h': 9, 'number_of_stuff1_sum_24h': 10, 'number_of_stuff1_sum_2h': 10,
'number_of_stuff2_avg_1h': 2.0, 'number_of_stuff2_avg_24h': 1.5, 'number_of_stuff2_avg_2h': 1.5,
'number_of_stuff2_max_1h': 3, 'number_of_stuff2_max_24h': 3, 'number_of_stuff2_max_2h': 3,
'number_of_stuff2_min_1h': 1, 'number_of_stuff2_min_24h': 0, 'number_of_stuff2_min_2h': 0,
'number_of_stuff2_sum_1h': 6, 'number_of_stuff2_sum_24h': 6, 'number_of_stuff2_sum_2h': 6},
{'col2': 4, 'number_of_stuff1_avg_1h': 3.0, 'number_of_stuff1_avg_24h': 2.0, 'number_of_stuff1_avg_2h': 2.0,
'number_of_stuff1_max_1h': 4, 'number_of_stuff1_max_24h': 4, 'number_of_stuff1_max_2h': 4,
'number_of_stuff1_min_1h': 2, 'number_of_stuff1_min_24h': 0, 'number_of_stuff1_min_2h': 0,
'number_of_stuff1_sum_1h': 9, 'number_of_stuff1_sum_24h': 10, 'number_of_stuff1_sum_2h': 10,
'number_of_stuff2_avg_1h': 3.0, 'number_of_stuff2_avg_24h': 2.0, 'number_of_stuff2_avg_2h': 2.0,
'number_of_stuff2_max_1h': 4, 'number_of_stuff2_max_24h': 4, 'number_of_stuff2_max_2h': 4,
'number_of_stuff2_min_1h': 2, 'number_of_stuff2_min_24h': 0, 'number_of_stuff2_min_2h': 0,
'number_of_stuff2_sum_1h': 9, 'number_of_stuff2_sum_24h': 10, 'number_of_stuff2_sum_2h': 10},
{'col1': 5, 'number_of_stuff1_avg_1h': 4.0, 'number_of_stuff1_avg_24h': 2.5, 'number_of_stuff1_avg_2h': 3.0,
'number_of_stuff1_max_1h': 5, 'number_of_stuff1_max_24h': 5, 'number_of_stuff1_max_2h': 5,
'number_of_stuff1_min_1h': 3, 'number_of_stuff1_min_24h': 0, 'number_of_stuff1_min_2h': 1,
'number_of_stuff1_sum_1h': 12, 'number_of_stuff1_sum_24h': 15, 'number_of_stuff1_sum_2h': 15,
'number_of_stuff2_avg_1h': 3.0, 'number_of_stuff2_avg_24h': 2.0, 'number_of_stuff2_avg_2h': 2.0,
'number_of_stuff2_max_1h': 4, 'number_of_stuff2_max_24h': 4, 'number_of_stuff2_max_2h': 4,
'number_of_stuff2_min_1h': 2, 'number_of_stuff2_min_24h': 0, 'number_of_stuff2_min_2h': 0,
'number_of_stuff2_sum_1h': 9, 'number_of_stuff2_sum_24h': 10, 'number_of_stuff2_sum_2h': 10},
{'col2': 5, 'number_of_stuff1_avg_1h': 4.0, 'number_of_stuff1_avg_24h': 2.5, 'number_of_stuff1_avg_2h': 3.0,
'number_of_stuff1_max_1h': 5, 'number_of_stuff1_max_24h': 5, 'number_of_stuff1_max_2h': 5,
'number_of_stuff1_min_1h': 3, 'number_of_stuff1_min_24h': 0, 'number_of_stuff1_min_2h': 1,
'number_of_stuff1_sum_1h': 12, 'number_of_stuff1_sum_24h': 15, 'number_of_stuff1_sum_2h': 15,
'number_of_stuff2_avg_1h': 4.0, 'number_of_stuff2_avg_24h': 2.5, 'number_of_stuff2_avg_2h': 3.0,
'number_of_stuff2_max_1h': 5, 'number_of_stuff2_max_24h': 5, 'number_of_stuff2_max_2h': 5,
'number_of_stuff2_min_1h': 3, 'number_of_stuff2_min_24h': 0, 'number_of_stuff2_min_2h': 1,
'number_of_stuff2_sum_1h': 12, 'number_of_stuff2_sum_24h': 15, 'number_of_stuff2_sum_2h': 15},
{'col1': 6, 'number_of_stuff1_avg_1h': 5.0, 'number_of_stuff1_avg_24h': 3.0, 'number_of_stuff1_avg_2h': 4.0,
'number_of_stuff1_max_1h': 6, 'number_of_stuff1_max_24h': 6, 'number_of_stuff1_max_2h': 6,
'number_of_stuff1_min_1h': 4, 'number_of_stuff1_min_24h': 0, 'number_of_stuff1_min_2h': 2,
'number_of_stuff1_sum_1h': 15, 'number_of_stuff1_sum_24h': 21, 'number_of_stuff1_sum_2h': 20,
'number_of_stuff2_avg_1h': 4.0, 'number_of_stuff2_avg_24h': 2.5, 'number_of_stuff2_avg_2h': 3.0,
'number_of_stuff2_max_1h': 5, 'number_of_stuff2_max_24h': 5, 'number_of_stuff2_max_2h': 5,
'number_of_stuff2_min_1h': 3, 'number_of_stuff2_min_24h': 0, 'number_of_stuff2_min_2h': 1,
'number_of_stuff2_sum_1h': 12, 'number_of_stuff2_sum_24h': 15, 'number_of_stuff2_sum_2h': 15},
{'col2': 6, 'number_of_stuff1_avg_1h': 5.0, 'number_of_stuff1_avg_24h': 3.0, 'number_of_stuff1_avg_2h': 4.0,
'number_of_stuff1_max_1h': 6, 'number_of_stuff1_max_24h': 6, 'number_of_stuff1_max_2h': 6,
'number_of_stuff1_min_1h': 4, 'number_of_stuff1_min_24h': 0, 'number_of_stuff1_min_2h': 2,
'number_of_stuff1_sum_1h': 15, 'number_of_stuff1_sum_24h': 21, 'number_of_stuff1_sum_2h': 20,
'number_of_stuff2_avg_1h': 5.0, 'number_of_stuff2_avg_24h': 3.0, 'number_of_stuff2_avg_2h': 4.0,
'number_of_stuff2_max_1h': 6, 'number_of_stuff2_max_24h': 6, 'number_of_stuff2_max_2h': 6,
'number_of_stuff2_min_1h': 4, 'number_of_stuff2_min_24h': 0, 'number_of_stuff2_min_2h': 2,
'number_of_stuff2_sum_1h': 15, 'number_of_stuff2_sum_24h': 21, 'number_of_stuff2_sum_2h': 20},
{'col1': 7, 'number_of_stuff1_avg_1h': 6.0, 'number_of_stuff1_avg_24h': 3.5, 'number_of_stuff1_avg_2h': 5.0,
'number_of_stuff1_max_1h': 7, 'number_of_stuff1_max_24h': 7, 'number_of_stuff1_max_2h': 7,
'number_of_stuff1_min_1h': 5, 'number_of_stuff1_min_24h': 0, 'number_of_stuff1_min_2h': 3,
'number_of_stuff1_sum_1h': 18, 'number_of_stuff1_sum_24h': 28, 'number_of_stuff1_sum_2h': 25,
'number_of_stuff2_avg_1h': 5.0, 'number_of_stuff2_avg_24h': 3.0, 'number_of_stuff2_avg_2h': 4.0,
'number_of_stuff2_max_1h': 6, 'number_of_stuff2_max_24h': 6, 'number_of_stuff2_max_2h': 6,
'number_of_stuff2_min_1h': 4, 'number_of_stuff2_min_24h': 0, 'number_of_stuff2_min_2h': 2,
'number_of_stuff2_sum_1h': 15, 'number_of_stuff2_sum_24h': 21, 'number_of_stuff2_sum_2h': 20},
{'col2': 7, 'number_of_stuff1_avg_1h': 6.0, 'number_of_stuff1_avg_24h': 3.5, 'number_of_stuff1_avg_2h': 5.0,
'number_of_stuff1_max_1h': 7, 'number_of_stuff1_max_24h': 7, 'number_of_stuff1_max_2h': 7,
'number_of_stuff1_min_1h': 5, 'number_of_stuff1_min_24h': 0, 'number_of_stuff1_min_2h': 3,
'number_of_stuff1_sum_1h': 18, 'number_of_stuff1_sum_24h': 28, 'number_of_stuff1_sum_2h': 25,
'number_of_stuff2_avg_1h': 6.0, 'number_of_stuff2_avg_24h': 3.5, 'number_of_stuff2_avg_2h': 5.0,
'number_of_stuff2_max_1h': 7, 'number_of_stuff2_max_24h': 7, 'number_of_stuff2_max_2h': 7,
'number_of_stuff2_min_1h': 5, 'number_of_stuff2_min_24h': 0, 'number_of_stuff2_min_2h': 3,
'number_of_stuff2_sum_1h': 18, 'number_of_stuff2_sum_24h': 28, 'number_of_stuff2_sum_2h': 25},
{'col1': 8, 'number_of_stuff1_avg_1h': 7.0, 'number_of_stuff1_avg_24h': 4.0, 'number_of_stuff1_avg_2h': 6.0,
'number_of_stuff1_max_1h': 8, 'number_of_stuff1_max_24h': 8, 'number_of_stuff1_max_2h': 8,
'number_of_stuff1_min_1h': 6, 'number_of_stuff1_min_24h': 0, 'number_of_stuff1_min_2h': 4,
'number_of_stuff1_sum_1h': 21, 'number_of_stuff1_sum_24h': 36, 'number_of_stuff1_sum_2h': 30,
'number_of_stuff2_avg_1h': 6.0, 'number_of_stuff2_avg_24h': 3.5, 'number_of_stuff2_avg_2h': 5.0,
'number_of_stuff2_max_1h': 7, 'number_of_stuff2_max_24h': 7, 'number_of_stuff2_max_2h': 7,
'number_of_stuff2_min_1h': 5, 'number_of_stuff2_min_24h': 0, 'number_of_stuff2_min_2h': 3,
'number_of_stuff2_sum_1h': 18, 'number_of_stuff2_sum_24h': 28, 'number_of_stuff2_sum_2h': 25},
{'col2': 8, 'number_of_stuff1_avg_1h': 7.0, 'number_of_stuff1_avg_24h': 4.0, 'number_of_stuff1_avg_2h': 6.0,
'number_of_stuff1_max_1h': 8, 'number_of_stuff1_max_24h': 8, 'number_of_stuff1_max_2h': 8,
'number_of_stuff1_min_1h': 6, 'number_of_stuff1_min_24h': 0, 'number_of_stuff1_min_2h': 4,
'number_of_stuff1_sum_1h': 21, 'number_of_stuff1_sum_24h': 36, 'number_of_stuff1_sum_2h': 30,
'number_of_stuff2_avg_1h': 7.0, 'number_of_stuff2_avg_24h': 4.0, 'number_of_stuff2_avg_2h': 6.0,
'number_of_stuff2_max_1h': 8, 'number_of_stuff2_max_24h': 8, 'number_of_stuff2_max_2h': 8,
'number_of_stuff2_min_1h': 6, 'number_of_stuff2_min_24h': 0, 'number_of_stuff2_min_2h': 4,
'number_of_stuff2_sum_1h': 21, 'number_of_stuff2_sum_24h': 36, 'number_of_stuff2_sum_2h': 30},
{'col1': 9, 'number_of_stuff1_avg_1h': 8.0, 'number_of_stuff1_avg_24h': 4.5, 'number_of_stuff1_avg_2h': 7.0,
'number_of_stuff1_max_1h': 9, 'number_of_stuff1_max_24h': 9, 'number_of_stuff1_max_2h': 9,
'number_of_stuff1_min_1h': 7, 'number_of_stuff1_min_24h': 0, 'number_of_stuff1_min_2h': 5,
'number_of_stuff1_sum_1h': 24, 'number_of_stuff1_sum_24h': 45, 'number_of_stuff1_sum_2h': 35,
'number_of_stuff2_avg_1h': 7.0, 'number_of_stuff2_avg_24h': 4.0, 'number_of_stuff2_avg_2h': 6.0,
'number_of_stuff2_max_1h': 8, 'number_of_stuff2_max_24h': 8, 'number_of_stuff2_max_2h': 8,
'number_of_stuff2_min_1h': 6, 'number_of_stuff2_min_24h': 0, 'number_of_stuff2_min_2h': 4,
'number_of_stuff2_sum_1h': 21, 'number_of_stuff2_sum_24h': 36, 'number_of_stuff2_sum_2h': 30},
{'col2': 9, 'number_of_stuff1_avg_1h': 8.0, 'number_of_stuff1_avg_24h': 4.5, 'number_of_stuff1_avg_2h': 7.0,
'number_of_stuff1_max_1h': 9, 'number_of_stuff1_max_24h': 9, 'number_of_stuff1_max_2h': 9,
'number_of_stuff1_min_1h': 7, 'number_of_stuff1_min_24h': 0, 'number_of_stuff1_min_2h': 5,
'number_of_stuff1_sum_1h': 24, 'number_of_stuff1_sum_24h': 45, 'number_of_stuff1_sum_2h': 35,
'number_of_stuff2_avg_1h': 8.0, 'number_of_stuff2_avg_24h': 4.5, 'number_of_stuff2_avg_2h': 7.0,
'number_of_stuff2_max_1h': 9, 'number_of_stuff2_max_24h': 9, 'number_of_stuff2_max_2h': 9,
'number_of_stuff2_min_1h': 7, 'number_of_stuff2_min_24h': 0, 'number_of_stuff2_min_2h': 5,
'number_of_stuff2_sum_1h': 24, 'number_of_stuff2_sum_24h': 45, 'number_of_stuff2_sum_2h': 35}]
assert actual == expected_results, \
f'actual did not match expected. \n actual: {actual} \n expected: {expected_results}'
def test_sliding_window_sparse_data_uneven_feature_occurrence():
controller = build_flow([
SyncEmitSource(),
AggregateByKey(
[FieldAggregator("number_of_stuff1", "col1", ["sum", "avg", "min", "max"], SlidingWindows(['1h', '2h', '24h'], '10m')),
FieldAggregator("number_of_stuff2", "col2", ["sum", "avg", "min", "max"], SlidingWindows(['1h', '2h', '24h'], '10m'))],
Table("test", NoopDriver())),
Reduce([], lambda acc, x: append_return(acc, x)),
]).run()
controller.emit({'col1': 0}, 'tal', test_base_time)
for i in range(10):
controller.emit({'col2': i}, 'tal', test_base_time + timedelta(minutes=25 * i))
controller.terminate()
actual = controller.await_termination()
expected_results = [{'col1': 0, 'number_of_stuff1_avg_1h': 0.0, 'number_of_stuff1_avg_24h': 0.0, 'number_of_stuff1_avg_2h': 0.0,
'number_of_stuff1_max_1h': 0, 'number_of_stuff1_max_24h': 0, 'number_of_stuff1_max_2h': 0,
'number_of_stuff1_min_1h': 0, 'number_of_stuff1_min_24h': 0, 'number_of_stuff1_min_2h': 0,
'number_of_stuff1_sum_1h': 0, 'number_of_stuff1_sum_24h': 0, 'number_of_stuff1_sum_2h': 0,
'number_of_stuff2_avg_1h': math.nan, 'number_of_stuff2_avg_24h': math.nan, 'number_of_stuff2_avg_2h': math.nan,
'number_of_stuff2_max_1h': math.nan, 'number_of_stuff2_max_24h': math.nan, 'number_of_stuff2_max_2h': math.nan,
'number_of_stuff2_min_1h': math.nan, 'number_of_stuff2_min_24h': math.nan, 'number_of_stuff2_min_2h': math.nan,
'number_of_stuff2_sum_1h': 0, 'number_of_stuff2_sum_24h': 0, 'number_of_stuff2_sum_2h': 0},
{'col2': 0, 'number_of_stuff1_avg_1h': 0.0, 'number_of_stuff1_avg_24h': 0.0, 'number_of_stuff1_avg_2h': 0.0,
'number_of_stuff1_max_1h': 0, 'number_of_stuff1_max_24h': 0, 'number_of_stuff1_max_2h': 0,
'number_of_stuff1_min_1h': 0, 'number_of_stuff1_min_24h': 0, 'number_of_stuff1_min_2h': 0,
'number_of_stuff1_sum_1h': 0, 'number_of_stuff1_sum_24h': 0, 'number_of_stuff1_sum_2h': 0,
'number_of_stuff2_avg_1h': 0.0, 'number_of_stuff2_avg_24h': 0.0, 'number_of_stuff2_avg_2h': 0.0,
'number_of_stuff2_max_1h': 0, 'number_of_stuff2_max_24h': 0, 'number_of_stuff2_max_2h': 0,
'number_of_stuff2_min_1h': 0, 'number_of_stuff2_min_24h': 0, 'number_of_stuff2_min_2h': 0,
'number_of_stuff2_sum_1h': 0, 'number_of_stuff2_sum_24h': 0, 'number_of_stuff2_sum_2h': 0},
{'col2': 1, 'number_of_stuff1_avg_1h': 0.0, 'number_of_stuff1_avg_24h': 0.0, 'number_of_stuff1_avg_2h': 0.0,
'number_of_stuff1_max_1h': 0, 'number_of_stuff1_max_24h': 0, 'number_of_stuff1_max_2h': 0,
'number_of_stuff1_min_1h': 0, 'number_of_stuff1_min_24h': 0, 'number_of_stuff1_min_2h': 0,
'number_of_stuff1_sum_1h': 0, 'number_of_stuff1_sum_24h': 0, 'number_of_stuff1_sum_2h': 0,
'number_of_stuff2_avg_1h': 0.5, 'number_of_stuff2_avg_24h': 0.5, 'number_of_stuff2_avg_2h': 0.5,
'number_of_stuff2_max_1h': 1, 'number_of_stuff2_max_24h': 1, 'number_of_stuff2_max_2h': 1,
'number_of_stuff2_min_1h': 0, 'number_of_stuff2_min_24h': 0, 'number_of_stuff2_min_2h': 0,
'number_of_stuff2_sum_1h': 1, 'number_of_stuff2_sum_24h': 1, 'number_of_stuff2_sum_2h': 1},
{'col2': 2, 'number_of_stuff1_avg_1h': 0.0, 'number_of_stuff1_avg_24h': 0.0, 'number_of_stuff1_avg_2h': 0.0,
'number_of_stuff1_max_1h': 0, 'number_of_stuff1_max_24h': 0, 'number_of_stuff1_max_2h': 0,
'number_of_stuff1_min_1h': 0, 'number_of_stuff1_min_24h': 0, 'number_of_stuff1_min_2h': 0,
'number_of_stuff1_sum_1h': 0, 'number_of_stuff1_sum_24h': 0, 'number_of_stuff1_sum_2h': 0,
'number_of_stuff2_avg_1h': 1.0, 'number_of_stuff2_avg_24h': 1.0, 'number_of_stuff2_avg_2h': 1.0,
'number_of_stuff2_max_1h': 2, 'number_of_stuff2_max_24h': 2, 'number_of_stuff2_max_2h': 2,
'number_of_stuff2_min_1h': 0, 'number_of_stuff2_min_24h': 0, 'number_of_stuff2_min_2h': 0,
'number_of_stuff2_sum_1h': 3, 'number_of_stuff2_sum_24h': 3, 'number_of_stuff2_sum_2h': 3},
{'col2': 3, 'number_of_stuff1_avg_1h': 0.0, 'number_of_stuff1_avg_24h': 0.0, 'number_of_stuff1_avg_2h': 0.0,
'number_of_stuff1_max_1h': 0, 'number_of_stuff1_max_24h': 0, 'number_of_stuff1_max_2h': 0,
'number_of_stuff1_min_1h': 0, 'number_of_stuff1_min_24h': 0, 'number_of_stuff1_min_2h': 0,
'number_of_stuff1_sum_1h': 0, 'number_of_stuff1_sum_24h': 0, 'number_of_stuff1_sum_2h': 0,
'number_of_stuff2_avg_1h': 2.0, 'number_of_stuff2_avg_24h': 1.5, 'number_of_stuff2_avg_2h': 1.5,
'number_of_stuff2_max_1h': 3, 'number_of_stuff2_max_24h': 3, 'number_of_stuff2_max_2h': 3,
'number_of_stuff2_min_1h': 1, 'number_of_stuff2_min_24h': 0, 'number_of_stuff2_min_2h': 0,
'number_of_stuff2_sum_1h': 6, 'number_of_stuff2_sum_24h': 6, 'number_of_stuff2_sum_2h': 6},
{'col2': 4, 'number_of_stuff1_avg_1h': 0.0, 'number_of_stuff1_avg_24h': 0.0, 'number_of_stuff1_avg_2h': 0.0,
'number_of_stuff1_max_1h': 0, 'number_of_stuff1_max_24h': 0, 'number_of_stuff1_max_2h': 0,
'number_of_stuff1_min_1h': 0, 'number_of_stuff1_min_24h': 0, 'number_of_stuff1_min_2h': 0,
'number_of_stuff1_sum_1h': 0, 'number_of_stuff1_sum_24h': 0, 'number_of_stuff1_sum_2h': 0,
'number_of_stuff2_avg_1h': 3.0, 'number_of_stuff2_avg_24h': 2.0, 'number_of_stuff2_avg_2h': 2.0,
'number_of_stuff2_max_1h': 4, 'number_of_stuff2_max_24h': 4, 'number_of_stuff2_max_2h': 4,
'number_of_stuff2_min_1h': 2, 'number_of_stuff2_min_24h': 0, 'number_of_stuff2_min_2h': 0,
'number_of_stuff2_sum_1h': 9, 'number_of_stuff2_sum_24h': 10, 'number_of_stuff2_sum_2h': 10},
{'col2': 5, 'number_of_stuff1_avg_1h': 0.0, 'number_of_stuff1_avg_24h': 0.0, 'number_of_stuff1_avg_2h': 0.0,
'number_of_stuff1_max_1h': 0, 'number_of_stuff1_max_24h': 0, 'number_of_stuff1_max_2h': 0,
'number_of_stuff1_min_1h': 0, 'number_of_stuff1_min_24h': 0, 'number_of_stuff1_min_2h': 0,
'number_of_stuff1_sum_1h': 0, 'number_of_stuff1_sum_24h': 0, 'number_of_stuff1_sum_2h': 0,
'number_of_stuff2_avg_1h': 4.0, 'number_of_stuff2_avg_24h': 2.5, 'number_of_stuff2_avg_2h': 3.0,
'number_of_stuff2_max_1h': 5, 'number_of_stuff2_max_24h': 5, 'number_of_stuff2_max_2h': 5,
'number_of_stuff2_min_1h': 3, 'number_of_stuff2_min_24h': 0, 'number_of_stuff2_min_2h': 1,
'number_of_stuff2_sum_1h': 12, 'number_of_stuff2_sum_24h': 15, 'number_of_stuff2_sum_2h': 15},
{'col2': 6, 'number_of_stuff1_avg_1h': 0.0, 'number_of_stuff1_avg_24h': 0.0, 'number_of_stuff1_avg_2h': 0.0,
'number_of_stuff1_max_1h': 0, 'number_of_stuff1_max_24h': 0, 'number_of_stuff1_max_2h': 0,
'number_of_stuff1_min_1h': 0, 'number_of_stuff1_min_24h': 0, 'number_of_stuff1_min_2h': 0,
'number_of_stuff1_sum_1h': 0, 'number_of_stuff1_sum_24h': 0, 'number_of_stuff1_sum_2h': 0,
'number_of_stuff2_avg_1h': 5.0, 'number_of_stuff2_avg_24h': 3.0, 'number_of_stuff2_avg_2h': 4.0,
'number_of_stuff2_max_1h': 6, 'number_of_stuff2_max_24h': 6, 'number_of_stuff2_max_2h': 6,
'number_of_stuff2_min_1h': 4, 'number_of_stuff2_min_24h': 0, 'number_of_stuff2_min_2h': 2,
'number_of_stuff2_sum_1h': 15, 'number_of_stuff2_sum_24h': 21, 'number_of_stuff2_sum_2h': 20},
{'col2': 7, 'number_of_stuff1_avg_1h': 0.0, 'number_of_stuff1_avg_24h': 0.0, 'number_of_stuff1_avg_2h': 0.0,
'number_of_stuff1_max_1h': 0, 'number_of_stuff1_max_24h': 0, 'number_of_stuff1_max_2h': 0,
'number_of_stuff1_min_1h': 0, 'number_of_stuff1_min_24h': 0, 'number_of_stuff1_min_2h': 0,
'number_of_stuff1_sum_1h': 0, 'number_of_stuff1_sum_24h': 0, 'number_of_stuff1_sum_2h': 0,
'number_of_stuff2_avg_1h': 6.0, 'number_of_stuff2_avg_24h': 3.5, 'number_of_stuff2_avg_2h': 5.0,
'number_of_stuff2_max_1h': 7, 'number_of_stuff2_max_24h': 7, 'number_of_stuff2_max_2h': 7,
'number_of_stuff2_min_1h': 5, 'number_of_stuff2_min_24h': 0, 'number_of_stuff2_min_2h': 3,
'number_of_stuff2_sum_1h': 18, 'number_of_stuff2_sum_24h': 28, 'number_of_stuff2_sum_2h': 25},
{'col2': 8, 'number_of_stuff1_avg_1h': 0.0, 'number_of_stuff1_avg_24h': 0.0, 'number_of_stuff1_avg_2h': 0.0,
'number_of_stuff1_max_1h': 0, 'number_of_stuff1_max_24h': 0, 'number_of_stuff1_max_2h': 0,
'number_of_stuff1_min_1h': 0, 'number_of_stuff1_min_24h': 0, 'number_of_stuff1_min_2h': 0,
'number_of_stuff1_sum_1h': 0, 'number_of_stuff1_sum_24h': 0, 'number_of_stuff1_sum_2h': 0,
'number_of_stuff2_avg_1h': 7.0, 'number_of_stuff2_avg_24h': 4.0, 'number_of_stuff2_avg_2h': 6.0,
'number_of_stuff2_max_1h': 8, 'number_of_stuff2_max_24h': 8, 'number_of_stuff2_max_2h': 8,
'number_of_stuff2_min_1h': 6, 'number_of_stuff2_min_24h': 0, 'number_of_stuff2_min_2h': 4,
'number_of_stuff2_sum_1h': 21, 'number_of_stuff2_sum_24h': 36, 'number_of_stuff2_sum_2h': 30},
{'col2': 9, 'number_of_stuff1_avg_1h': 0.0, 'number_of_stuff1_avg_24h': 0.0, 'number_of_stuff1_avg_2h': 0.0,
'number_of_stuff1_max_1h': 0, 'number_of_stuff1_max_24h': 0, 'number_of_stuff1_max_2h': 0,
'number_of_stuff1_min_1h': 0, 'number_of_stuff1_min_24h': 0, 'number_of_stuff1_min_2h': 0,
'number_of_stuff1_sum_1h': 0, 'number_of_stuff1_sum_24h': 0, 'number_of_stuff1_sum_2h': 0,
'number_of_stuff2_avg_1h': 8.0, 'number_of_stuff2_avg_24h': 4.5, 'number_of_stuff2_avg_2h': 7.0,
'number_of_stuff2_max_1h': 9, 'number_of_stuff2_max_24h': 9, 'number_of_stuff2_max_2h': 9,
'number_of_stuff2_min_1h': 7, 'number_of_stuff2_min_24h': 0, 'number_of_stuff2_min_2h': 5,
'number_of_stuff2_sum_1h': 24, 'number_of_stuff2_sum_24h': 45, 'number_of_stuff2_sum_2h': 35}]
assert actual == expected_results, \
f'actual did not match expected. \n actual: {actual} \n expected: {expected_results}'
def test_sliding_window_multiple_keys_aggregation_flow():
controller = build_flow([
SyncEmitSource(),
AggregateByKey([FieldAggregator("number_of_stuff", "col1", ["sum", "avg"],
SlidingWindows(['1h', '2h', '24h'], '10m'))],
Table("test", NoopDriver())),
Reduce([], lambda acc, x: append_return(acc, x)),
]).run()
for i in range(10):
data = {'col1': i}
controller.emit(data, f'{i % 2}', test_base_time + timedelta(minutes=i))
controller.terminate()
actual = controller.await_termination()
expected_results = [
{'col1': 0, 'number_of_stuff_sum_1h': 0, 'number_of_stuff_sum_2h': 0, 'number_of_stuff_sum_24h': 0,
'number_of_stuff_avg_1h': 0.0, 'number_of_stuff_avg_2h': 0.0, 'number_of_stuff_avg_24h': 0.0},
{'col1': 1, 'number_of_stuff_sum_1h': 1, 'number_of_stuff_sum_2h': 1, 'number_of_stuff_sum_24h': 1,
'number_of_stuff_avg_1h': 1.0, 'number_of_stuff_avg_2h': 1.0, 'number_of_stuff_avg_24h': 1.0},
{'col1': 2, 'number_of_stuff_sum_1h': 2, 'number_of_stuff_sum_2h': 2, 'number_of_stuff_sum_24h': 2,
'number_of_stuff_avg_1h': 1.0, 'number_of_stuff_avg_2h': 1.0, 'number_of_stuff_avg_24h': 1.0},
{'col1': 3, 'number_of_stuff_sum_1h': 4, 'number_of_stuff_sum_2h': 4, 'number_of_stuff_sum_24h': 4,
'number_of_stuff_avg_1h': 2.0, 'number_of_stuff_avg_2h': 2.0, 'number_of_stuff_avg_24h': 2.0},
{'col1': 4, 'number_of_stuff_sum_1h': 6, 'number_of_stuff_sum_2h': 6, 'number_of_stuff_sum_24h': 6,
'number_of_stuff_avg_1h': 2.0, 'number_of_stuff_avg_2h': 2.0, 'number_of_stuff_avg_24h': 2.0},
{'col1': 5, 'number_of_stuff_sum_1h': 9, 'number_of_stuff_sum_2h': 9, 'number_of_stuff_sum_24h': 9,
'number_of_stuff_avg_1h': 3.0, 'number_of_stuff_avg_2h': 3.0, 'number_of_stuff_avg_24h': 3.0},
{'col1': 6, 'number_of_stuff_sum_1h': 12, 'number_of_stuff_sum_2h': 12, 'number_of_stuff_sum_24h': 12,
'number_of_stuff_avg_1h': 3.0, 'number_of_stuff_avg_2h': 3.0, 'number_of_stuff_avg_24h': 3.0},
{'col1': 7, 'number_of_stuff_sum_1h': 16, 'number_of_stuff_sum_2h': 16, 'number_of_stuff_sum_24h': 16,
'number_of_stuff_avg_1h': 4.0, 'number_of_stuff_avg_2h': 4.0, 'number_of_stuff_avg_24h': 4.0},
{'col1': 8, 'number_of_stuff_sum_1h': 20, 'number_of_stuff_sum_2h': 20, 'number_of_stuff_sum_24h': 20,
'number_of_stuff_avg_1h': 4.0, 'number_of_stuff_avg_2h': 4.0, 'number_of_stuff_avg_24h': 4.0},
{'col1': 9, 'number_of_stuff_sum_1h': 25, 'number_of_stuff_sum_2h': 25, 'number_of_stuff_sum_24h': 25,
'number_of_stuff_avg_1h': 5.0, 'number_of_stuff_avg_2h': 5.0, 'number_of_stuff_avg_24h': 5.0}]
assert actual == expected_results, \
f'actual did not match expected. \n actual: {actual} \n expected: {expected_results}'
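# Worked check (added for clarity): the events alternate between keys '0' and '1', so
# each key only accumulates every other value; e.g. for col1=9 (key '1') the window
# holds col1 = 1, 3, 5, 7, 9, giving sum = 25 and avg = 5.0, as asserted above.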
def test_sliding_window_aggregations_with_filters_flow():
controller = build_flow([
SyncEmitSource(),
AggregateByKey([FieldAggregator("number_of_stuff", "col1", ["sum", "avg"],
SlidingWindows(['1h', '2h', '24h'], '10m'),
aggr_filter=lambda element: element['is_valid'] == 0)],
Table("test", NoopDriver())),
Reduce([], lambda acc, x: append_return(acc, x)),
]).run()
for i in range(10):
data = {'col1': i, 'is_valid': i % 2}
controller.emit(data, 'tal', test_base_time + timedelta(minutes=i))
controller.terminate()
actual = controller.await_termination()
expected_results = [{'col1': 0, 'is_valid': 0, 'number_of_stuff_sum_1h': 0, 'number_of_stuff_sum_2h': 0,
'number_of_stuff_sum_24h': 0, 'number_of_stuff_avg_1h': 0.0, 'number_of_stuff_avg_2h': 0.0,
'number_of_stuff_avg_24h': 0.0},
{'col1': 1, 'is_valid': 1, 'number_of_stuff_sum_1h': 0, 'number_of_stuff_sum_2h': 0,
'number_of_stuff_sum_24h': 0, 'number_of_stuff_avg_1h': 0.0, 'number_of_stuff_avg_2h': 0.0,
'number_of_stuff_avg_24h': 0.0},
{'col1': 2, 'is_valid': 0, 'number_of_stuff_sum_1h': 2, 'number_of_stuff_sum_2h': 2,
'number_of_stuff_sum_24h': 2, 'number_of_stuff_avg_1h': 1.0, 'number_of_stuff_avg_2h': 1.0,
'number_of_stuff_avg_24h': 1.0},
{'col1': 3, 'is_valid': 1, 'number_of_stuff_sum_1h': 2, 'number_of_stuff_sum_2h': 2,
'number_of_stuff_sum_24h': 2, 'number_of_stuff_avg_1h': 1.0, 'number_of_stuff_avg_2h': 1.0,
'number_of_stuff_avg_24h': 1.0},
{'col1': 4, 'is_valid': 0, 'number_of_stuff_sum_1h': 6, 'number_of_stuff_sum_2h': 6,
'number_of_stuff_sum_24h': 6, 'number_of_stuff_avg_1h': 2.0, 'number_of_stuff_avg_2h': 2.0,
'number_of_stuff_avg_24h': 2.0},
{'col1': 5, 'is_valid': 1, 'number_of_stuff_sum_1h': 6, 'number_of_stuff_sum_2h': 6,
'number_of_stuff_sum_24h': 6, 'number_of_stuff_avg_1h': 2.0, 'number_of_stuff_avg_2h': 2.0,
'number_of_stuff_avg_24h': 2.0},
{'col1': 6, 'is_valid': 0, 'number_of_stuff_sum_1h': 12, 'number_of_stuff_sum_2h': 12,
'number_of_stuff_sum_24h': 12, 'number_of_stuff_avg_1h': 3.0, 'number_of_stuff_avg_2h': 3.0,
'number_of_stuff_avg_24h': 3.0},
{'col1': 7, 'is_valid': 1, 'number_of_stuff_sum_1h': 12, 'number_of_stuff_sum_2h': 12,
'number_of_stuff_sum_24h': 12, 'number_of_stuff_avg_1h': 3.0, 'number_of_stuff_avg_2h': 3.0,
'number_of_stuff_avg_24h': 3.0},
{'col1': 8, 'is_valid': 0, 'number_of_stuff_sum_1h': 20, 'number_of_stuff_sum_2h': 20,
'number_of_stuff_sum_24h': 20, 'number_of_stuff_avg_1h': 4.0, 'number_of_stuff_avg_2h': 4.0,
'number_of_stuff_avg_24h': 4.0},
{'col1': 9, 'is_valid': 1, 'number_of_stuff_sum_1h': 20, 'number_of_stuff_sum_2h': 20,
'number_of_stuff_sum_24h': 20, 'number_of_stuff_avg_1h': 4.0, 'number_of_stuff_avg_2h': 4.0,
'number_of_stuff_avg_24h': 4.0}]
assert actual == expected_results, \
f'actual did not match expected. \n actual: {actual} \n expected: {expected_results}'
def test_sliding_window_aggregations_with_max_values_flow():
controller = build_flow([
SyncEmitSource(),
AggregateByKey([FieldAggregator("num_hours_with_stuff_in_the_last_24h", "col1", ["count"],
SlidingWindows(['24h'], '1h'),
max_value=5)],
Table("test", NoopDriver())),
Reduce([], lambda acc, x: append_return(acc, x)),
]).run()
for i in range(10):
data = {'col1': i}
controller.emit(data, 'tal', test_base_time + timedelta(minutes=10 * i))
controller.terminate()
actual = controller.await_termination()
expected_results = [{'col1': 0, 'num_hours_with_stuff_in_the_last_24h_count_24h': 1},
{'col1': 1, 'num_hours_with_stuff_in_the_last_24h_count_24h': 2},
{'col1': 2, 'num_hours_with_stuff_in_the_last_24h_count_24h': 3},
{'col1': 3, 'num_hours_with_stuff_in_the_last_24h_count_24h': 4},
{'col1': 4, 'num_hours_with_stuff_in_the_last_24h_count_24h': 5},
{'col1': 5, 'num_hours_with_stuff_in_the_last_24h_count_24h': 5},
{'col1': 6, 'num_hours_with_stuff_in_the_last_24h_count_24h': 5},
{'col1': 7, 'num_hours_with_stuff_in_the_last_24h_count_24h': 5},
{'col1': 8, 'num_hours_with_stuff_in_the_last_24h_count_24h': 5},
{'col1': 9, 'num_hours_with_stuff_in_the_last_24h_count_24h': 5}]
assert actual == expected_results, \
f'actual did not match expected. \n actual: {actual} \n expected: {expected_results}'
def test_sliding_window_simple_aggregation_flow_multiple_fields():
controller = build_flow([
SyncEmitSource(),
AggregateByKey([FieldAggregator("number_of_stuff", "col1", ["sum", "avg"],
SlidingWindows(['1h', '2h', '24h'], '10m')),
FieldAggregator("number_of_things", "col2", ["count"],
SlidingWindows(['1h', '2h'], '15m')),
FieldAggregator("abc", "col3", ["sum"],
SlidingWindows(['24h'], '10m'))],
Table("test", NoopDriver())),
Reduce([], lambda acc, x: append_return(acc, x)),
]).run()
for i in range(10):
data = {'col1': i, 'col2': i * 1.2, 'col3': i * 2 + 4}
controller.emit(data, 'tal', test_base_time + timedelta(minutes=i))
controller.terminate()
actual = controller.await_termination()
expected_results = [{'col1': 0, 'col2': 0.0, 'col3': 4, 'number_of_stuff_sum_1h': 0, 'number_of_stuff_sum_2h': 0,
'number_of_stuff_sum_24h': 0, 'number_of_things_count_1h': 1, 'number_of_things_count_2h': 1,
'abc_sum_24h': 4, 'number_of_stuff_avg_1h': 0.0, 'number_of_stuff_avg_2h': 0.0, 'number_of_stuff_avg_24h': 0.0},
{'col1': 1, 'col2': 1.2, 'col3': 6, 'number_of_stuff_sum_1h': 1, 'number_of_stuff_sum_2h': 1,
'number_of_stuff_sum_24h': 1, 'number_of_things_count_1h': 2, 'number_of_things_count_2h': 2,
'abc_sum_24h': 10, 'number_of_stuff_avg_1h': 0.5, 'number_of_stuff_avg_2h': 0.5, 'number_of_stuff_avg_24h': 0.5},
{'col1': 2, 'col2': 2.4, 'col3': 8, 'number_of_stuff_sum_1h': 3, 'number_of_stuff_sum_2h': 3,
'number_of_stuff_sum_24h': 3, 'number_of_things_count_1h': 3, 'number_of_things_count_2h': 3,
'abc_sum_24h': 18, 'number_of_stuff_avg_1h': 1.0, 'number_of_stuff_avg_2h': 1.0, 'number_of_stuff_avg_24h': 1.0},
{'col1': 3, 'col2': 3.5999999999999996, 'col3': 10, 'number_of_stuff_sum_1h': 6,
'number_of_stuff_sum_2h': 6, 'number_of_stuff_sum_24h': 6, 'number_of_things_count_1h': 4,
'number_of_things_count_2h': 4, 'abc_sum_24h': 28, 'number_of_stuff_avg_1h': 1.5, 'number_of_stuff_avg_2h': 1.5,
'number_of_stuff_avg_24h': 1.5},
{'col1': 4, 'col2': 4.8, 'col3': 12, 'number_of_stuff_sum_1h': 10, 'number_of_stuff_sum_2h': 10,
'number_of_stuff_sum_24h': 10, 'number_of_things_count_1h': 5, 'number_of_things_count_2h': 5,
'abc_sum_24h': 40, 'number_of_stuff_avg_1h': 2.0, 'number_of_stuff_avg_2h': 2.0, 'number_of_stuff_avg_24h': 2.0},
{'col1': 5, 'col2': 6.0, 'col3': 14, 'number_of_stuff_sum_1h': 15, 'number_of_stuff_sum_2h': 15,
'number_of_stuff_sum_24h': 15, 'number_of_things_count_1h': 6, 'number_of_things_count_2h': 6,
'abc_sum_24h': 54, 'number_of_stuff_avg_1h': 2.5, 'number_of_stuff_avg_2h': 2.5, 'number_of_stuff_avg_24h': 2.5},
{'col1': 6, 'col2': 7.199999999999999, 'col3': 16, 'number_of_stuff_sum_1h': 21,
'number_of_stuff_sum_2h': 21, 'number_of_stuff_sum_24h': 21, 'number_of_things_count_1h': 7,
'number_of_things_count_2h': 7, 'abc_sum_24h': 70, 'number_of_stuff_avg_1h': 3.0,
'number_of_stuff_avg_2h': 3.0, 'number_of_stuff_avg_24h': 3.0},
{'col1': 7, 'col2': 8.4, 'col3': 18, 'number_of_stuff_sum_1h': 28, 'number_of_stuff_sum_2h': 28,
'number_of_stuff_sum_24h': 28, 'number_of_things_count_1h': 8, 'number_of_things_count_2h': 8,
'abc_sum_24h': 88, 'number_of_stuff_avg_1h': 3.5, 'number_of_stuff_avg_2h': 3.5, 'number_of_stuff_avg_24h': 3.5},
{'col1': 8, 'col2': 9.6, 'col3': 20, 'number_of_stuff_sum_1h': 36, 'number_of_stuff_sum_2h': 36,
'number_of_stuff_sum_24h': 36, 'number_of_things_count_1h': 9, 'number_of_things_count_2h': 9,
'abc_sum_24h': 108, 'number_of_stuff_avg_1h': 4.0, 'number_of_stuff_avg_2h': 4.0, 'number_of_stuff_avg_24h': 4.0},
{'col1': 9, 'col2': 10.799999999999999, 'col3': 22, 'number_of_stuff_sum_1h': 45,
'number_of_stuff_sum_2h': 45, 'number_of_stuff_sum_24h': 45,
'number_of_things_count_1h': 10, 'number_of_things_count_2h': 10, 'abc_sum_24h': 130,
'number_of_stuff_avg_1h': 4.5, 'number_of_stuff_avg_2h': 4.5, 'number_of_stuff_avg_24h': 4.5}]
assert actual == expected_results, \
f'actual did not match expected. \n actual: {actual} \n expected: {expected_results}'
def test_fixed_window_simple_aggregation_flow():
controller = build_flow([
SyncEmitSource(),
AggregateByKey([FieldAggregator("number_of_stuff", "col1", ["count"],
FixedWindows(['1h', '2h', '3h', '24h']))],
Table("test", NoopDriver())),
Reduce([], lambda acc, x: append_return(acc, x)),
]).run()
for i in range(10):
data = {'col1': i}
controller.emit(data, 'tal', test_base_time + timedelta(minutes=25 * i))
controller.terminate()
actual = controller.await_termination()
expected_results = [{'col1': 0, 'number_of_stuff_count_1h': 1, 'number_of_stuff_count_2h': 1, 'number_of_stuff_count_3h': 1,
'number_of_stuff_count_24h': 1},
{'col1': 1, 'number_of_stuff_count_1h': 1, 'number_of_stuff_count_2h': 1, 'number_of_stuff_count_3h': 2,
'number_of_stuff_count_24h': 2},
{'col1': 2, 'number_of_stuff_count_1h': 2, 'number_of_stuff_count_2h': 2, 'number_of_stuff_count_3h': 3,
'number_of_stuff_count_24h': 3},
{'col1': 3, 'number_of_stuff_count_1h': 3, 'number_of_stuff_count_2h': 3, 'number_of_stuff_count_3h': 4,
'number_of_stuff_count_24h': 4},
{'col1': 4, 'number_of_stuff_count_1h': 1, 'number_of_stuff_count_2h': 4, 'number_of_stuff_count_3h': 5,
'number_of_stuff_count_24h': 5},
{'col1': 5, 'number_of_stuff_count_1h': 2, 'number_of_stuff_count_2h': 5, 'number_of_stuff_count_3h': 6,
'number_of_stuff_count_24h': 6},
{'col1': 6, 'number_of_stuff_count_1h': 1, 'number_of_stuff_count_2h': 1, 'number_of_stuff_count_3h': 1,
'number_of_stuff_count_24h': 1},
{'col1': 7, 'number_of_stuff_count_1h': 2, 'number_of_stuff_count_2h': 2, 'number_of_stuff_count_3h': 2,
'number_of_stuff_count_24h': 2},
{'col1': 8, 'number_of_stuff_count_1h': 1, 'number_of_stuff_count_2h': 3, 'number_of_stuff_count_3h': 3,
'number_of_stuff_count_24h': 3},
{'col1': 9, 'number_of_stuff_count_1h': 2, 'number_of_stuff_count_2h': 4, 'number_of_stuff_count_3h': 4,
'number_of_stuff_count_24h': 4}]
assert actual == expected_results, \
f'actual did not match expected. \n actual: {actual} \n expected: {expected_results}'
def test_fixed_window_aggregation_with_uncommon_windows_flow():
time_format = '%Y-%m-%d %H:%M:%S.%f'
columns = ['sample_time', 'signal', 'isotope']
data = [[datetime.strptime('2021-05-30 16:42:15.797000', time_format).replace(tzinfo=timezone.utc), 790.235, 'U235'],
[datetime.strptime('2021-05-30 16:45:15.798000', time_format).replace(tzinfo=timezone.utc), 498.491, 'U235'],
[datetime.strptime('2021-05-30 16:48:15.799000', time_format).replace(tzinfo=timezone.utc), 34650.00343, 'U235'],
[datetime.strptime('2021-05-30 16:51:15.800000', time_format).replace(tzinfo=timezone.utc), 189.823, 'U235'],
[datetime.strptime('2021-05-30 16:54:15.801000', time_format).replace(tzinfo=timezone.utc), 379.524, 'U235'],
[datetime.strptime('2021-05-30 16:57:15.802000', time_format).replace(tzinfo=timezone.utc), 2225.4952, 'U235'],
[datetime.strptime('2021-05-30 17:00:15.803000', time_format).replace(tzinfo=timezone.utc), 1049.0903, 'U235'],
[datetime.strptime('2021-05-30 17:03:15.804000', time_format).replace(tzinfo=timezone.utc), 41905.63447, 'U235'],
[datetime.strptime('2021-05-30 17:06:15.805000', time_format).replace(tzinfo=timezone.utc), 4987.6764, 'U235'],
[datetime.strptime('2021-05-30 17:09:15.806000', time_format).replace(tzinfo=timezone.utc), 67657.11975, 'U235'],
[datetime.strptime('2021-05-30 17:12:15.807000', time_format).replace(tzinfo=timezone.utc), 56173.06327, 'U235'],
[datetime.strptime('2021-05-30 17:15:15.808000', time_format).replace(tzinfo=timezone.utc), 14249.67394, 'U235'],
[datetime.strptime('2021-05-30 17:18:15.809000', time_format).replace(tzinfo=timezone.utc), 656.831, 'U235'],
[datetime.strptime('2021-05-30 17:21:15.810000', time_format).replace(tzinfo=timezone.utc), 5768.4822, 'U235'],
[datetime.strptime('2021-05-30 17:24:15.811000', time_format).replace(tzinfo=timezone.utc), 929.028, 'U235'],
[datetime.strptime('2021-05-30 17:27:15.812000', time_format).replace(tzinfo=timezone.utc), 2585.9646, 'U235'],
[datetime.strptime('2021-05-30 17:30:15.813000', time_format).replace(tzinfo=timezone.utc), 358.918, 'U235']]
df = pd.DataFrame(data, columns=columns)
controller = build_flow([
DataframeSource(df, time_field="sample_time", key_field="isotope"),
AggregateByKey([FieldAggregator("samples", "signal", ["count"],
FixedWindows(['15m', '25m', '45m', '1h']))], Table("U235_test", NoopDriver())),
Reduce([], lambda acc, x: append_return(acc, x)),
]).run()
termination_result = controller.await_termination()
expected = [{'samples_count_15m': 1.0, 'samples_count_25m': 1.0, 'samples_count_45m': 1.0, 'samples_count_1h': 1.0,
'sample_time': pd.Timestamp('2021-05-30 16:42:15.797000+0000', tz='UTC'), 'signal': 790.235,
'isotope': 'U235'},
{'samples_count_15m': 1.0, 'samples_count_25m': 2.0, 'samples_count_45m': 2.0, 'samples_count_1h': 2.0,
'sample_time': pd.Timestamp('2021-05-30 16:45:15.798000+0000', tz='UTC'), 'signal': 498.491,
'isotope': 'U235'},
{'samples_count_15m': 2.0, 'samples_count_25m': 3.0, 'samples_count_45m': 3.0, 'samples_count_1h': 3.0,
'sample_time': pd.Timestamp('2021-05-30 16:48:15.799000+0000', tz='UTC'), 'signal': 34650.00343,
'isotope': 'U235'},
{'samples_count_15m': 3.0, 'samples_count_25m': 4.0, 'samples_count_45m': 4.0, 'samples_count_1h': 4.0,
'sample_time': pd.Timestamp('2021-05-30 16:51:15.800000+0000', tz='UTC'), 'signal': 189.823,
'isotope': 'U235'},
{'samples_count_15m': 4.0, 'samples_count_25m': 5.0, 'samples_count_45m': 5.0, 'samples_count_1h': 5.0,
'sample_time': pd.Timestamp('2021-05-30 16:54:15.801000+0000', tz='UTC'), 'signal': 379.524,
'isotope': 'U235'},
{'samples_count_15m': 5.0, 'samples_count_25m': 6.0, 'samples_count_45m': 6.0, 'samples_count_1h': 6.0,
'sample_time': pd.Timestamp('2021-05-30 16:57:15.802000+0000', tz='UTC'), 'signal': 2225.4952,
'isotope': 'U235'},
{'samples_count_15m': 1.0, 'samples_count_25m': 1.0, 'samples_count_45m': 7.0, 'samples_count_1h': 1.0,
'sample_time': pd.Timestamp('2021-05-30 17:00:15.803000+0000', tz='UTC'), 'signal': 1049.0903,
'isotope': 'U235'},
{'samples_count_15m': 2.0, 'samples_count_25m': 2.0, 'samples_count_45m': 8.0, 'samples_count_1h': 2.0,
'sample_time': pd.Timestamp('2021-05-30 17:03:15.804000+0000', tz='UTC'), 'signal': 41905.63447,
'isotope': 'U235'},
{'samples_count_15m': 3.0, 'samples_count_25m': 3.0, 'samples_count_45m': 9.0, 'samples_count_1h': 3.0,
'sample_time': pd.Timestamp('2021-05-30 17:06:15.805000+0000', tz='UTC'), 'signal': 4987.6764,
'isotope': 'U235'},
{'samples_count_15m': 4.0, 'samples_count_25m': 4.0, 'samples_count_45m': 10.0, 'samples_count_1h': 4.0,
'sample_time': pd.Timestamp('2021-05-30 17:09:15.806000+0000', tz='UTC'), 'signal': 67657.11975,
'isotope': 'U235'},
{'samples_count_15m': 5.0, 'samples_count_25m': 5.0, 'samples_count_45m': 11.0, 'samples_count_1h': 5.0,
'sample_time': pd.Timestamp('2021-05-30 17:12:15.807000+0000', tz='UTC'), 'signal': 56173.06327,
'isotope': 'U235'},
{'samples_count_15m': 1.0, 'samples_count_25m': 6.0, 'samples_count_45m': 1.0, 'samples_count_1h': 6.0,
'sample_time': pd.Timestamp('2021-05-30 17:15:15.808000+0000', tz='UTC'), 'signal': 14249.67394,
'isotope': 'U235'},
{'samples_count_15m': 2.0, 'samples_count_25m': 7.0, 'samples_count_45m': 2.0, 'samples_count_1h': 7.0,
'sample_time': pd.Timestamp('2021-05-30 17:18:15.809000+0000', tz='UTC'), 'signal': 656.831,
'isotope': 'U235'},
{'samples_count_15m': 3.0, 'samples_count_25m': 8.0, 'samples_count_45m': 3.0, 'samples_count_1h': 8.0,
'sample_time': pd.Timestamp('2021-05-30 17:21:15.810000+0000', tz='UTC'), 'signal': 5768.4822,
'isotope': 'U235'},
{'samples_count_15m': 4.0, 'samples_count_25m': 9.0, 'samples_count_45m': 4.0, 'samples_count_1h': 9.0,
'sample_time': pd.Timestamp('2021-05-30 17:24:15.811000+0000', tz='UTC'), 'signal': 929.028,
'isotope': 'U235'},
{'samples_count_15m': 5.0, 'samples_count_25m': 1.0, 'samples_count_45m': 5.0, 'samples_count_1h': 10.0,
'sample_time': pd.Timestamp('2021-05-30 17:27:15.812000+0000', tz='UTC'), 'signal': 2585.9646,
'isotope': 'U235'},
{'samples_count_15m': 1.0, 'samples_count_25m': 2.0, 'samples_count_45m': 6.0, 'samples_count_1h': 11.0,
'sample_time': pd.Timestamp('2021-05-30 17:30:15.813000+0000', tz='UTC'), 'signal': 358.918,
'isotope': 'U235'}]
assert termination_result == expected, \
f'actual did not match expected. \n actual: {termination_result} \n expected: {expected}'
def test_fixed_window_aggregation_with_multiple_keys_flow():
time_format = '%Y-%m-%d %H:%M:%S.%f'
columns = ['sample_time', 'signal', 'isotope']
data = [[datetime.strptime('2021-05-30 16:42:15.797000', time_format).replace(tzinfo=timezone.utc), 790.235, 'U235'],
[datetime.strptime('2021-05-30 16:45:15.798000', time_format).replace(tzinfo=timezone.utc), 498.491, 'U235'],
[datetime.strptime('2021-05-30 16:48:15.799000', time_format).replace(tzinfo=timezone.utc), 34650.00343, 'U238'],
[datetime.strptime('2021-05-30 16:51:15.800000', time_format).replace(tzinfo=timezone.utc), 189.823, 'U238'],
[datetime.strptime('2021-05-30 16:54:15.801000', time_format).replace(tzinfo=timezone.utc), 379.524, 'U238'],
[datetime.strptime('2021-05-30 16:57:15.802000', time_format).replace(tzinfo=timezone.utc), 2225.4952, 'U238'],
[datetime.strptime('2021-05-30 17:00:15.803000', time_format).replace(tzinfo=timezone.utc), 1049.0903, 'U235'],
[datetime.strptime('2021-05-30 17:03:15.804000', time_format).replace(tzinfo=timezone.utc), 41905.63447, 'U238'],
[datetime.strptime('2021-05-30 17:06:15.805000', time_format).replace(tzinfo=timezone.utc), 4987.6764, 'U235'],
[datetime.strptime('2021-05-30 17:09:15.806000', time_format).replace(tzinfo=timezone.utc), 67657.11975, 'U235'],
[datetime.strptime('2021-05-30 17:12:15.807000', time_format).replace(tzinfo=timezone.utc), 56173.06327, 'U235'],
[datetime.strptime('2021-05-30 17:15:15.808000', time_format).replace(tzinfo=timezone.utc), 14249.67394, 'U238'],
[datetime.strptime('2021-05-30 17:18:15.809000', time_format).replace(tzinfo=timezone.utc), 656.831, 'U238'],
[datetime.strptime('2021-05-30 17:21:15.810000', time_format).replace(tzinfo=timezone.utc), 5768.4822, 'U235'],
[datetime.strptime('2021-05-30 17:24:15.811000', time_format).replace(tzinfo=timezone.utc), 929.028, 'U235'],
[datetime.strptime('2021-05-30 17:27:15.812000', time_format).replace(tzinfo=timezone.utc), 2585.9646, 'U238'],
[datetime.strptime('2021-05-30 17:30:15.813000', time_format).replace(tzinfo=timezone.utc), 358.918, 'U238']]
df = pd.DataFrame(data, columns=columns)
controller = build_flow([
DataframeSource(df, time_field="sample_time", key_field="isotope"),
AggregateByKey([FieldAggregator("samples", "signal", ["count"],
FixedWindows(['10m', '15m']))], Table("U235_test", NoopDriver())),
Reduce([], lambda acc, x: append_return(acc, x)),
]).run()
termination_result = controller.await_termination()
expected = [
{'samples_count_10m': 1.0, 'samples_count_15m': 1.0,
'sample_time': pd.Timestamp('2021-05-30 16:42:15.797000+0000', tz='UTC'), 'signal': 790.235, 'isotope': 'U235'},
{'samples_count_10m': 2.0, 'samples_count_15m': 1.0,
'sample_time': pd.Timestamp('2021-05-30 16:45:15.798000+0000', tz='UTC'), 'signal': 498.491, 'isotope': 'U235'},
{'samples_count_10m': 1.0, 'samples_count_15m': 1.0,
'sample_time': pd.Timestamp('2021-05-30 16:48:15.799000+0000', tz='UTC'), 'signal': 34650.00343, 'isotope': 'U238'},
{'samples_count_10m': 1.0, 'samples_count_15m': 2.0,
'sample_time': pd.Timestamp('2021-05-30 16:51:15.800000+0000', tz='UTC'), 'signal': 189.823, 'isotope': 'U238'},
{'samples_count_10m': 2.0, 'samples_count_15m': 3.0,
'sample_time': pd.Timestamp('2021-05-30 16:54:15.801000+0000', tz='UTC'), 'signal': 379.524, 'isotope': 'U238'},
{'samples_count_10m': 3.0, 'samples_count_15m': 4.0,
'sample_time': pd.Timestamp('2021-05-30 16:57:15.802000+0000', tz='UTC'), 'signal': 2225.4952, 'isotope': 'U238'},
{'samples_count_10m': 1.0, 'samples_count_15m': 1.0,
'sample_time': pd.Timestamp('2021-05-30 17:00:15.803000+0000', tz='UTC'), 'signal': 1049.0903, 'isotope': 'U235'},
{'samples_count_10m': 1.0, 'samples_count_15m': 1.0,
'sample_time': pd.Timestamp('2021-05-30 17:03:15.804000+0000', tz='UTC'), 'signal': 41905.63447, 'isotope': 'U238'},
{'samples_count_10m': 2.0, 'samples_count_15m': 2.0,
'sample_time': pd.Timestamp('2021-05-30 17:06:15.805000+0000', tz='UTC'), 'signal': 4987.6764, 'isotope': 'U235'},
{'samples_count_10m': 3.0, 'samples_count_15m': 3.0,
'sample_time': | pd.Timestamp('2021-05-30 17:09:15.806000+0000', tz='UTC') | pandas.Timestamp |
__author__ = "<NAME>, <EMAIL>, <EMAIL>"
__date__ = "January 1, 2021 10:00:00 AM"
import os
import subprocess
from time import strftime
import numpy as np
import pandas as pd
from PIL import Image
import matplotlib.pyplot as plt
from scipy import stats
from keras.models import load_model
from keras.preprocessing import image
class miniqPCRdeepNet(object):
def __init__(self, runParameters):
self.runParameters = runParameters
def initialize(self):
self.runParameters['image_size'] = [299,299]
class_file = self.runParameters['trained_convnet_file'].replace('model_','classes_').replace('.h5','.tsv')
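        # Note (illustrative, assumed file naming): e.g. a model file named
        # 'model_run1.h5' would map to a class file 'classes_run1.tsv'.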
self.runParameters['trained_model'] = load_model(self.runParameters['trained_convnet_file'])
df = | pd.read_csv(class_file,sep='\t') | pandas.read_csv |
import tkinter as tk
from tkinter import *
from tkinter import ttk
import data
from tkinter import messagebox
import pandas as pd
from random import randint
## Colors
color1 = '#ffffff'
color2 = '#7CEBEA'
color3 = '#D6EB4D'
color4 = '#AB4DEB'
color5 = '#EB8F59'
selected ='#66d1d0'
## list for the markets combobox
mercados = ['Condor', 'Bistek', 'Hipermais Joao Costa', 'Hipermais Araquari', 'Fort Atacadista', 'Rodrigues']
## Lists in a combobox the product codes stored in the selected market's database via main.py
def listing(): ## This function refreshes the database connection to return its data
mercado = cmercados.get()
combo.set('')
query = "SELECT CODE FROM '"+mercado+"' order by CODE"
vcon = data.ConnectDB()
linhas = data.fill(vcon, query)
listas = list(linhas)
combo['values'] = (listas)
return listas
## Using the markets combobox to connect the tree to each market's spreadsheet database
def mercado_selected():
app.delete(*app.get_children())
mercado = cmercados.get()
if mercado == "Condor":
market = "Condor_pl"
elif mercado == "Bistek":
market = "Bistek_pl"
elif mercado == "Hipermais Joao Costa":
market = "Hiper_joao_pl"
elif mercado == "Hipermais Araquari":
market = "Hiper_ara_pl"
elif mercado == "Fort Atacadista":
market = "Fort_pl"
elif mercado == "Rodrigues":
market = "Rodrigues_pl"
listing()
query = "SELECT CODE, PRODUCT, MEAN, VALUE FROM '"+market+"' order by CODE"
vcon = data.ConnectDB()
linhas = data.fill(vcon,query)
for i in linhas:
app.insert("","end",values=i)
## Creates a spreadsheet for each order by querying each market's database
def create_spreadsheet():
x = randint(1, 999999)
vcon = data.ConnectDB()
condor_query = 'SELECT CODE, PRODUCT, MEAN, VALUE FROM Condor_pl order by CODE'
condor = data.fill(vcon,condor_query)
condor = list(condor)
bistek_query = 'SELECT CODE, PRODUCT, MEAN, VALUE FROM Bistek_pl order by CODE'
bistek = data.fill(vcon,bistek_query)
bistek = list(bistek)
hipjc_query = 'SELECT CODE, PRODUCT, MEAN, VALUE FROM Hiper_joao_pl order by CODE'
hipjc = data.fill(vcon,hipjc_query)
hipjc = list(hipjc)
hipara_query = 'SELECT CODE, PRODUCT, MEAN, VALUE FROM Hiper_ara_pl order by CODE'
hipara = data.fill(vcon,hipara_query)
hipara = list(hipara)
fort_query = 'SELECT CODE, PRODUCT, MEAN, VALUE FROM Fort_pl order by CODE'
fort = data.fill(vcon,fort_query)
fort = list(fort)
rod_query = 'SELECT CODE, PRODUCT, MEAN, VALUE FROM Rodrigues_pl order by CODE'
rod = data.fill(vcon,rod_query)
rod = list(rod)
writer = pd.ExcelWriter(f'faturamento_{x}.xlsx', engine='xlsxwriter')
df1 = | pd.DataFrame(condor, columns=["Código", "Produto", "Média", "Qtd"]) | pandas.DataFrame |
from __future__ import print_function
_README_ = '''
-------------------------------------------------------------------------
Generate JSON files for GBE decomposition page.
-p option outputs python numpy npz file (compressed format) for python
Author: <NAME> (<EMAIL>)
Date: 2017/12/01
-------------------------------------------------------------------------
'''
import pandas as pd
import numpy as np
import os, sys, json, re, gzip, argparse, logging, collections
from datetime import datetime
from functools import reduce
from scipy.sparse import dok_matrix
from logging.config import dictConfig
import rpy2.robjects as robjects
logging_config = dict(
version = 1,
formatters = {
'f': {'format':
'%(asctime)s %(name)-12s %(levelname)-8s %(message)s'}
},
handlers = {
'h': {'class': 'logging.StreamHandler',
'formatter': 'f',
'level': logging.DEBUG}
},
root = {
'handlers': ['h'],
'level': logging.INFO,
#'level': logging.DEBUG,
},
)
dictConfig(logging_config)
def parse_label_phe(label_phe_f):
label_phe_df = pd.read_csv(label_phe_f, sep='\t', compression='gzip')
label_phe_code = label_phe_df['icd'].as_matrix()
label_phe = label_phe_df['Name'].map(lambda x: re.sub('_', ' ', re.sub('_/_', '/', x))).as_matrix()
return label_phe, label_phe_code
def parse_label_var(label_var_f):
label_var_df = pd.read_csv(label_var_f, sep='\t', compression='gzip')
return label_var_df['signature'].map(lambda x: re.sub('_', '-', x)).as_matrix()
def read_eigen_values(tsvd_f):
eigen_v_dict = dict([])
with gzip.open(tsvd_f) as f:
for line in f:
l = line.split('\t')
if(l[0] == '1'):
eigen_v_dict[int(l[2])] = float(l[3])
return np.array([eigen_v_dict[x] for x in sorted(eigen_v_dict.keys())])
def read_eigen_vectors(tsvd_f, n_PCs, n_phes, n_vars):
eigen_phe_dok = dok_matrix((n_phes, n_PCs), dtype = np.float)
eigen_var_dok = dok_matrix((n_vars, n_PCs), dtype = np.float)
with gzip.open(tsvd_f) as f:
for line in f:
l = line.split('\t')
if( l[0] == '0' and int(l[1]) < n_phes and int(l[2]) < n_PCs):
eigen_phe_dok[int(l[1]), int(l[2])] = float(l[3])
elif(l[0] == '2' and int(l[2]) < n_vars and int(l[1]) < n_PCs):
eigen_var_dok[int(l[2]), int(l[1])] = float(l[3])
return np.array(eigen_phe_dok.todense()), np.array(eigen_var_dok.todense())
def dok_from_tsv(tsv_f, dtype=np.float):
logger = logging.getLogger('dok_from_tsv')
logger.info('reading {}'.format(tsv_f))
df = pd.read_csv(tsv_f, sep='\t', compression='gzip')
logger.info('constructing a dok matrix of size {} x {}'.format(len(set(df.ix[:, 0])), len(set(df.ix[:, 1]))))
dok_mat = dok_matrix(
(len(set(df.ix[:, 0])), len(set(df.ix[:, 1]))),
dtype = dtype
)
dok_mat.update(
dict(
zip(
zip(
df.ix[:, 0].tolist(),
df.ix[:, 1].tolist()
),
df.ix[:, 2].tolist()
)
)
)
return dok_mat
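# Illustrative note (assumed toy input, not from the GBE data): a three-column TSV
# with rows like (0, 1, 0.5) and (2, 0, 1.25) fills dok_mat[0, 1] = 0.5 and
# dok_mat[2, 0] = 1.25; dok_matrix stores only these explicitly set entries
# (a dict keyed by (row, col)) until it is densified.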
def read_ssvd_rds(rds_file):
'''
Read RDS file that contains the results of ssvd
'''
r_funcs = collections.OrderedDict()
for func in ['readRDS', 'as.matrix']:
r_funcs[func] = robjects.r[func]
res = r_funcs['readRDS'](rds_file)
return dict(zip(
res.names,
[x[0,0] if x.shape == (1,1) else x
for x in
[np.array(r_funcs['as.matrix'](x))
for x in list(res)]]
))
def compute_factor(eigen_vec, eigen_values):
return np.dot(eigen_vec, np.diag(eigen_values))
def compute_contribution(factor):
return (factor ** 2) / (np.sum(factor ** 2, axis = 0).reshape((1, factor.shape[1])))
def compute_cos(factor):
return (factor ** 2) / (np.sum(factor ** 2, axis = 1).reshape((factor.shape[0], 1)))
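# Illustrative sketch (not part of the original script; values are made up):
# for a factor matrix F, compute_contribution returns F_ij**2 / sum_i F_ij**2
# (each column sums to 1), while compute_cos returns F_ij**2 / sum_j F_ij**2
# (each row sums to 1). For example:
# >>> F = np.array([[1., 2.], [3., 4.]])
# >>> compute_contribution(F)[:, 0]   # array([0.1, 0.9])
# >>> compute_cos(F)[0, :]            # array([0.2, 0.8])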
def compute_contribution_gene(
var2gene_dict, label_var, contribution_var
):
contribution_var_df = | pd.DataFrame(contribution_var) | pandas.DataFrame |
# -*- coding: utf-8 -*-
"""
Covid-19 in São Paulo
Generates charts to track the Covid-19 pandemic
in the city and state of São Paulo.
@author: https://github.com/DaviSRodrigues
"""
from datetime import datetime, timedelta
from io import StringIO
import locale
import math
from tableauscraper import TableauScraper
import traceback
import unicodedata
import pandas as pd
import plotly.graph_objects as go
import plotly.io as pio
from plotly.subplots import make_subplots
import requests
def main():
locale.setlocale(locale.LC_ALL, 'pt_BR.UTF-8')
print('Carregando dados...')
hospitais_campanha, leitos_municipais, leitos_municipais_privados, leitos_municipais_total = carrega_dados_cidade()
dados_munic, dados_estado, isolamento, leitos_estaduais, internacoes, doencas, dados_raciais, dados_vacinacao, doses_aplicadas, doses_recebidas, dados_imunizantes, atualizacao_imunizantes = carrega_dados_estado()
print('\nLimpando e enriquecendo dos dados...')
dados_cidade, dados_munic, hospitais_campanha, leitos_municipais, leitos_municipais_privados, leitos_municipais_total, dados_estado, isolamento, leitos_estaduais, internacoes, doencas, dados_raciais, dados_vacinacao, dados_imunizantes = pre_processamento(hospitais_campanha, leitos_municipais, leitos_municipais_privados, leitos_municipais_total, dados_estado, isolamento, leitos_estaduais, internacoes, doencas, dados_raciais, dados_vacinacao, doses_aplicadas, doses_recebidas, dados_munic, dados_imunizantes, atualizacao_imunizantes)
evolucao_cidade, evolucao_estado = gera_dados_evolucao_pandemia(dados_munic, dados_estado, isolamento, dados_vacinacao, internacoes)
evolucao_cidade, evolucao_estado = gera_dados_semana(evolucao_cidade, evolucao_estado, leitos_estaduais, isolamento, internacoes)
print('\nGerando gráficos e tabelas...')
gera_graficos(dados_munic, dados_cidade, hospitais_campanha, leitos_municipais, leitos_municipais_privados, leitos_municipais_total, dados_estado, isolamento, leitos_estaduais, evolucao_cidade, evolucao_estado, internacoes, doencas, dados_raciais, dados_vacinacao, dados_imunizantes)
print('\nAtualizando serviceWorker.js...')
atualiza_service_worker(dados_estado)
print('\nFim')
def carrega_dados_cidade():
hospitais_campanha = pd.read_csv('dados/hospitais_campanha_sp.csv', sep=',')
leitos_municipais = pd.read_csv('dados/leitos_municipais.csv', sep=',')
leitos_municipais_privados = pd.read_csv('dados/leitos_municipais_privados.csv', sep=',')
leitos_municipais_total = pd.read_csv('dados/leitos_municipais_total.csv', sep=',')
return hospitais_campanha, leitos_municipais, leitos_municipais_privados, leitos_municipais_total
def carrega_dados_estado():
hoje = data_processamento
ano = hoje.strftime('%Y')
mes = hoje.strftime('%m')
data = hoje.strftime('%Y%m%d')
try:
print('\tAtualizando dados dos municípios...')
URL = 'https://raw.githubusercontent.com/seade-R/dados-covid-sp/master/data/dados_covid_sp.csv'
dados_munic = | pd.read_csv(URL, sep=';', decimal=',') | pandas.read_csv |
import numpy as np
import pandas as pd
from dowhy.causal_estimator import CausalEstimator
class PropensityScoreEstimator(CausalEstimator):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
# We need to initialize the model when we create any propensity score estimator
self._propensity_score_model = None
# Check if the treatment is one-dimensional
if len(self._treatment_name) > 1:
            error_msg = str(self.__class__) + " cannot handle more than one treatment variable"
raise Exception(error_msg)
# Checking if the treatment is binary
if not pd.api.types.is_bool_dtype(self._data[self._treatment_name[0]]):
error_msg = "Propensity score methods are applicable only for binary treatments"
self.logger.error(error_msg)
raise Exception(error_msg)
self.logger.debug("Back-door variables used:" +
",".join(self._target_estimand.backdoor_variables))
self._observed_common_causes_names = self._target_estimand.backdoor_variables
if self._observed_common_causes_names:
self._observed_common_causes = self._data[self._observed_common_causes_names]
# Convert the categorical variables into dummy/indicator variables
# Basically, this gives a one hot encoding for each category
# The first category is taken to be the base line.
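            # Illustrative example (assumed data, not from dowhy): with drop_first=True
            # a three-level category yields two indicator columns, e.g.
            # pd.get_dummies(pd.Series(['a', 'b', 'c', 'a']), drop_first=True)
            # produces columns 'b' and 'c'; 'a' is the implicit baseline (both zero).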
self._observed_common_causes = | pd.get_dummies(self._observed_common_causes, drop_first=True) | pandas.get_dummies |
# %% [markdown]
# pip install pykrx
# %%
from datetime import datetime, timedelta
import FinanceDataReader as fdr
import yfinance as yf
import numpy as np
import pandas as pd
from pykrx import stock
import time
import bt
import warnings
# from tqdm import tqdm
warnings.filterwarnings(action='ignore')
# pd.options.display.float_format = '{:.4f}'.format
# %matplotlib inline
from IPython.display import display, HTML
# Allow a single cell to display multiple outputs
from IPython.core.interactiveshell import InteractiveShell
InteractiveShell.ast_node_interactivity = "all"
# Display settings for large Pandas DataFrames
pd.set_option('display.float_format', lambda x: '%.3f' % x)
pd.set_option('max_columns', None)
# %%
#from strategy import*
#from utils import *
# %%
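# 장중이냐 -- presumably "is the market open?": returns True between 09:00 and 15:30 (KRX trading hours).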
def 장중이냐(now):
return (9 <= now.hour <= 14) or (now.hour == 15 and (now.minute <= 30))
# %%
def AMS(x):
    ''' x : Series (a column of the DataFrame)
    x[-1] : the reference date, i.e. the current value of x
    score 1 if (today / past - 1) > 0, otherwise 0
    => today / past > 1 => today > past => x[-1] > x
'''
# print(f"{list(np.where(x[-1]>x, 1, 0)[:-1])}, {len(np.where(x[-1]>x, 1, 0)[:-1])}")
    return np.mean(np.where(x[-1]>x, 1, 0)[:-1]) # [:-1] excludes the same-day comparison
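# Example (illustrative, assumed values -- not in the original notebook):
# AMS(pd.Series([10, 12, 11, 13])) compares the latest value 13 with [10, 12, 11]
# -> all three are lower, so the score is 1.0; with [10, 14, 11, 13] it would be 2/3.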
# %%
# get_data
# code_list is tickers['code']
# start : before_13months
# end : baseday
def get_data(code_list, start, end):
df = pd.DataFrame()
tot = len(code_list)
count = 0
for code in code_list: # tqdm(code_list)
count += 1
print(f"{count}/{tot} : {code}")
t = fdr.DataReader(code, start, end)['Close'].rename(code)
# t = stock.get_market_ohlcv_by_date(start, end, code)['종가'].rename(code)
df = bt.merge(df, t)
time.sleep(0.75)
    # Drop columns whose last value is NaN.
for c in df.columns:
if pd.isna(df.iloc[-1][c]):
print(f"drop : {c}")
df.drop(c, axis=1, inplace=True)
return df
# %%
def 종목명(code, df):
""" 사용예) 종목명('A153130', tickers) or 종목명('153130', tickers)
"""
if code.startswith('A'):
return df[df['종목코드'] == code]['종목명'].values[0]
else:
return df[df['code'] == code]['종목명'].values[0]
def 종목코드(name, df):
""" A를 제외한 종목코드를 반환한다. FinanceDataReader에서 사용
사용예: 종목코드("KODEX달러선물레버리지", tickers)
"""
_df = df.copy()
_df['종목명'] = _df['종목명'].str.replace(' ', '')
return _df[_df['종목명'] == name.replace(' ', '')]['code'].values[0]
# %%
def pickup(df, 제외직전개월수=1):
"""df에서 모멘텀이 가장 좋은 3종목을 선택한다.
Args :
- df : 가격 데이터프레임
- 제외직전개월수 : df에서 제외할 데이터 개월 수
- now : 가격 데이터프레임의 가장 아래 시간
"""
t0 = df.index[-1]
제외 = t0 - pd.DateOffset(months=제외직전개월수)
m6 = t0 - pd.DateOffset(months=6)
m9 = t0 - pd.DateOffset(months=9)
m12 = t0 - pd.DateOffset(months=12)
m13 = t0 - pd.DateOffset(months=13)
    m6_returns = (df.loc[m6:제외,:].calc_total_return()+1) # 6-month return excluding the most recent month (prices currently include holidays)
    m9_returns = (df.loc[m9:제외,:].calc_total_return()+1) # 9-month return excluding the most recent month
    m12_returns = (df.loc[m12:제외,:].calc_total_return()+1) # 12-month return excluding the most recent month
average_returns = (m6_returns+m9_returns+m12_returns)/3
    # Compute ID, excluding roughly the most recent 30 days
    # Be careful with dropna: some tickers (e.g. shipbuilders) contain zeros, so drop only rows where every column is NaN.
    len_m1 = round(len(df.loc[m12:,:])/12) # number of trading days in one month
# print(f"{t0}, {m1}, {m6}, {m9}, {m12}, {m13}, {len_m1}")
pos_percent = np.where(df.loc[m13:,:].pct_change(len_m1).dropna(how='all') > 0.0, 1, 0).mean(axis=0)
neg_percent = 1 - pos_percent
ID = (neg_percent - pos_percent)
momentum = average_returns * ID * -1
print(f"pickup : ======================\n{momentum.nlargest(3)}\n=================================")
return list(momentum.nlargest(3).index)
# Example
# pickup(price_df, 0)
# %%
import ssl
ssl._create_default_https_context = ssl._create_unverified_context
def calcOECD시그널비중():
try:
df = pd.read_csv("https://stats.oecd.org/sdmx-json/data/DP_LIVE/KOR.CLI.AMPLITUD.LTRENDIDX.M/OECD?contentType=csv&detail=code&separator=comma&csv-lang=en&startPeriod=2021-01")
oecd = df[['TIME', 'Value']]
oecd.set_index('TIME', inplace=True)
oecd.index = pd.to_datetime(oecd.index)
oecd['전월비'] = oecd.pct_change()+1
oecd.drop('Value', axis=1, inplace=True)
target_weight = 1 if oecd.iloc[-1][0] > 1 else 0
# target_weights['cash'] = 1 - target_weights
# target_weights.columns = ['base1', cash]
except:
raise Exception('OECD 데이터를 못 받아왔습니다.')
return target_weight
# Example
# calcOECD시그널비중()
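# Note (illustrative numbers, not actual OECD data): the signal weight is 1 when the
# latest month-over-month CLI ratio exceeds 1, e.g. CLI values 99.8 -> 100.1 give
# 100.1 / 99.8 ~= 1.003 > 1, hence weight 1; a falling CLI gives weight 0.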
# %% [markdown]
# If run in the evening, rebalancing happens the next day;
# if run before 9 a.m., it computes today's rebalancing weights (based on yesterday's close);
# if run during market hours, it also computes today's rebalancing weights (based on yesterday's close).
# %%
# Need to read foreign investor supply/demand (flows)
# Need to read retail investor flows as well
def calc외국인수급비중(df):
baseday = df.index[-1]
before_one_year = baseday - | pd.DateOffset(years=1) | pandas.DateOffset |
# coding=utf-8
# pylint: disable-msg=E1101,W0612
from datetime import datetime, timedelta
import operator
from itertools import product, starmap
from numpy import nan, inf
import numpy as np
import pandas as pd
from pandas import (Index, Series, DataFrame, isnull, bdate_range,
NaT, date_range, timedelta_range,
_np_version_under1p8)
from pandas.tseries.index import Timestamp
from pandas.tseries.tdi import Timedelta
import pandas.core.nanops as nanops
from pandas.compat import range, zip
from pandas import compat
from pandas.util.testing import assert_series_equal, assert_almost_equal
import pandas.util.testing as tm
from .common import TestData
class TestSeriesOperators(TestData, tm.TestCase):
_multiprocess_can_split_ = True
def test_comparisons(self):
left = np.random.randn(10)
right = np.random.randn(10)
left[:3] = np.nan
result = nanops.nangt(left, right)
with np.errstate(invalid='ignore'):
expected = (left > right).astype('O')
expected[:3] = np.nan
assert_almost_equal(result, expected)
s = Series(['a', 'b', 'c'])
s2 = Series([False, True, False])
# it works!
exp = Series([False, False, False])
tm.assert_series_equal(s == s2, exp)
tm.assert_series_equal(s2 == s, exp)
def test_op_method(self):
def check(series, other, check_reverse=False):
simple_ops = ['add', 'sub', 'mul', 'floordiv', 'truediv', 'pow']
if not compat.PY3:
simple_ops.append('div')
for opname in simple_ops:
op = getattr(Series, opname)
if op == 'div':
alt = operator.truediv
else:
alt = getattr(operator, opname)
result = op(series, other)
expected = alt(series, other)
tm.assert_almost_equal(result, expected)
if check_reverse:
rop = getattr(Series, "r" + opname)
result = rop(series, other)
expected = alt(other, series)
tm.assert_almost_equal(result, expected)
check(self.ts, self.ts * 2)
check(self.ts, self.ts[::2])
check(self.ts, 5, check_reverse=True)
check(tm.makeFloatSeries(), tm.makeFloatSeries(), check_reverse=True)
def test_neg(self):
assert_series_equal(-self.series, -1 * self.series)
def test_invert(self):
assert_series_equal(-(self.series < 0), ~(self.series < 0))
def test_div(self):
with np.errstate(all='ignore'):
# no longer do integer div for any ops, but deal with the 0's
p = DataFrame({'first': [3, 4, 5, 8], 'second': [0, 0, 0, 3]})
result = p['first'] / p['second']
expected = Series(
p['first'].values.astype(float) / p['second'].values,
dtype='float64')
expected.iloc[0:3] = np.inf
assert_series_equal(result, expected)
result = p['first'] / 0
expected = Series(np.inf, index=p.index, name='first')
assert_series_equal(result, expected)
p = p.astype('float64')
result = p['first'] / p['second']
expected = Series(p['first'].values / p['second'].values)
assert_series_equal(result, expected)
p = DataFrame({'first': [3, 4, 5, 8], 'second': [1, 1, 1, 1]})
result = p['first'] / p['second']
assert_series_equal(result, p['first'].astype('float64'),
check_names=False)
self.assertTrue(result.name is None)
self.assertFalse(np.array_equal(result, p['second'] / p['first']))
# inf signing
s = Series([np.nan, 1., -1.])
result = s / 0
expected = Series([np.nan, np.inf, -np.inf])
assert_series_equal(result, expected)
# float/integer issue
# GH 7785
p = DataFrame({'first': (1, 0), 'second': (-0.01, -0.02)})
expected = Series([-0.01, -np.inf])
result = p['second'].div(p['first'])
assert_series_equal(result, expected, check_names=False)
result = p['second'] / p['first']
assert_series_equal(result, expected)
# GH 9144
s = Series([-1, 0, 1])
result = 0 / s
expected = Series([0.0, nan, 0.0])
assert_series_equal(result, expected)
result = s / 0
expected = Series([-inf, nan, inf])
assert_series_equal(result, expected)
result = s // 0
expected = Series([-inf, nan, inf])
assert_series_equal(result, expected)
def test_operators(self):
def _check_op(series, other, op, pos_only=False,
check_dtype=True):
left = np.abs(series) if pos_only else series
right = np.abs(other) if pos_only else other
cython_or_numpy = op(left, right)
python = left.combine(right, op)
tm.assert_series_equal(cython_or_numpy, python,
check_dtype=check_dtype)
def check(series, other):
simple_ops = ['add', 'sub', 'mul', 'truediv', 'floordiv', 'mod']
for opname in simple_ops:
_check_op(series, other, getattr(operator, opname))
_check_op(series, other, operator.pow, pos_only=True)
_check_op(series, other, lambda x, y: operator.add(y, x))
_check_op(series, other, lambda x, y: operator.sub(y, x))
_check_op(series, other, lambda x, y: operator.truediv(y, x))
_check_op(series, other, lambda x, y: operator.floordiv(y, x))
_check_op(series, other, lambda x, y: operator.mul(y, x))
_check_op(series, other, lambda x, y: operator.pow(y, x),
pos_only=True)
_check_op(series, other, lambda x, y: operator.mod(y, x))
check(self.ts, self.ts * 2)
check(self.ts, self.ts * 0)
check(self.ts, self.ts[::2])
check(self.ts, 5)
def check_comparators(series, other, check_dtype=True):
_check_op(series, other, operator.gt, check_dtype=check_dtype)
_check_op(series, other, operator.ge, check_dtype=check_dtype)
_check_op(series, other, operator.eq, check_dtype=check_dtype)
_check_op(series, other, operator.lt, check_dtype=check_dtype)
_check_op(series, other, operator.le, check_dtype=check_dtype)
check_comparators(self.ts, 5)
check_comparators(self.ts, self.ts + 1, check_dtype=False)
def test_operators_empty_int_corner(self):
s1 = Series([], [], dtype=np.int32)
s2 = Series({'x': 0.})
tm.assert_series_equal(s1 * s2, Series([np.nan], index=['x']))
def test_operators_timedelta64(self):
# invalid ops
self.assertRaises(Exception, self.objSeries.__add__, 1)
self.assertRaises(Exception, self.objSeries.__add__,
np.array(1, dtype=np.int64))
self.assertRaises(Exception, self.objSeries.__sub__, 1)
self.assertRaises(Exception, self.objSeries.__sub__,
np.array(1, dtype=np.int64))
        # series ops
v1 = date_range('2012-1-1', periods=3, freq='D')
v2 = date_range('2012-1-2', periods=3, freq='D')
rs = Series(v2) - Series(v1)
xp = Series(1e9 * 3600 * 24,
rs.index).astype('int64').astype('timedelta64[ns]')
assert_series_equal(rs, xp)
self.assertEqual(rs.dtype, 'timedelta64[ns]')
df = DataFrame(dict(A=v1))
td = Series([timedelta(days=i) for i in range(3)])
self.assertEqual(td.dtype, 'timedelta64[ns]')
# series on the rhs
result = df['A'] - df['A'].shift()
self.assertEqual(result.dtype, 'timedelta64[ns]')
result = df['A'] + td
self.assertEqual(result.dtype, 'M8[ns]')
# scalar Timestamp on rhs
maxa = df['A'].max()
tm.assertIsInstance(maxa, Timestamp)
resultb = df['A'] - df['A'].max()
self.assertEqual(resultb.dtype, 'timedelta64[ns]')
# timestamp on lhs
result = resultb + df['A']
values = [Timestamp('20111230'), Timestamp('20120101'),
Timestamp('20120103')]
expected = Series(values, name='A')
assert_series_equal(result, expected)
# datetimes on rhs
result = df['A'] - datetime(2001, 1, 1)
expected = Series(
[timedelta(days=4017 + i) for i in range(3)], name='A')
assert_series_equal(result, expected)
self.assertEqual(result.dtype, 'm8[ns]')
d = datetime(2001, 1, 1, 3, 4)
resulta = df['A'] - d
self.assertEqual(resulta.dtype, 'm8[ns]')
# roundtrip
resultb = resulta + d
assert_series_equal(df['A'], resultb)
# timedeltas on rhs
td = timedelta(days=1)
resulta = df['A'] + td
resultb = resulta - td
assert_series_equal(resultb, df['A'])
self.assertEqual(resultb.dtype, 'M8[ns]')
# roundtrip
td = timedelta(minutes=5, seconds=3)
resulta = df['A'] + td
resultb = resulta - td
assert_series_equal(df['A'], resultb)
self.assertEqual(resultb.dtype, 'M8[ns]')
# inplace
value = rs[2] + np.timedelta64(timedelta(minutes=5, seconds=1))
rs[2] += np.timedelta64(timedelta(minutes=5, seconds=1))
self.assertEqual(rs[2], value)
def test_operator_series_comparison_zerorank(self):
# GH 13006
result = np.float64(0) > pd.Series([1, 2, 3])
expected = 0.0 > pd.Series([1, 2, 3])
self.assert_series_equal(result, expected)
result = pd.Series([1, 2, 3]) < np.float64(0)
expected = pd.Series([1, 2, 3]) < 0.0
self.assert_series_equal(result, expected)
result = np.array([0, 1, 2])[0] > pd.Series([0, 1, 2])
expected = 0.0 > pd.Series([1, 2, 3])
self.assert_series_equal(result, expected)
def test_timedeltas_with_DateOffset(self):
# GH 4532
# operate with pd.offsets
s = Series([Timestamp('20130101 9:01'), Timestamp('20130101 9:02')])
result = s + pd.offsets.Second(5)
result2 = pd.offsets.Second(5) + s
expected = Series([Timestamp('20130101 9:01:05'), Timestamp(
'20130101 9:02:05')])
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
result = s - pd.offsets.Second(5)
result2 = -pd.offsets.Second(5) + s
expected = Series([Timestamp('20130101 9:00:55'), Timestamp(
'20130101 9:01:55')])
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
result = s + pd.offsets.Milli(5)
result2 = pd.offsets.Milli(5) + s
expected = Series([Timestamp('20130101 9:01:00.005'), Timestamp(
'20130101 9:02:00.005')])
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
result = s + pd.offsets.Minute(5) + pd.offsets.Milli(5)
expected = Series([Timestamp('20130101 9:06:00.005'), Timestamp(
'20130101 9:07:00.005')])
assert_series_equal(result, expected)
# operate with np.timedelta64 correctly
result = s + np.timedelta64(1, 's')
result2 = np.timedelta64(1, 's') + s
expected = Series([Timestamp('20130101 9:01:01'), Timestamp(
'20130101 9:02:01')])
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
result = s + np.timedelta64(5, 'ms')
result2 = np.timedelta64(5, 'ms') + s
expected = Series([Timestamp('20130101 9:01:00.005'), Timestamp(
'20130101 9:02:00.005')])
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
# valid DateOffsets
for do in ['Hour', 'Minute', 'Second', 'Day', 'Micro', 'Milli',
'Nano']:
op = getattr(pd.offsets, do)
s + op(5)
op(5) + s
def test_timedelta_series_ops(self):
# GH11925
s = Series(timedelta_range('1 day', periods=3))
ts = Timestamp('2012-01-01')
expected = Series(date_range('2012-01-02', periods=3))
assert_series_equal(ts + s, expected)
assert_series_equal(s + ts, expected)
expected2 = Series(date_range('2011-12-31', periods=3, freq='-1D'))
assert_series_equal(ts - s, expected2)
assert_series_equal(ts + (-s), expected2)
def test_timedelta64_operations_with_DateOffset(self):
# GH 10699
td = Series([timedelta(minutes=5, seconds=3)] * 3)
result = td + pd.offsets.Minute(1)
expected = Series([timedelta(minutes=6, seconds=3)] * 3)
assert_series_equal(result, expected)
result = td - pd.offsets.Minute(1)
expected = Series([timedelta(minutes=4, seconds=3)] * 3)
assert_series_equal(result, expected)
result = td + Series([pd.offsets.Minute(1), pd.offsets.Second(3),
pd.offsets.Hour(2)])
expected = Series([timedelta(minutes=6, seconds=3), timedelta(
minutes=5, seconds=6), timedelta(hours=2, minutes=5, seconds=3)])
assert_series_equal(result, expected)
result = td + pd.offsets.Minute(1) + pd.offsets.Second(12)
expected = Series([timedelta(minutes=6, seconds=15)] * 3)
assert_series_equal(result, expected)
# valid DateOffsets
for do in ['Hour', 'Minute', 'Second', 'Day', 'Micro', 'Milli',
'Nano']:
op = getattr(pd.offsets, do)
td + op(5)
op(5) + td
td - op(5)
op(5) - td
def test_timedelta64_operations_with_timedeltas(self):
# td operate with td
td1 = Series([timedelta(minutes=5, seconds=3)] * 3)
td2 = timedelta(minutes=5, seconds=4)
result = td1 - td2
expected = Series([timedelta(seconds=0)] * 3) - Series([timedelta(
seconds=1)] * 3)
self.assertEqual(result.dtype, 'm8[ns]')
assert_series_equal(result, expected)
result2 = td2 - td1
expected = (Series([timedelta(seconds=1)] * 3) - Series([timedelta(
seconds=0)] * 3))
assert_series_equal(result2, expected)
# roundtrip
assert_series_equal(result + td2, td1)
# Now again, using pd.to_timedelta, which should build
# a Series or a scalar, depending on input.
td1 = Series(pd.to_timedelta(['00:05:03'] * 3))
td2 = pd.to_timedelta('00:05:04')
result = td1 - td2
expected = Series([timedelta(seconds=0)] * 3) - Series([timedelta(
seconds=1)] * 3)
self.assertEqual(result.dtype, 'm8[ns]')
assert_series_equal(result, expected)
result2 = td2 - td1
expected = (Series([timedelta(seconds=1)] * 3) - Series([timedelta(
seconds=0)] * 3))
assert_series_equal(result2, expected)
# roundtrip
assert_series_equal(result + td2, td1)
def test_timedelta64_operations_with_integers(self):
# GH 4521
# divide/multiply by integers
startdate = Series(date_range('2013-01-01', '2013-01-03'))
enddate = Series(date_range('2013-03-01', '2013-03-03'))
s1 = enddate - startdate
s1[2] = np.nan
s2 = Series([2, 3, 4])
expected = Series(s1.values.astype(np.int64) / s2, dtype='m8[ns]')
expected[2] = np.nan
result = s1 / s2
assert_series_equal(result, expected)
s2 = | Series([20, 30, 40]) | pandas.Series |
#!/usr/bin/env python3
# coding: utf-8
import sys
import pickle
import sklearn
import numpy as np
import pandas as pd
def load_sepsis_model():
with open('nbclf.pkl','rb') as f:
clf = pickle.load(f)
return clf
def get_sepsis_score(data_mat, clf):
# convert d to dataframe from numpy
varofint = ['HR','O2Sat','Temp','SBP','MAP','DBP']
    d = pd.DataFrame(data=data_mat[-24:,0:6], columns=varofint)  # select only the latest 24 hours of data
sepD = d[varofint].transform(lambda x: x.interpolate(limit=25,limit_direction='both') )
if data_mat.shape[0] > 3:
nameL = ['sumHR','sumO2','sumTemp','sumSP','sumMAP','sumDP', 'varHR','varO2','varTemp','varSP','varMAP','varDP','maxHR','maxO2','maxTemp','maxSP','maxMAP','maxDP', 'minHR','minO2','minTemp','minSP','minMAP','minDP']
#sepD = interpD
rollsumD = sepD[varofint].rolling(3, min_periods=1).sum().reset_index()
rollsumD = rollsumD.fillna(0)
rollvarD = sepD[varofint].rolling(3, min_periods=1).var().reset_index()
rollvarD = rollvarD.fillna(0)
rollmaxD = sepD[varofint].rolling(3, min_periods=1).max().reset_index()
rollmaxD = rollmaxD.fillna(0)
rollminD = sepD[varofint].rolling(3, min_periods=1).min().reset_index()
rollminD = rollminD.fillna(0)
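        # Note (illustrative, assumed values): rolling(3, min_periods=1).sum() on
        # pd.Series([1, 2, 3, 4]) yields [1, 3, 6, 9] -- the first two windows are
        # shorter than 3 rows because min_periods=1; var/max/min are windowed the same way.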
else:
#print(0.5,0)
return 0.5, 0
rowN = sepD.shape[0]
y = np.zeros(rowN)
proba = np.zeros(rowN)
for i in range(rowN):
featureD = | pd.DataFrame(columns=['sumHR','sumO2','sumTemp','sumSP','sumMAP','sumDP', 'varHR','varO2','varTemp','varSP','varMAP','varDP','maxHR','maxO2','maxTemp','maxSP','maxMAP','maxDP', 'minHR','minO2','minTemp','minSP','minMAP','minDP']) | pandas.DataFrame |
# Import libraries
import json
import matplotlib.pyplot as plt, numpy as np, pandas as pd
from sklearn.neighbors import radius_neighbors_graph
from scipy.sparse.csgraph import connected_components
# Contact spacing
dist1 = 0.2
dist2 = 0.5
# Get connected components and distances
data = pd.read_csv("output/dispcont.csv", names=["x", "y", "w", "one"], usecols=["x", "y"])
data["cc"] = connected_components(radius_neighbors_graph(data.values, 0.06))[1]
ccs = data.groupby("cc").count().reset_index().sort_values("x")[-4:]["cc"].values
data["dist"] = np.sqrt(data["x"]**2 + data["y"]**2)
# Get points
refpts = []
pts = []
for cc in ccs:
# Filter to connected component
ccdata = data.loc[data["cc"] == cc]
# Reference point that is closest to center
refpt = ccdata.loc[ccdata["dist"].idxmin()]
ccdata["refdist"] = np.sqrt((ccdata["x"]-refpt["x"])**2 + (ccdata["y"]-refpt["y"])**2)
# Get first two points
closest_to_target_dist = ccdata.iloc[(ccdata["refdist"]-dist1).abs().argsort()]
pt1 = closest_to_target_dist.iloc[0]
for i, pt2 in closest_to_target_dist.iterrows():
if np.abs((pt2["x"]-pt1["x"])**2 + (pt2["y"]-pt1["y"])**2) > dist1/3:
break
# Get second two points
closest_to_target_dist = ccdata.iloc[(ccdata["refdist"]-dist2).abs().argsort()]
pt3 = closest_to_target_dist.iloc[0]
for i, pt4 in closest_to_target_dist.iterrows():
if np.abs((pt4["x"]-pt3["x"])**2 + (pt4["y"]-pt3["y"])**2) > dist2/3:
break
# Add points to list
refpts.append(refpt)
pts += [pt1, pt2, pt3, pt4]
# Convert to DataFrame
pts = pd.DataFrame(pts)
refpts = | pd.DataFrame(refpts) | pandas.DataFrame |
import pandas as pd
DELIMITER = '|'
cols = ['Base', 'E_nX', 'E_X', 'C_nX', 'C_X']
connectives = ['I repeat', 'again', 'in short', 'therefore', 'that is', 'thus']
expanders = {
'{Prep}': ['Near', 'By', 'Nearby'], # ['near', 'nearby', 'by']
'{E/D}': ['Here is', 'This is'], # ['Here is', 'There is', 'That is', 'This is', 'It is']
'{Comp}': ['that'], # ['that', 'which']
'{Conn}': ['; ' + c + ', ' for c in connectives] + ['. ' + c[0].upper() + c[1:] + ', ' for c in connectives]
}
# Returns a list of dicts; each tuple becomes a dict
def read_items(filename):
df = pd.read_csv(filename, sep='\t')
assert ~df.isna().values.any(), 'Input CSV has NA'
return df.to_dict('records') # Each dict has keys A, B, Vtr, Vin1, Vin2
# Returns a list of dataframes
def read_templates(*filenames):
templates = []
for f in filenames:
# Read into pandas df
df = pd.read_csv(f, sep='\t', na_filter=False)
# Expand the last four columns into lists of strings, rather than single strings
for c in cols:
df[c] = df[c].apply(lambda s: []) if (not df[c].any()) else df[c].apply(lambda s: s.split(DELIMITER))
templates.append(df)
return templates
def _map(item, w):
return item[w] if (w in item.keys()) else w
# Returns a dataframe much like template, but phrases are split into string-arrays instead of full strings
def expand_template(items, template):
"""
For each tuple, create all possible phrases defined by each column of the template.
"""
expanded_lines = []
for item in items: # Recall that item is a dict, and will serve as a map from templateland to languageland
for i, line in template.iterrows():
phrases_per_line = []
for c in cols:
# "Forms" may be a Base (1st column) or an implication (rest of the columns).
# After base, each column may be empty, contain one form, or contain multiple form. Hence line[c] is a list.
forms = line[c]
if forms:
forms = [s.split(' ') for s in forms]
forms = [[_map(item, w) for w in s] for s in forms]
phrases_per_line.append(forms)
# Now phrases_per_line contains all the phrases given by each column, for one line (row) of the template
# base, e_nX, e_X, c_nX, c_X = phrases_per_line # e.g. if we were to unpack this line
expanded_lines.append(phrases_per_line)
# Note that one "item" or original line from the tuples yields many sentences/expanded lines
return pd.DataFrame.from_records(expanded_lines, columns=cols)
# Joins a string list and adds a space or | character in-between strings
def _join(str_list, delimiter=' '):
return str_list[0] + ''.join([delimiter + w for w in str_list[1:]]) if str_list else None
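# Example (illustrative): _join(['a', 'b', 'c']) -> 'a b c', while
# _join(['a', 'b'], delimiter=DELIMITER) -> 'a|b'; an empty list returns None.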
def form_unexpanded_sentences(items, template, capitalize=True):
phrases = expand_template(items, template)
unexpanded_sentences = []
for i, line in phrases.iterrows():
base = _join(line[cols[0]][0])
base = (base[0].upper() + base[1:]) if capitalize else base
sentences_per_line = []
for c in cols[1:]:
implications = line[c]
if implications:
implications = [_join(s) for s in implications]
sentences = [(base + ' {Conn} ' + s + '.') for s in implications]
sentences = _join(sentences, delimiter=DELIMITER) # Use a special delimiter, since Pandas will serialize lists/strings on writing to file
sentences_per_line.append(sentences)
# print('adding {} for {}'.format(sentences, c))
unexpanded_sentences.append(sentences_per_line)
return | pd.DataFrame.from_records(unexpanded_sentences, columns=cols[1:]) | pandas.DataFrame.from_records |
from __future__ import division
from datetime import datetime
import sys
if sys.version_info < (3, 3):
import mock
else:
from unittest import mock
import pandas as pd
import numpy as np
import random
from nose.tools import assert_almost_equal as aae
import bt
import bt.algos as algos
def test_algo_name():
class TestAlgo(algos.Algo):
pass
actual = TestAlgo()
assert actual.name == 'TestAlgo'
class DummyAlgo(algos.Algo):
def __init__(self, return_value=True):
self.return_value = return_value
self.called = False
def __call__(self, target):
self.called = True
return self.return_value
def test_algo_stack():
algo1 = DummyAlgo(return_value=True)
algo2 = DummyAlgo(return_value=False)
algo3 = DummyAlgo(return_value=True)
target = mock.MagicMock()
stack = bt.AlgoStack(algo1, algo2, algo3)
actual = stack(target)
assert not actual
assert algo1.called
assert algo2.called
assert not algo3.called
def test_print_temp_data():
target = mock.MagicMock()
target.temp={}
target.temp['selected'] = ['c1','c2']
target.temp['weights'] = [0.5,0.5]
algo = algos.PrintTempData()
assert algo( target )
algo = algos.PrintTempData( 'Selected: {selected}')
assert algo( target )
def test_print_info():
target = bt.Strategy('s', [])
target.temp={}
algo = algos.PrintInfo()
assert algo( target )
algo = algos.PrintInfo( '{now}: {name}')
assert algo( target )
def test_run_once():
algo = algos.RunOnce()
assert algo(None)
assert not algo(None)
assert not algo(None)
def test_run_period():
target = mock.MagicMock()
dts = pd.date_range('2010-01-01', periods=35)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
algo = algos.RunPeriod()
# adds the initial day
backtest = bt.Backtest(
bt.Strategy('', [algo]),
data
)
target.data = backtest.data
dts = target.data.index
target.now = None
assert not algo(target)
# run on first date
target.now = dts[0]
assert not algo(target)
# run on first supplied date
target.now = dts[1]
assert algo(target)
# run on last date
target.now = dts[len(dts) - 1]
assert not algo(target)
algo = algos.RunPeriod(
run_on_first_date=False,
run_on_end_of_period=True,
run_on_last_date=True
)
# adds the initial day
backtest = bt.Backtest(
bt.Strategy('', [algo]),
data
)
target.data = backtest.data
dts = target.data.index
# run on first date
target.now = dts[0]
assert not algo(target)
# first supplied date
target.now = dts[1]
assert not algo(target)
# run on last date
target.now = dts[len(dts) - 1]
assert algo(target)
# date not in index
target.now = datetime(2009, 2, 15)
assert not algo(target)
def test_run_daily():
target = mock.MagicMock()
dts = pd.date_range('2010-01-01', periods=35)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
algo = algos.RunDaily()
# adds the initial day
backtest = bt.Backtest(
bt.Strategy('',[algo]),
data
)
target.data = backtest.data
target.now = dts[1]
assert algo(target)
def test_run_weekly():
dts = pd.date_range('2010-01-01', periods=367)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
target = mock.MagicMock()
target.data = data
algo = algos.RunWeekly()
# adds the initial day
backtest = bt.Backtest(
bt.Strategy('', [algo]),
data
)
target.data = backtest.data
# end of week
target.now = dts[2]
assert not algo(target)
# new week
target.now = dts[3]
assert algo(target)
algo = algos.RunWeekly(
run_on_first_date=False,
run_on_end_of_period=True,
run_on_last_date=True
)
# adds the initial day
backtest = bt.Backtest(
bt.Strategy('', [algo]),
data
)
target.data = backtest.data
# end of week
target.now = dts[2]
assert algo(target)
# new week
target.now = dts[3]
assert not algo(target)
dts = pd.DatetimeIndex([datetime(2016, 1, 3), datetime(2017, 1, 8),datetime(2018, 1, 7)])
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
# adds the initial day
backtest = bt.Backtest(
bt.Strategy('', [algo]),
data
)
target.data = backtest.data
# check next year
target.now = dts[1]
assert algo(target)
def test_run_monthly():
dts = pd.date_range('2010-01-01', periods=367)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
target = mock.MagicMock()
target.data = data
algo = algos.RunMonthly()
# adds the initial day
backtest = bt.Backtest(
bt.Strategy('', [algo]),
data
)
target.data = backtest.data
# end of month
target.now = dts[30]
assert not algo(target)
# new month
target.now = dts[31]
assert algo(target)
algo = algos.RunMonthly(
run_on_first_date=False,
run_on_end_of_period=True,
run_on_last_date=True
)
# adds the initial day
backtest = bt.Backtest(
bt.Strategy('', [algo]),
data
)
target.data = backtest.data
# end of month
target.now = dts[30]
assert algo(target)
# new month
target.now = dts[31]
assert not algo(target)
dts = pd.DatetimeIndex([datetime(2016, 1, 3), datetime(2017, 1, 8), datetime(2018, 1, 7)])
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
# adds the initial day
backtest = bt.Backtest(
bt.Strategy('', [algo]),
data
)
target.data = backtest.data
# check next year
target.now = dts[1]
assert algo(target)
def test_run_quarterly():
dts = pd.date_range('2010-01-01', periods=367)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
target = mock.MagicMock()
target.data = data
algo = algos.RunQuarterly()
# adds the initial day
backtest = bt.Backtest(
bt.Strategy('', [algo]),
data
)
target.data = backtest.data
# end of quarter
target.now = dts[89]
assert not algo(target)
# new quarter
target.now = dts[90]
assert algo(target)
algo = algos.RunQuarterly(
run_on_first_date=False,
run_on_end_of_period=True,
run_on_last_date=True
)
# adds the initial day
backtest = bt.Backtest(
bt.Strategy('', [algo]),
data
)
target.data = backtest.data
# end of quarter
target.now = dts[89]
assert algo(target)
# new quarter
target.now = dts[90]
assert not algo(target)
dts = pd.DatetimeIndex([datetime(2016, 1, 3), datetime(2017, 1, 8), datetime(2018, 1, 7)])
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
# adds the initial day
backtest = bt.Backtest(
bt.Strategy('', [algo]),
data
)
target.data = backtest.data
# check next year
target.now = dts[1]
assert algo(target)
def test_run_yearly():
dts = pd.date_range('2010-01-01', periods=367)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
target = mock.MagicMock()
target.data = data
algo = algos.RunYearly()
# adds the initial day
backtest = bt.Backtest(
bt.Strategy('', [algo]),
data
)
target.data = backtest.data
# end of year
target.now = dts[364]
assert not algo(target)
# new year
target.now = dts[365]
assert algo(target)
algo = algos.RunYearly(
run_on_first_date=False,
run_on_end_of_period=True,
run_on_last_date=True
)
# adds the initial day
backtest = bt.Backtest(
bt.Strategy('', [algo]),
data
)
target.data = backtest.data
# end of year
target.now = dts[364]
assert algo(target)
# new year
target.now = dts[365]
assert not algo(target)
def test_run_on_date():
target = mock.MagicMock()
target.now = pd.to_datetime('2010-01-01')
algo = algos.RunOnDate('2010-01-01', '2010-01-02')
assert algo(target)
target.now = pd.to_datetime('2010-01-02')
assert algo(target)
target.now = pd.to_datetime('2010-01-03')
assert not algo(target)
def test_run_if_out_of_bounds():
algo = algos.RunIfOutOfBounds(0.5)
dts = pd.date_range('2010-01-01', periods=3)
s = bt.Strategy('s')
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
s.setup(data)
s.temp['selected'] = ['c1', 'c2']
s.temp['weights'] = {'c1': .5, 'c2':.5}
s.update(dts[0])
s.children['c1'] = bt.core.SecurityBase('c1')
s.children['c2'] = bt.core.SecurityBase('c2')
s.children['c1']._weight = 0.5
s.children['c2']._weight = 0.5
assert not algo(s)
s.children['c1']._weight = 0.25
s.children['c2']._weight = 0.75
assert not algo(s)
s.children['c1']._weight = 0.24
s.children['c2']._weight = 0.76
assert algo(s)
s.children['c1']._weight = 0.75
s.children['c2']._weight = 0.25
assert not algo(s)
s.children['c1']._weight = 0.76
s.children['c2']._weight = 0.24
assert algo(s)
def test_run_after_date():
target = mock.MagicMock()
target.now = pd.to_datetime('2010-01-01')
algo = algos.RunAfterDate('2010-01-02')
assert not algo(target)
target.now = pd.to_datetime('2010-01-02')
assert not algo(target)
target.now = pd.to_datetime('2010-01-03')
assert algo(target)
def test_run_after_days():
target = mock.MagicMock()
target.now = pd.to_datetime('2010-01-01')
algo = algos.RunAfterDays(3)
assert not algo(target)
assert not algo(target)
assert not algo(target)
assert algo(target)
def test_set_notional():
algo = algos.SetNotional('notional')
s = bt.FixedIncomeStrategy('s')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100.)
notional = pd.Series(index=dts[:2], data=[1e6, 5e6])
s.setup( data, notional = notional )
s.update(dts[0])
assert algo(s)
assert s.temp['notional_value'] == 1e6
s.update(dts[1])
assert algo(s)
assert s.temp['notional_value'] == 5e6
s.update(dts[2])
assert not algo(s)
def test_rebalance():
algo = algos.Rebalance()
s = bt.Strategy('s')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
s.setup(data)
s.adjust(1000)
s.update(dts[0])
s.temp['weights'] = {'c1': 1}
assert algo(s)
assert s.value == 1000
assert s.capital == 0
c1 = s['c1']
assert c1.value == 1000
assert c1.position == 10
assert c1.weight == 1.
s.temp['weights'] = {'c2': 1}
assert algo(s)
assert s.value == 1000
assert s.capital == 0
c2 = s['c2']
assert c1.value == 0
assert c1.position == 0
assert c1.weight == 0
assert c2.value == 1000
assert c2.position == 10
assert c2.weight == 1.
def test_rebalance_with_commissions():
algo = algos.Rebalance()
s = bt.Strategy('s')
s.set_commissions(lambda q, p: 1)
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
s.setup(data)
s.adjust(1000)
s.update(dts[0])
s.temp['weights'] = {'c1': 1}
assert algo(s)
assert s.value == 999
assert s.capital == 99
c1 = s['c1']
assert c1.value == 900
assert c1.position == 9
assert c1.weight == 900 / 999.
s.temp['weights'] = {'c2': 1}
assert algo(s)
assert s.value == 997
assert s.capital == 97
c2 = s['c2']
assert c1.value == 0
assert c1.position == 0
assert c1.weight == 0
assert c2.value == 900
assert c2.position == 9
assert c2.weight == 900. / 997
def test_rebalance_with_cash():
algo = algos.Rebalance()
s = bt.Strategy('s')
s.set_commissions(lambda q, p: 1)
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
s.setup(data)
s.adjust(1000)
s.update(dts[0])
s.temp['weights'] = {'c1': 1}
# set cash amount
s.temp['cash'] = 0.5
assert algo(s)
assert s.value == 999
assert s.capital == 599
c1 = s['c1']
assert c1.value == 400
assert c1.position == 4
assert c1.weight == 400.0 / 999
s.temp['weights'] = {'c2': 1}
# change cash amount
s.temp['cash'] = 0.25
assert algo(s)
assert s.value == 997
assert s.capital == 297
c2 = s['c2']
assert c1.value == 0
assert c1.position == 0
assert c1.weight == 0
assert c2.value == 700
assert c2.position == 7
assert c2.weight == 700.0 / 997
def test_rebalance_updatecount():
algo = algos.Rebalance()
s = bt.Strategy('s')
s.use_integer_positions(False)
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2', 'c3', 'c4','c5'], data=100)
s.setup(data)
s.adjust(1000)
s.update(dts[0])
s.temp['weights'] = {'c1': 0.25, 'c2':0.25, 'c3':0.25, 'c4':0.25}
update = bt.core.SecurityBase.update
bt.core.SecurityBase._update_call_count = 0
def side_effect(self, *args, **kwargs):
bt.core.SecurityBase._update_call_count += 1
return update(self, *args, **kwargs)
with mock.patch.object(bt.core.SecurityBase, 'update', side_effect) as mock_update:
assert algo(s)
assert s.value == 1000
assert s.capital == 0
# Update is called once when each weighted security is created (4)
# and once for each security after all allocations are made (4)
assert bt.core.SecurityBase._update_call_count == 8
s.update(dts[1])
s.temp['weights'] = {'c1': 0.5, 'c2':0.5}
update = bt.core.SecurityBase.update
bt.core.SecurityBase._update_call_count = 0
def side_effect(self, *args, **kwargs):
bt.core.SecurityBase._update_call_count += 1
return update(self, *args, **kwargs)
with mock.patch.object(bt.core.SecurityBase, 'update', side_effect) as mock_update:
assert algo(s)
# Update is called once for each weighted security before allocation (4)
# and once for each security after all allocations are made (4)
assert bt.core.SecurityBase._update_call_count == 8
s.update(dts[2])
s.temp['weights'] = {'c1': 0.25, 'c2':0.25, 'c3':0.25, 'c4':0.25}
update = bt.core.SecurityBase.update
bt.core.SecurityBase._update_call_count = 0
def side_effect(self, *args, **kwargs):
bt.core.SecurityBase._update_call_count += 1
return update(self, *args, **kwargs)
with mock.patch.object(bt.core.SecurityBase, 'update', side_effect) as mock_update:
assert algo(s)
# Update is called once for each weighted security before allocation (2)
# and once for each security after all allocations are made (4)
assert bt.core.SecurityBase._update_call_count == 6
def test_rebalance_fixedincome():
algo = algos.Rebalance()
c1 = bt.Security('c1')
c2 = bt.CouponPayingSecurity('c2')
s = bt.FixedIncomeStrategy('s', children = [c1, c2])
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
coupons = pd.DataFrame(index=dts, columns=['c2'], data=0)
s.setup(data, coupons=coupons)
s.update(dts[0])
s.temp['notional_value'] = 1000
s.temp['weights'] = {'c1': 1}
assert algo(s)
assert s.value == 0.
assert s.notional_value == 1000
assert s.capital == -1000
c1 = s['c1']
assert c1.value == 1000
assert c1.notional_value == 1000
assert c1.position == 10
assert c1.weight == 1.
s.temp['weights'] = {'c2': 1}
assert algo(s)
assert s.value == 0.
assert s.notional_value == 1000
assert s.capital == -1000*100
c2 = s['c2']
assert c1.value == 0
assert c1.notional_value == 0
assert c1.position == 0
assert c1.weight == 0
assert c2.value == 1000*100
assert c2.notional_value == 1000
assert c2.position == 1000
assert c2.weight == 1.
def test_select_all():
algo = algos.SelectAll()
s = bt.Strategy('s')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100.)
data['c1'][dts[1]] = np.nan
data['c2'][dts[1]] = 95
data['c1'][dts[2]] = -5
s.setup(data)
s.update(dts[0])
assert algo(s)
selected = s.temp['selected']
assert len(selected) == 2
assert 'c1' in selected
assert 'c2' in selected
# make sure don't keep nan
s.update(dts[1])
assert algo(s)
selected = s.temp['selected']
assert len(selected) == 1
assert 'c2' in selected
# if specify include_no_data then 2
algo2 = algos.SelectAll(include_no_data=True)
assert algo2(s)
selected = s.temp['selected']
assert len(selected) == 2
assert 'c1' in selected
assert 'c2' in selected
# behavior on negative prices
s.update(dts[2])
assert algo(s)
selected = s.temp['selected']
assert len(selected) == 1
assert 'c2' in selected
algo3 = algos.SelectAll(include_negative=True)
assert algo3(s)
selected = s.temp['selected']
assert len(selected) == 2
assert 'c1' in selected
assert 'c2' in selected
def test_select_randomly_n_none():
algo = algos.SelectRandomly(n=None) # Behaves like SelectAll
s = bt.Strategy('s')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100.)
data['c1'][dts[1]] = np.nan
data['c2'][dts[1]] = 95
data['c1'][dts[2]] = -5
s.setup(data)
s.update(dts[0])
assert algo(s)
selected = s.temp.pop('selected')
assert len(selected) == 2
assert 'c1' in selected
assert 'c2' in selected
# make sure don't keep nan
s.update(dts[1])
assert algo(s)
selected = s.temp.pop('selected')
assert len(selected) == 1
assert 'c2' in selected
# if specify include_no_data then 2
algo2 = algos.SelectRandomly(n=None, include_no_data=True)
assert algo2(s)
selected = s.temp.pop('selected')
assert len(selected) == 2
assert 'c1' in selected
assert 'c2' in selected
# behavior on negative prices
s.update(dts[2])
assert algo(s)
selected = s.temp.pop('selected')
assert len(selected) == 1
assert 'c2' in selected
algo3 = algos.SelectRandomly(n=None, include_negative=True)
assert algo3(s)
selected = s.temp.pop('selected')
assert len(selected) == 2
assert 'c1' in selected
assert 'c2' in selected
def test_select_randomly():
s = bt.Strategy('s')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2', 'c3'], data=100.)
data['c1'][dts[0]] = np.nan
data['c2'][dts[0]] = 95
data['c3'][dts[0]] = -5
s.setup(data)
s.update(dts[0])
algo = algos.SelectRandomly(n=1)
assert algo(s)
assert s.temp.pop('selected') == ['c2']
random.seed(1000)
algo = algos.SelectRandomly(n=1, include_negative=True)
assert algo(s)
assert s.temp.pop('selected') == ['c3']
random.seed(1009)
algo = algos.SelectRandomly(n=1, include_no_data=True)
assert algo(s)
assert s.temp.pop('selected') == ['c1']
random.seed(1009)
# If selected already set, it will further filter it
s.temp['selected'] = ['c2']
algo = algos.SelectRandomly(n=1, include_no_data=True)
assert algo(s)
assert s.temp.pop('selected') == ['c2']
def test_select_these():
s = bt.Strategy('s')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100.)
data['c1'][dts[1]] = np.nan
data['c2'][dts[1]] = 95
data['c1'][dts[2]] = -5
s.setup(data)
s.update(dts[0])
algo = algos.SelectThese( ['c1', 'c2'])
assert algo(s)
selected = s.temp['selected']
assert len(selected) == 2
assert 'c1' in selected
assert 'c2' in selected
algo = algos.SelectThese( ['c1'])
assert algo(s)
selected = s.temp['selected']
assert len(selected) == 1
assert 'c1' in selected
# make sure don't keep nan
s.update(dts[1])
algo = algos.SelectThese( ['c1', 'c2'])
assert algo(s)
selected = s.temp['selected']
assert len(selected) == 1
assert 'c2' in selected
# if specify include_no_data then 2
algo2 = algos.SelectThese( ['c1', 'c2'], include_no_data=True)
assert algo2(s)
selected = s.temp['selected']
assert len(selected) == 2
assert 'c1' in selected
assert 'c2' in selected
# behavior on negative prices
s.update(dts[2])
assert algo(s)
selected = s.temp['selected']
assert len(selected) == 1
assert 'c2' in selected
algo3 = algos.SelectThese(['c1', 'c2'], include_negative=True)
assert algo3(s)
selected = s.temp['selected']
assert len(selected) == 2
assert 'c1' in selected
assert 'c2' in selected
def test_select_where_all():
s = bt.Strategy('s')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100.)
data['c1'][dts[1]] = np.nan
data['c2'][dts[1]] = 95
data['c1'][dts[2]] = -5
where = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=True)
s.setup(data, where = where)
s.update(dts[0])
algo = algos.SelectWhere('where')
assert algo(s)
selected = s.temp['selected']
assert len(selected) == 2
assert 'c1' in selected
assert 'c2' in selected
# make sure don't keep nan
s.update(dts[1])
algo = algos.SelectWhere('where')
assert algo(s)
selected = s.temp['selected']
assert len(selected) == 1
assert 'c2' in selected
# if specify include_no_data then 2
algo2 = algos.SelectWhere('where', include_no_data=True)
assert algo2(s)
selected = s.temp['selected']
assert len(selected) == 2
assert 'c1' in selected
assert 'c2' in selected
# behavior on negative prices
s.update(dts[2])
assert algo(s)
selected = s.temp['selected']
assert len(selected) == 1
assert 'c2' in selected
algo3 = algos.SelectWhere('where', include_negative=True)
assert algo3(s)
selected = s.temp['selected']
assert len(selected) == 2
assert 'c1' in selected
assert 'c2' in selected
def test_select_where():
s = bt.Strategy('s')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100.)
where = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=True)
where.loc[ dts[1] ] = False
where['c1'].loc[ dts[2] ] = False
algo = algos.SelectWhere('where')
s.setup(data, where=where)
s.update(dts[0])
assert algo(s)
selected = s.temp['selected']
assert len(selected) == 2
assert 'c1' in selected
assert 'c2' in selected
s.update(dts[1])
assert algo(s)
assert s.temp['selected'] == []
s.update(dts[2])
assert algo(s)
assert s.temp['selected'] == ['c2']
def test_select_where_legacy():
s = bt.Strategy('s')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100.)
where = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=True)
where.loc[ dts[1] ] = False
where['c1'].loc[ dts[2] ] = False
algo = algos.SelectWhere(where)
s.setup(data)
s.update(dts[0])
assert algo(s)
selected = s.temp['selected']
assert len(selected) == 2
assert 'c1' in selected
assert 'c2' in selected
s.update(dts[1])
assert algo(s)
assert s.temp['selected'] == []
s.update(dts[2])
assert algo(s)
assert s.temp['selected'] == ['c2']
def test_select_regex():
s = bt.Strategy('s')
algo = algos.SelectRegex( 'c1' )
s.temp['selected'] = ['a1', 'c1', 'c2', 'c11', 'cc1']
assert algo( s )
assert s.temp['selected'] == ['c1', 'c11', 'cc1']
algo = algos.SelectRegex( '^c1$' )
assert algo( s )
assert s.temp['selected'] == ['c1']
def test_resolve_on_the_run():
s = bt.Strategy('s')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2', 'b1'], data=100.)
data['c1'][dts[1]] = np.nan
data['c2'][dts[1]] = 95
data['c2'][dts[2]] = -5
on_the_run = pd.DataFrame(index=dts, columns=['c'], data='c1')
on_the_run.loc[dts[2], 'c'] = 'c2'
s.setup(data, on_the_run = on_the_run)
s.update(dts[0])
s.temp['selected'] = ['c', 'b1']
algo = algos.ResolveOnTheRun( 'on_the_run' )
assert algo(s)
selected = s.temp['selected']
assert len(selected) == 2
assert 'c1' in selected
assert 'b1' in selected
# make sure don't keep nan
s.update(dts[1])
s.temp['selected'] = ['c', 'b1']
assert algo(s)
selected = s.temp['selected']
assert len(selected) == 1
assert 'b1' in selected
# if specify include_no_data then 2
algo2 = algos.ResolveOnTheRun('on_the_run', include_no_data=True)
s.temp['selected'] = ['c', 'b1']
assert algo2(s)
selected = s.temp['selected']
assert len(selected) == 2
assert 'c1' in selected
assert 'b1' in selected
# behavior on negative prices
s.update(dts[2])
s.temp['selected'] = ['c', 'b1']
assert algo(s)
selected = s.temp['selected']
assert len(selected) == 1
assert 'b1' in selected
algo3 = algos.ResolveOnTheRun('on_the_run', include_negative=True)
s.temp['selected'] = ['c', 'b1']
assert algo3(s)
selected = s.temp['selected']
assert len(selected) == 2
assert 'c2' in selected
assert 'b1' in selected
def test_select_types():
c1 = bt.Security('c1')
c2 = bt.CouponPayingSecurity('c2')
c3 = bt.HedgeSecurity('c3')
c4 = bt.CouponPayingHedgeSecurity('c4')
c5 = bt.FixedIncomeSecurity('c5')
s = bt.Strategy('p', children = [c1, c2, c3, c4, c5])
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2', 'c3', 'c4', 'c5'], data=100.)
coupons = pd.DataFrame(index=dts, columns=['c2', 'c4'], data=0.)
s.setup(data, coupons = coupons)
i = 0
s.update(dts[i])
algo = algos.SelectTypes(include_types=(bt.Security, bt.HedgeSecurity), exclude_types=())
assert algo(s)
assert set(s.temp.pop('selected')) == set(['c1', 'c3'])
algo = algos.SelectTypes(include_types=(bt.core.SecurityBase,), exclude_types=(bt.CouponPayingSecurity,))
assert algo(s)
assert set(s.temp.pop('selected')) == set(['c1', 'c3', 'c5'])
s.temp['selected'] = ['c1', 'c2', 'c3']
algo = algos.SelectTypes(include_types=(bt.core.SecurityBase,))
assert algo(s)
assert set(s.temp.pop('selected')) == set(['c1', 'c2', 'c3'])
def test_weight_equally():
algo = algos.WeighEqually()
s = bt.Strategy('s')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
s.setup(data)
s.update(dts[0])
s.temp['selected'] = ['c1', 'c2']
assert algo(s)
weights = s.temp['weights']
assert len(weights) == 2
assert 'c1' in weights
assert weights['c1'] == 0.5
assert 'c2' in weights
assert weights['c2'] == 0.5
def test_weight_specified():
algo = algos.WeighSpecified(c1=0.6, c2=0.4)
s = bt.Strategy('s')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
data['c1'][dts[1]] = 105
data['c2'][dts[1]] = 95
s.setup(data)
s.update(dts[0])
assert algo(s)
weights = s.temp['weights']
assert len(weights) == 2
assert 'c1' in weights
assert weights['c1'] == 0.6
assert 'c2' in weights
assert weights['c2'] == 0.4
def test_scale_weights():
s = bt.Strategy('s')
algo = algos.ScaleWeights( -0.5 )
s.temp['weights'] = {'c1': 0.5, 'c2': -0.4, 'c3':0 }
assert algo( s )
assert s.temp['weights'] == {'c1':-0.25, 'c2':0.2, 'c3':0}
def test_select_has_data():
algo = algos.SelectHasData(min_count=3, lookback=pd.DateOffset(days=3))
s = bt.Strategy('s')
dts = pd.date_range('2010-01-01', periods=10)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100.)
data['c1'].loc[dts[0]] = np.nan
data['c1'].loc[dts[1]] = np.nan
s.setup(data)
s.update(dts[2])
assert algo(s)
selected = s.temp['selected']
assert len(selected) == 1
assert 'c2' in selected
def test_select_has_data_preselected():
algo = algos.SelectHasData(min_count=3, lookback=pd.DateOffset(days=3))
s = bt.Strategy('s')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100.)
data['c1'].loc[dts[0]] = np.nan
data['c1'].loc[dts[1]] = np.nan
s.setup(data)
s.update(dts[2])
s.temp['selected'] = ['c1']
assert algo(s)
selected = s.temp['selected']
assert len(selected) == 0
@mock.patch('ffn.calc_erc_weights')
def test_weigh_erc(mock_erc):
algo = algos.WeighERC(lookback=pd.DateOffset(days=5))
mock_erc.return_value = pd.Series({'c1': 0.3, 'c2': 0.7})
s = bt.Strategy('s')
dts = pd.date_range('2010-01-01', periods=5)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100.)
s.setup(data)
s.update(dts[4])
s.temp['selected'] = ['c1', 'c2']
assert algo(s)
assert mock_erc.called
rets = mock_erc.call_args[0][0]
assert len(rets) == 4
assert 'c1' in rets
assert 'c2' in rets
weights = s.temp['weights']
assert len(weights) == 2
assert weights['c1'] == 0.3
assert weights['c2'] == 0.7
def test_weigh_target():
algo = algos.WeighTarget('target')
s = bt.Strategy('s')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100.)
target = pd.DataFrame(index=dts[:2], columns=['c1', 'c2'], data=0.5)
target['c1'].loc[dts[1]] = 1.0
target['c2'].loc[dts[1]] = 0.0
s.setup( data, target = target )
s.update(dts[0])
assert algo(s)
weights = s.temp['weights']
assert len(weights) == 2
assert weights['c1'] == 0.5
assert weights['c2'] == 0.5
s.update(dts[1])
assert algo(s)
weights = s.temp['weights']
assert len(weights) == 2
assert weights['c1'] == 1.0
assert weights['c2'] == 0.0
s.update(dts[2])
assert not algo(s)
def test_weigh_inv_vol():
algo = algos.WeighInvVol(lookback=pd.DateOffset(days=5))
s = bt.Strategy('s')
dts = pd.date_range('2010-01-01', periods=5)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100.)
# high vol c1
data['c1'].loc[dts[1]] = 105
data['c1'].loc[dts[2]] = 95
data['c1'].loc[dts[3]] = 105
data['c1'].loc[dts[4]] = 95
# low vol c2
data['c2'].loc[dts[1]] = 100.1
data['c2'].loc[dts[2]] = 99.9
data['c2'].loc[dts[3]] = 100.1
data['c2'].loc[dts[4]] = 99.9
s.setup(data)
s.update(dts[4])
s.temp['selected'] = ['c1', 'c2']
assert algo(s)
weights = s.temp['weights']
assert len(weights) == 2
assert weights['c2'] > weights['c1']
aae(weights['c1'], 0.020, 3)
aae(weights['c2'], 0.980, 3)
@mock.patch('ffn.calc_mean_var_weights')
def test_weigh_mean_var(mock_mv):
algo = algos.WeighMeanVar(lookback=pd.DateOffset(days=5))
mock_mv.return_value = pd.Series({'c1': 0.3, 'c2': 0.7})
s = bt.Strategy('s')
dts = pd.date_range('2010-01-01', periods=5)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100.)
s.setup(data)
s.update(dts[4])
s.temp['selected'] = ['c1', 'c2']
assert algo(s)
assert mock_mv.called
rets = mock_mv.call_args[0][0]
assert len(rets) == 4
assert 'c1' in rets
assert 'c2' in rets
weights = s.temp['weights']
assert len(weights) == 2
assert weights['c1'] == 0.3
assert weights['c2'] == 0.7
def test_weigh_randomly():
s = bt.Strategy('s')
s.temp['selected'] = ['c1', 'c2', 'c3']
algo = algos.WeighRandomly()
assert algo(s)
weights = s.temp['weights']
assert len( weights ) == 3
assert sum( weights.values() ) == 1.
algo = algos.WeighRandomly( (0.3,0.5), 0.95)
assert algo(s)
weights = s.temp['weights']
assert len( weights ) == 3
aae( sum( weights.values() ), 0.95 )
for c in s.temp['selected']:
assert weights[c] <= 0.5
assert weights[c] >= 0.3
def test_set_stat():
s = bt.Strategy('s')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100.)
data['c1'].loc[dts[1]] = 105
data['c2'].loc[dts[1]] = 95
stat = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=4.)
stat['c1'].loc[dts[1]] = 5.
stat['c2'].loc[dts[1]] = 6.
algo = algos.SetStat( 'test_stat' )
s.setup(data, test_stat = stat)
s.update(dts[0])
print()
print(s.get_data('test_stat'))
assert algo(s)
stat = s.temp['stat']
assert stat['c1'] == 4.
assert stat['c2'] == 4.
s.update(dts[1])
assert algo(s)
stat = s.temp['stat']
assert stat['c1'] == 5.
assert stat['c2'] == 6.
def test_set_stat_legacy():
s = bt.Strategy('s')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100.)
data['c1'].loc[dts[1]] = 105
data['c2'].loc[dts[1]] = 95
stat = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=4.)
stat['c1'].loc[dts[1]] = 5.
stat['c2'].loc[dts[1]] = 6.
algo = algos.SetStat( stat )
s.setup(data)
s.update(dts[0])
assert algo(s)
stat = s.temp['stat']
assert stat['c1'] == 4.
assert stat['c2'] == 4.
s.update(dts[1])
assert algo(s)
stat = s.temp['stat']
assert stat['c1'] == 5.
assert stat['c2'] == 6.
def test_stat_total_return():
algo = algos.StatTotalReturn(lookback=pd.DateOffset(days=3))
s = bt.Strategy('s')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100.)
data['c1'].loc[dts[2]] = 105
data['c2'].loc[dts[2]] = 95
s.setup(data)
s.update(dts[2])
s.temp['selected'] = ['c1', 'c2']
assert algo(s)
stat = s.temp['stat']
assert len(stat) == 2
assert stat['c1'] == 105.0 / 100 - 1
assert stat['c2'] == 95.0 / 100 - 1
def test_select_n():
algo = algos.SelectN(n=1, sort_descending=True)
s = bt.Strategy('s')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100.)
data['c1'].loc[dts[2]] = 105
data['c2'].loc[dts[2]] = 95
s.setup(data)
s.update(dts[2])
s.temp['stat'] = data.calc_total_return()
assert algo(s)
selected = s.temp['selected']
assert len(selected) == 1
assert 'c1' in selected
algo = algos.SelectN(n=1, sort_descending=False)
assert algo(s)
selected = s.temp['selected']
assert len(selected) == 1
assert 'c2' in selected
# return the 2 assets we have if all_or_none is false
algo = algos.SelectN(n=3, sort_descending=False)
assert algo(s)
selected = s.temp['selected']
assert len(selected) == 2
assert 'c1' in selected
assert 'c2' in selected
# return 0 assets if all_or_none is true and fewer than n are available
algo = algos.SelectN(n=3, sort_descending=False, all_or_none=True)
assert algo(s)
selected = s.temp['selected']
assert len(selected) == 0
def test_select_n_perc():
algo = algos.SelectN(n=0.5, sort_descending=True)
s = bt.Strategy('s')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100.)
data['c1'].loc[dts[2]] = 105
data['c2'].loc[dts[2]] = 95
s.setup(data)
s.update(dts[2])
s.temp['stat'] = data.calc_total_return()
assert algo(s)
selected = s.temp['selected']
assert len(selected) == 1
assert 'c1' in selected
def test_select_momentum():
algo = algos.SelectMomentum(n=1, lookback=pd.DateOffset(days=3))
s = bt.Strategy('s')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100.)
data['c1'].loc[dts[2]] = 105
data['c2'].loc[dts[2]] = 95
s.setup(data)
s.update(dts[2])
s.temp['selected'] = ['c1', 'c2']
assert algo(s)
actual = s.temp['selected']
assert len(actual) == 1
assert 'c1' in actual
def test_limit_weights():
s = bt.Strategy('s')
dts = | pd.date_range('2010-01-01', periods=3) | pandas.date_range |
# coding=utf-8
'''
Use CoreNLP for lexical analysis of short text
'''
import sys
reload(sys)
sys.setdefaultencoding('utf8')
import pandas as pd
from stanfordcorenlp import StanfordCoreNLP
nlp = StanfordCoreNLP(r'/home/dl/Downloads/stanford-corenlp-full-2018-01-31/', lang='zh')
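# Minimal illustrative sketch (not part of the original script) of how the client is
# typically called; word_tokenize/pos_tag are methods of the stanfordcorenlp package:
#   tokens = nlp.word_tokenize(u'这是一个测试')  # word segmentation
#   tags = nlp.pos_tag(u'这是一个测试')          # part-of-speech tagging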
pos= | pd.read_excel('./data/pos.xls',header=None,index=None,encoding='utf-8') | pandas.read_excel |
import numpy as np
import pytest
import pandas as pd
from pandas import DataFrame, Series
import pandas._testing as tm
class TestSeriesCombine:
def test_combine_scalar(self):
# GH 21248
# Note - combine() with another Series is tested elsewhere because
# it is used when testing operators
s = pd.Series([i * 10 for i in range(5)])
result = s.combine(3, lambda x, y: x + y)
expected = pd.Series([i * 10 + 3 for i in range(5)])
tm.assert_series_equal(result, expected)
result = s.combine(22, lambda x, y: min(x, y))
expected = pd.Series([min(i * 10, 22) for i in range(5)])
tm.assert_series_equal(result, expected)
def test_update(self):
s = Series([1.5, np.nan, 3.0, 4.0, np.nan])
s2 = Series([np.nan, 3.5, np.nan, 5.0])
s.update(s2)
expected = Series([1.5, 3.5, 3.0, 5.0, np.nan])
tm.assert_series_equal(s, expected)
# GH 3217
df = DataFrame([{"a": 1}, {"a": 3, "b": 2}])
df["c"] = np.nan
df["c"].update(Series(["foo"], index=[0]))
expected = DataFrame(
[[1, np.nan, "foo"], [3, 2.0, np.nan]], columns=["a", "b", "c"]
)
tm.assert_frame_equal(df, expected)
@pytest.mark.parametrize(
"other, dtype, expected",
[
# other is int
([61, 63], "int32", pd.Series([10, 61, 12], dtype="int32")),
([61, 63], "int64", | pd.Series([10, 61, 12]) | pandas.Series |
"""
Copyright 2019 <NAME>.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
"""
import datetime
import datetime as dt
import os
from typing import Union
import numpy as np
import pandas as pd
import pytest
import pytz
from gs_quant.target.common import XRef, PricingLocation, Currency as CurrEnum
from numpy.testing import assert_allclose
from pandas.testing import assert_series_equal
from pandas.tseries.offsets import CustomBusinessDay
from pytz import timezone
from testfixtures import Replacer
from testfixtures.mock import Mock
import gs_quant.timeseries.measures as tm
import gs_quant.timeseries.measures_rates as tm_rates
from gs_quant.api.gs.assets import GsTemporalXRef, GsAssetApi, GsIdType, IdList, GsAsset
from gs_quant.api.gs.data import GsDataApi, MarketDataResponseFrame
from gs_quant.api.gs.data import QueryType
from gs_quant.data.core import DataContext
from gs_quant.data.dataset import Dataset
from gs_quant.data.fields import Fields
from gs_quant.errors import MqError, MqValueError, MqTypeError
from gs_quant.markets.securities import AssetClass, Cross, Index, Currency, SecurityMaster, Stock, \
Swap, CommodityNaturalGasHub
from gs_quant.session import GsSession, Environment
from gs_quant.test.timeseries.utils import mock_request
from gs_quant.timeseries import Returns
from gs_quant.timeseries.measures import BenchmarkType
_index = [pd.Timestamp('2019-01-01')]
_test_datasets = ('TEST_DATASET',)
def mock_empty_market_data_response():
df = MarketDataResponseFrame()
df.dataset_ids = ()
return df
def map_identifiers_default_mocker(input_type: Union[GsIdType, str],
output_type: Union[GsIdType, str],
ids: IdList,
as_of: dt.datetime = None,
multimap: bool = False,
limit: int = None,
**kwargs
) -> dict:
if "USD-LIBOR-BBA" in ids:
return {"USD-LIBOR-BBA": "MAPDB7QNB2TZVQ0E"}
elif "EUR-EURIBOR-TELERATE" in ids:
return {"EUR-EURIBOR-TELERATE": "MAJNQPFGN1EBDHAE"}
elif "GBP-LIBOR-BBA" in ids:
return {"GBP-LIBOR-BBA": "MAFYB8Z4R1377A19"}
elif "JPY-LIBOR-BBA" in ids:
return {"JPY-LIBOR-BBA": "MABMVE27EM8YZK33"}
elif "EUR OIS" in ids:
return {"EUR OIS": "MARFAGXDQRWM07Y2"}
def map_identifiers_swap_rate_mocker(input_type: Union[GsIdType, str],
output_type: Union[GsIdType, str],
ids: IdList,
as_of: dt.datetime = None,
multimap: bool = False,
limit: int = None,
**kwargs
) -> dict:
if "USD-3m" in ids:
return {"USD-3m": "MAAXGV0GZTW4GFNC"}
elif "EUR-6m" in ids:
return {"EUR-6m": "MA5WM2QWRVMYKDK0"}
elif "KRW" in ids:
return {"KRW": 'MAJ6SEQH3GT0GA2Z'}
def map_identifiers_inflation_mocker(input_type: Union[GsIdType, str],
output_type: Union[GsIdType, str],
ids: IdList,
as_of: dt.datetime = None,
multimap: bool = False,
limit: int = None,
**kwargs
) -> dict:
if "CPI-UKRPI" in ids:
return {"CPI-UKRPI": "MAQ7ND0MBP2AVVQW"}
elif "CPI-CPXTEMU" in ids:
return {"CPI-CPXTEMU": "MAK1FHKH5P5GJSHH"}
def map_identifiers_cross_basis_mocker(input_type: Union[GsIdType, str],
output_type: Union[GsIdType, str],
ids: IdList,
as_of: dt.datetime = None,
multimap: bool = False,
limit: int = None,
**kwargs
) -> dict:
if "USD-3m/JPY-3m" in ids:
return {"USD-3m/JPY-3m": "MA99N6C1KF9078NM"}
elif "EUR-3m/USD-3m" in ids:
return {"EUR-3m/USD-3m": "MAXPKTXW2D4X6MFQ"}
elif "GBP-3m/USD-3m" in ids:
return {"GBP-3m/USD-3m": "MA8BZHQV3W32V63B"}
def get_data_policy_rate_expectation_mocker(
start: Union[dt.date, dt.datetime] = None,
end: Union[dt.date, dt.datetime] = None,
as_of: dt.datetime = None,
since: dt.datetime = None,
fields: Union[str, Fields] = None,
asset_id_type: str = None,
**kwargs) -> pd.DataFrame:
if 'meetingNumber' in kwargs:
if kwargs['meetingNumber'] == 0:
return mock_meeting_spot()
elif 'meeting_date' in kwargs:
if kwargs['meeting_date'] == dt.date(2019, 10, 24):
return mock_meeting_spot()
return mock_meeting_expectation()
def test_parse_meeting_date():
assert tm.parse_meeting_date(5) == ''
assert tm.parse_meeting_date('') == ''
assert tm.parse_meeting_date('test') == ''
assert tm.parse_meeting_date('2019-09-01') == dt.date(2019, 9, 1)
def test_currency_to_default_benchmark_rate(mocker):
mocker.patch.object(GsSession.__class__, 'default_value',
return_value=GsSession.get(Environment.QA, 'client_id', 'secret'))
mocker.patch.object(GsSession.current, '_get', side_effect=mock_request)
mocker.patch.object(GsAssetApi, 'map_identifiers', side_effect=map_identifiers_default_mocker)
asset_id_list = ["MAZ7RWC904JYHYPS", "MAJNQPFGN1EBDHAE", "MA66CZBQJST05XKG", "MAK1FHKH5P5GJSHH", "MA4J1YB8XZP2BPT8",
"MA4B66MW5E27U8P32SB"]
correct_mapping = ["MAPDB7QNB2TZVQ0E", "MAJNQPFGN1EBDHAE", "MAFYB8Z4R1377A19", "MABMVE27EM8YZK33",
"MA4J1YB8XZP2BPT8", "MA4B66MW5E27U8P32SB"]
with tm.PricingContext(dt.date.today()):
for i in range(len(asset_id_list)):
correct_id = tm.currency_to_default_benchmark_rate(asset_id_list[i])
assert correct_id == correct_mapping[i]
def test_currency_to_default_swap_rate_asset(mocker):
mocker.patch.object(GsSession.__class__, 'default_value',
return_value=GsSession.get(Environment.QA, 'client_id', 'secret'))
mocker.patch.object(GsSession.current, '_get', side_effect=mock_request)
mocker.patch.object(GsAssetApi, 'map_identifiers', side_effect=map_identifiers_swap_rate_mocker)
asset_id_list = ['MAZ7RWC904JYHYPS', 'MAJNQPFGN1EBDHAE', 'MAJ6SEQH3GT0GA2Z']
correct_mapping = ['MAAXGV0GZTW4GFNC', 'MA5WM2QWRVMYKDK0', 'MAJ6SEQH3GT0GA2Z']
with tm.PricingContext(dt.date.today()):
for i in range(len(asset_id_list)):
correct_id = tm.currency_to_default_swap_rate_asset(asset_id_list[i])
assert correct_id == correct_mapping[i]
def test_currency_to_inflation_benchmark_rate(mocker):
mocker.patch.object(GsSession.__class__, 'default_value',
return_value=GsSession.get(Environment.QA, 'client_id', 'secret'))
mocker.patch.object(GsSession.current, '_get', side_effect=mock_request)
mocker.patch.object(GsAssetApi, 'map_identifiers', side_effect=map_identifiers_inflation_mocker)
asset_id_list = ["MA66CZBQJST05XKG", "MAK1FHKH5P5GJSHH", "MA4J1YB8XZP2BPT8"]
correct_mapping = ["MAQ7ND0MBP2AVVQW", "MAK1FHKH5P5GJSHH", "MA4J1YB8XZP2BPT8"]
with tm.PricingContext(dt.date.today()):
for i in range(len(asset_id_list)):
correct_id = tm.currency_to_inflation_benchmark_rate(asset_id_list[i])
assert correct_id == correct_mapping[i]
# Test that the same id is returned when a TypeError is raised
mocker.patch.object(GsAssetApi, 'map_identifiers', side_effect=TypeError('Test'))
assert tm.currency_to_inflation_benchmark_rate('MA66CZBQJST05XKG') == 'MA66CZBQJST05XKG'
def test_cross_to_basis(mocker):
mocker.patch.object(GsSession.__class__, 'default_value',
return_value=GsSession.get(Environment.QA, 'client_id', 'secret'))
mocker.patch.object(GsSession.current, '_get', side_effect=mock_request)
mocker.patch.object(GsAssetApi, 'map_identifiers', side_effect=map_identifiers_cross_basis_mocker)
asset_id_list = ["MAYJPCVVF2RWXCES", "MA4B66MW5E27U8P32SB", "nobbid"]
correct_mapping = ["MA99N6C1KF9078NM", "MA4B66MW5E27U8P32SB", "nobbid"]
with tm.PricingContext(dt.date.today()):
for i in range(len(asset_id_list)):
correct_id = tm.cross_to_basis(asset_id_list[i])
assert correct_id == correct_mapping[i]
# Test that the same id is returned when a TypeError is raised
mocker.patch.object(GsAssetApi, 'map_identifiers', side_effect=TypeError('Test'))
assert tm.cross_to_basis('MAYJPCVVF2RWXCES') == 'MAYJPCVVF2RWXCES'
def test_currency_to_tdapi_swap_rate_asset(mocker):
replace = Replacer()
mocker.patch.object(GsSession.__class__, 'current',
return_value=GsSession.get(Environment.QA, 'client_id', 'secret'))
mocker.patch.object(GsSession.current, '_get', side_effect=mock_request)
mocker.patch.object(SecurityMaster, 'get_asset', side_effect=mock_request)
bbid_mock = replace('gs_quant.timeseries.measures.Asset.get_identifier', Mock())
with tm.PricingContext(dt.date.today()):
asset = Currency('MA25DW5ZGC1BSC8Y', 'NOK')
bbid_mock.return_value = 'NOK'
correct_id = tm_rates._currency_to_tdapi_swap_rate_asset(asset)
asset = Currency('MAZ7RWC904JYHYPS', 'USD')
bbid_mock.return_value = 'USD'
correct_id = tm_rates._currency_to_tdapi_swap_rate_asset(asset)
assert 'MAFRSWPAF5QPNTP2' == correct_id
bbid_mock.return_value = 'CHF'
correct_id = tm_rates._currency_to_tdapi_swap_rate_asset(asset)
assert 'MAW25BGQJH9P6DPT' == correct_id
bbid_mock.return_value = 'EUR'
correct_id = tm_rates._currency_to_tdapi_swap_rate_asset(asset)
assert 'MAA9MVX15AJNQCVG' == correct_id
bbid_mock.return_value = 'GBP'
correct_id = tm_rates._currency_to_tdapi_swap_rate_asset(asset)
assert 'MA6QCAP9B7ABS9HA' == correct_id
bbid_mock.return_value = 'JPY'
correct_id = tm_rates._currency_to_tdapi_swap_rate_asset(asset)
assert 'MAEE219J5ZP0ZKRK' == correct_id
bbid_mock.return_value = 'SEK'
correct_id = tm_rates._currency_to_tdapi_swap_rate_asset(asset)
assert 'MAETMVTPNP3199A5' == correct_id
bbid_mock.return_value = 'HKD'
correct_id = tm_rates._currency_to_tdapi_swap_rate_asset(asset)
assert 'MABRNGY8XRFVC36N' == correct_id
bbid_mock.return_value = 'NZD'
correct_id = tm_rates._currency_to_tdapi_swap_rate_asset(asset)
assert 'MAH16NHE1HBN0FBZ' == correct_id
bbid_mock.return_value = 'AUD'
correct_id = tm_rates._currency_to_tdapi_swap_rate_asset(asset)
assert 'MAY8147CRK0ZP53B' == correct_id
bbid_mock.return_value = 'CAD'
correct_id = tm_rates._currency_to_tdapi_swap_rate_asset(asset)
assert 'MANJ8SS88WJ6N28Q' == correct_id
bbid_mock.return_value = 'KRW'
correct_id = tm_rates._currency_to_tdapi_swap_rate_asset(asset)
assert 'MAP55AXG5SQVS6C5' == correct_id
bbid_mock.return_value = 'INR'
correct_id = tm_rates._currency_to_tdapi_swap_rate_asset(asset)
assert 'MA20JHJXN1PD5HGE' == correct_id
bbid_mock.return_value = 'CNY'
correct_id = tm_rates._currency_to_tdapi_swap_rate_asset(asset)
assert 'MA4K1D8HH2R0RQY5' == correct_id
bbid_mock.return_value = 'SGD'
correct_id = tm_rates._currency_to_tdapi_swap_rate_asset(asset)
assert 'MA5CQFHYBPH9E5BS' == correct_id
bbid_mock.return_value = 'DKK'
correct_id = tm_rates._currency_to_tdapi_swap_rate_asset(asset)
assert 'MAF131NKWVRESFYA' == correct_id
asset = Currency('MA890', 'PLN')
bbid_mock.return_value = 'PLN'
assert 'MA890' == tm_rates._currency_to_tdapi_swap_rate_asset(asset)
replace.restore()
def test_currency_to_tdapi_basis_swap_rate_asset(mocker):
replace = Replacer()
mocker.patch.object(GsSession.__class__, 'current',
return_value=GsSession.get(Environment.QA, 'client_id', 'secret'))
mocker.patch.object(GsSession.current, '_get', side_effect=mock_request)
mocker.patch.object(SecurityMaster, 'get_asset', side_effect=mock_request)
bbid_mock = replace('gs_quant.timeseries.measures.Asset.get_identifier', Mock())
with tm.PricingContext(dt.date.today()):
asset = Currency('MA890', 'NOK')
bbid_mock.return_value = 'NOK'
assert 'MA890' == tm_rates._currency_to_tdapi_basis_swap_rate_asset(asset)
asset = Currency('MAZ7RWC904JYHYPS', 'USD')
bbid_mock.return_value = 'USD'
correct_id = tm_rates._currency_to_tdapi_basis_swap_rate_asset(asset)
assert 'MAQB1PGEJFCET3GG' == correct_id
bbid_mock.return_value = 'EUR'
correct_id = tm_rates._currency_to_tdapi_basis_swap_rate_asset(asset)
assert 'MAGRG2VT11GQ2RQ9' == correct_id
bbid_mock.return_value = 'GBP'
correct_id = tm_rates._currency_to_tdapi_basis_swap_rate_asset(asset)
assert 'MAHCYNB3V75JC5Q8' == correct_id
bbid_mock.return_value = 'JPY'
correct_id = tm_rates._currency_to_tdapi_basis_swap_rate_asset(asset)
assert 'MAXVRBEZCJVH0C4V' == correct_id
replace.restore()
def test_check_clearing_house():
assert tm_rates._ClearingHouse.LCH == tm_rates._check_clearing_house('lch')
assert tm_rates._ClearingHouse.CME == tm_rates._check_clearing_house(tm_rates._ClearingHouse.CME)
assert tm_rates._ClearingHouse.LCH == tm_rates._check_clearing_house(None)
invalid_ch = ['NYSE']
for ch in invalid_ch:
with pytest.raises(MqError):
tm_rates._check_clearing_house(ch)
def test_get_swap_csa_terms():
euribor_index = tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['EUR'][BenchmarkType.EURIBOR.value]
usd_libor_index = tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['USD'][BenchmarkType.LIBOR.value]
fed_funds_index = tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['USD'][BenchmarkType.Fed_Funds.value]
estr_index = tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['EUR'][BenchmarkType.EUROSTR.value]
assert dict(csaTerms='USD-1') == tm_rates._get_swap_csa_terms('USD', fed_funds_index)
assert dict(csaTerms='EUR-EuroSTR') == tm_rates._get_swap_csa_terms('EUR', estr_index)
assert {} == tm_rates._get_swap_csa_terms('EUR', euribor_index)
assert {} == tm_rates._get_swap_csa_terms('USD', usd_libor_index)
def test_get_basis_swap_csa_terms():
euribor_index = tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['EUR'][BenchmarkType.EURIBOR.value]
usd_libor_index = tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['USD'][BenchmarkType.LIBOR.value]
fed_funds_index = tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['USD'][BenchmarkType.Fed_Funds.value]
sofr_index = tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['USD'][BenchmarkType.SOFR.value]
estr_index = tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['EUR'][BenchmarkType.EUROSTR.value]
eonia_index = tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['EUR'][BenchmarkType.EONIA.value]
assert dict(csaTerms='USD-1') == tm_rates._get_basis_swap_csa_terms('USD', fed_funds_index, sofr_index)
assert dict(csaTerms='EUR-EuroSTR') == tm_rates._get_basis_swap_csa_terms('EUR', estr_index, eonia_index)
assert {} == tm_rates._get_basis_swap_csa_terms('EUR', eonia_index, euribor_index)
assert {} == tm_rates._get_basis_swap_csa_terms('USD', fed_funds_index, usd_libor_index)
def test_match_floating_tenors():
swap_args = dict(asset_parameters_payer_rate_option=tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['USD']['LIBOR'],
asset_parameters_payer_designated_maturity='3m',
asset_parameters_receiver_rate_option=tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['USD']['SOFR'],
asset_parameters_receiver_designated_maturity='1y')
assert '3m' == tm_rates._match_floating_tenors(swap_args)['asset_parameters_receiver_designated_maturity']
swap_args = dict(asset_parameters_payer_rate_option=tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['USD']['SOFR'],
asset_parameters_payer_designated_maturity='1y',
asset_parameters_receiver_rate_option=tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['USD']['LIBOR'],
asset_parameters_receiver_designated_maturity='3m')
assert '3m' == tm_rates._match_floating_tenors(swap_args)['asset_parameters_payer_designated_maturity']
swap_args = dict(asset_parameters_payer_rate_option=tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['GBP']['SONIA'],
asset_parameters_payer_designated_maturity='1y',
asset_parameters_receiver_rate_option=tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['GBP']['LIBOR'],
asset_parameters_receiver_designated_maturity='3m')
assert '3m' == tm_rates._match_floating_tenors(swap_args)['asset_parameters_payer_designated_maturity']
swap_args = dict(asset_parameters_payer_rate_option=tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['GBP']['LIBOR'],
asset_parameters_payer_designated_maturity='3m',
asset_parameters_receiver_rate_option=tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['GBP']['SONIA'],
asset_parameters_receiver_designated_maturity='1y')
assert '3m' == tm_rates._match_floating_tenors(swap_args)['asset_parameters_receiver_designated_maturity']
swap_args = dict(asset_parameters_payer_rate_option=tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['USD']['LIBOR'],
asset_parameters_payer_designated_maturity='3m',
asset_parameters_receiver_rate_option=tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['USD']['LIBOR'],
asset_parameters_receiver_designated_maturity='6m')
assert swap_args == tm_rates._match_floating_tenors(swap_args)
def test_get_term_struct_date(mocker):
today = datetime.datetime.today()
biz_day = CustomBusinessDay()
assert today == tm_rates._get_term_struct_date(tenor=today, index=today, business_day=biz_day)
date_index = datetime.datetime(2020, 7, 31, 0, 0)
assert date_index == tm_rates._get_term_struct_date(tenor='2020-07-31', index=date_index, business_day=biz_day)
assert date_index == tm_rates._get_term_struct_date(tenor='0b', index=date_index, business_day=biz_day)
assert datetime.datetime(2021, 7, 30, 0, 0) == tm_rates._get_term_struct_date(tenor='1y', index=date_index,
business_day=biz_day)
def test_cross_stored_direction_for_fx_vol(mocker):
mocker.patch.object(GsSession.__class__, 'default_value',
return_value=GsSession.get(Environment.QA, 'client_id', 'secret'))
mocker.patch.object(GsSession.current, '_get', side_effect=mock_request)
mocker.patch.object(GsSession.current, '_post', side_effect=mock_request)
asset_id_list = ["MAYJPCVVF2RWXCES", "MATGYV0J9MPX534Z"]
correct_mapping = ["MATGYV0J9MPX534Z", "MATGYV0J9MPX534Z"]
with tm.PricingContext(dt.date.today()):
for i in range(len(asset_id_list)):
correct_id = tm.cross_stored_direction_for_fx_vol(asset_id_list[i])
assert correct_id == correct_mapping[i]
def test_cross_to_usd_based_cross_for_fx_forecast(mocker):
mocker.patch.object(GsSession.__class__, 'default_value',
return_value=GsSession.get(Environment.QA, 'client_id', 'secret'))
mocker.patch.object(GsSession.current, '_get', side_effect=mock_request)
mocker.patch.object(GsSession.current, '_post', side_effect=mock_request)
asset_id_list = ["MAYJPCVVF2RWXCES", "MATGYV0J9MPX534Z"]
correct_mapping = ["MATGYV0J9MPX534Z", "MATGYV0J9MPX534Z"]
with tm.PricingContext(dt.date.today()):
for i in range(len(asset_id_list)):
correct_id = tm.cross_to_usd_based_cross(asset_id_list[i])
assert correct_id == correct_mapping[i]
def test_cross_to_usd_based_cross(mocker):
mocker.patch.object(GsSession.__class__, 'default_value',
return_value=GsSession.get(Environment.QA, 'client_id', 'secret'))
mocker.patch.object(GsSession.current, '_get', side_effect=mock_request)
mocker.patch.object(GsSession.current, '_post', side_effect=mock_request)
mocker.patch.object(SecurityMaster, 'get_asset', side_effect=TypeError('unsupported'))
replace = Replacer()
bbid_mock = replace('gs_quant.timeseries.measures.Asset.get_identifier', Mock())
bbid_mock.return_value = 'HELLO'
assert 'FUN' == tm.cross_to_usd_based_cross(Cross('FUN', 'EURUSD'))
replace.restore()
def test_cross_stored_direction(mocker):
mocker.patch.object(GsSession.__class__, 'default_value',
return_value=GsSession.get(Environment.QA, 'client_id', 'secret'))
mocker.patch.object(GsSession.current, '_get', side_effect=mock_request)
mocker.patch.object(GsSession.current, '_post', side_effect=mock_request)
mocker.patch.object(SecurityMaster, 'get_asset', side_effect=TypeError('unsupported'))
replace = Replacer()
bbid_mock = replace('gs_quant.timeseries.measures.Asset.get_identifier', Mock())
bbid_mock.return_value = 'HELLO'
assert 'FUN' == tm.cross_stored_direction_for_fx_vol(Cross('FUN', 'EURUSD'))
replace.restore()
def test_get_tdapi_rates_assets(mocker):
mock_asset_1 = GsAsset(asset_class='Rate', id='MAW25BGQJH9P6DPT', type_='Swap', name='Test_asset')
mock_asset_2 = GsAsset(asset_class='Rate', id='MAA9MVX15AJNQCVG', type_='Swap', name='Test_asset')
mock_asset_3 = GsAsset(asset_class='Rate', id='MANQHVYC30AZFT7R', type_='BasisSwap', name='Test_asset')
replace = Replacer()
assets = replace('gs_quant.timeseries.measures.GsAssetApi.get_many_assets', Mock())
assets.return_value = [mock_asset_1]
assert 'MAW25BGQJH9P6DPT' == tm_rates._get_tdapi_rates_assets()
replace.restore()
assets = replace('gs_quant.timeseries.measures.GsAssetApi.get_many_assets', Mock())
assets.return_value = [mock_asset_1, mock_asset_2]
kwargs = dict(asset_parameters_termination_date='10y', asset_parameters_effective_date='0b')
with pytest.raises(MqValueError):
tm_rates._get_tdapi_rates_assets(**kwargs)
replace.restore()
assets = replace('gs_quant.timeseries.measures.GsAssetApi.get_many_assets', Mock())
assets.return_value = []
with pytest.raises(MqValueError):
tm_rates._get_tdapi_rates_assets()
replace.restore()
assets = replace('gs_quant.timeseries.measures.GsAssetApi.get_many_assets', Mock())
assets.return_value = [mock_asset_1, mock_asset_2]
kwargs = dict()
assert ['MAW25BGQJH9P6DPT', 'MAA9MVX15AJNQCVG'] == tm_rates._get_tdapi_rates_assets(**kwargs)
replace.restore()
# test case will test matching sofr maturity with libor leg and flipping legs to get right asset
kwargs = dict(type='BasisSwap', asset_parameters_termination_date='10y',
asset_parameters_payer_rate_option=BenchmarkType.LIBOR,
asset_parameters_payer_designated_maturity='3m',
asset_parameters_receiver_rate_option=BenchmarkType.SOFR,
asset_parameters_receiver_designated_maturity='1y',
asset_parameters_clearing_house='lch', asset_parameters_effective_date='Spot',
asset_parameters_notional_currency='USD',
pricing_location='NYC')
assets = replace('gs_quant.timeseries.measures.GsAssetApi.get_many_assets', Mock())
assets.return_value = [mock_asset_3]
assert 'MANQHVYC30AZFT7R' == tm_rates._get_tdapi_rates_assets(**kwargs)
replace.restore()
def test_get_swap_leg_defaults():
result_dict = dict(currency=CurrEnum.JPY, benchmark_type='JPY-LIBOR-BBA', floating_rate_tenor='6m',
pricing_location=PricingLocation.TKO)
defaults = tm_rates._get_swap_leg_defaults(CurrEnum.JPY)
assert result_dict == defaults
result_dict = dict(currency=CurrEnum.USD, benchmark_type='USD-LIBOR-BBA', floating_rate_tenor='3m',
pricing_location=PricingLocation.NYC)
defaults = tm_rates._get_swap_leg_defaults(CurrEnum.USD)
assert result_dict == defaults
result_dict = dict(currency=CurrEnum.EUR, benchmark_type='EUR-EURIBOR-TELERATE', floating_rate_tenor='6m',
pricing_location=PricingLocation.LDN)
defaults = tm_rates._get_swap_leg_defaults(CurrEnum.EUR)
assert result_dict == defaults
result_dict = dict(currency=CurrEnum.SEK, benchmark_type='SEK-STIBOR-SIDE', floating_rate_tenor='6m',
pricing_location=PricingLocation.LDN)
defaults = tm_rates._get_swap_leg_defaults(CurrEnum.SEK)
assert result_dict == defaults
def test_check_forward_tenor():
valid_tenors = [datetime.date(2020, 1, 1), '1y', 'imm2', 'frb2', '1m', '0b']
for tenor in valid_tenors:
assert tenor == tm_rates._check_forward_tenor(tenor)
invalid_tenors = ['5yr', 'imm5', 'frb0']
for tenor in invalid_tenors:
with pytest.raises(MqError):
tm_rates._check_forward_tenor(tenor)
def mock_commod(_cls, _q):
d = {
'price': [30, 30, 30, 30, 35.929686, 35.636039, 27.307498, 23.23177, 19.020833, 18.827291, 17.823749, 17.393958,
17.824999, 20.307603, 24.311249, 25.160103, 25.245728, 25.736873, 28.425206, 28.779789, 30.519996,
34.896348, 33.966973, 33.95489, 33.686348, 34.840307, 32.674163, 30.261665, 30, 30, 30]
}
df = MarketDataResponseFrame(data=d, index=pd.date_range('2019-05-01', periods=31, freq='H', tz=timezone('UTC')))
df.dataset_ids = _test_datasets
return df
def mock_forward_price(_cls, _q):
d = {
'forwardPrice': [
22.0039,
24.8436,
24.8436,
11.9882,
14.0188,
11.6311,
18.9234,
21.3654,
21.3654,
],
'quantityBucket': [
"PEAK",
"PEAK",
"PEAK",
"7X8",
"7X8",
"7X8",
"2X16H",
"2X16H",
"2X16H",
],
'contract': [
"J20",
"K20",
"M20",
"J20",
"K20",
"M20",
"J20",
"K20",
"M20",
]
}
df = MarketDataResponseFrame(data=d, index=pd.to_datetime([datetime.date(2019, 1, 2)] * 9))
df.dataset_ids = _test_datasets
return df
def mock_fair_price(_cls, _q):
d = {
'fairPrice': [
2.880,
2.844,
2.726,
],
'contract': [
"F21",
"G21",
"H21",
]
}
df = MarketDataResponseFrame(data=d, index=pd.to_datetime([datetime.date(2019, 1, 2)] * 3))
df.dataset_ids = _test_datasets
return df
def mock_natgas_forward_price(_cls, _q):
d = {
'forwardPrice': [
2.880,
2.844,
2.726,
],
'contract': [
"F21",
"G21",
"H21",
]
}
df = MarketDataResponseFrame(data=d, index=pd.to_datetime([datetime.date(2019, 1, 2)] * 3))
df.dataset_ids = _test_datasets
return df
def mock_fair_price_swap(_cls, _q):
d = {'fairPrice': [2.880]}
df = MarketDataResponseFrame(data=d, index=pd.to_datetime([datetime.date(2019, 1, 2)]))
df.dataset_ids = _test_datasets
return df
def mock_implied_volatility(_cls, _q):
d = {
'impliedVolatility': [
2.880,
2.844,
2.726,
],
'contract': [
"F21",
"G21",
"H21",
]
}
df = MarketDataResponseFrame(data=d, index=pd.to_datetime([datetime.date(2019, 1, 2)] * 3))
df.dataset_ids = _test_datasets
return df
def mock_missing_bucket_forward_price(_cls, _q):
d = {
'forwardPrice': [
22.0039,
24.8436,
24.8436,
11.9882,
14.0188,
18.9234,
21.3654,
21.3654,
],
'quantityBucket': [
"PEAK",
"PEAK",
"PEAK",
"7X8",
"7X8",
"2X16H",
"2X16H",
"2X16H",
],
'contract': [
"J20",
"K20",
"M20",
"J20",
"K20",
"J20",
"K20",
"M20",
]
}
return pd.DataFrame(data=d, index=pd.to_datetime([datetime.date(2019, 1, 2)] * 8))
def mock_fx_vol(_cls, q):
queries = q.get('queries', [])
if len(queries) > 0 and 'Last' in queries[0]['measures']:
return MarketDataResponseFrame({'impliedVolatility': [3]}, index=[pd.Timestamp('2019-01-04T12:00:00Z')])
d = {
'strikeReference': ['delta', 'spot', 'forward'],
'relativeStrike': [25, 100, 100],
'impliedVolatility': [5, 1, 2],
'forecast': [1.1, 1.1, 1.1]
}
df = MarketDataResponseFrame(data=d, index=pd.date_range('2019-01-01', periods=3, freq='D'))
df.dataset_ids = _test_datasets
return df
def mock_fx_forecast(_cls, _q):
d = {
'fxForecast': [1.1, 1.1, 1.1]
}
df = MarketDataResponseFrame(data=d, index=_index * 3)
df.dataset_ids = _test_datasets
return df
def mock_fx_delta(_cls, _q):
d = {
'relativeStrike': [25, -25, 0],
'impliedVolatility': [1, 5, 2],
'forecast': [1.1, 1.1, 1.1]
}
df = MarketDataResponseFrame(data=d, index=_index * 3)
df.dataset_ids = _test_datasets
return df
def mock_fx_empty(_cls, _q):
d = {
'strikeReference': [],
'relativeStrike': [],
'impliedVolatility': []
}
df = MarketDataResponseFrame(data=d, index=[])
df.dataset_ids = _test_datasets
return df
def mock_fx_switch(_cls, _q, _n):
replace = Replacer()
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_fx_empty)
replace.restore()
return Cross('MA1889', 'ABC/XYZ')
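# Generic rates fixture: one column per rates measure (swapRate, swapAnnuity, capFloorVol, ...), each holding
# the values [1, 2, 3].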
def mock_curr(_cls, _q):
d = {
'swapAnnuity': [1, 2, 3],
'swapRate': [1, 2, 3],
'basisSwapRate': [1, 2, 3],
'swaptionVol': [1, 2, 3],
'atmFwdRate': [1, 2, 3],
'midcurveVol': [1, 2, 3],
'capFloorVol': [1, 2, 3],
'spreadOptionVol': [1, 2, 3],
'inflationSwapRate': [1, 2, 3],
'midcurveAtmFwdRate': [1, 2, 3],
'capFloorAtmFwdRate': [1, 2, 3],
'spreadOptionAtmFwdRate': [1, 2, 3],
'strike': [0.25, 0.5, 0.75]
}
df = MarketDataResponseFrame(data=d, index=_index * 3)
df.dataset_ids = _test_datasets
return df
def mock_cross(_cls, _q):
d = {
'basis': [1, 2, 3],
}
df = MarketDataResponseFrame(data=d, index=_index * 3)
df.dataset_ids = _test_datasets
return df
def mock_eq(_cls, _q):
d = {
'relativeStrike': [0.75, 0.25, 0.5],
'impliedVolatility': [5, 1, 2],
'impliedCorrelation': [5, 1, 2],
'realizedCorrelation': [3.14, 2.71828, 1.44],
'averageImpliedVolatility': [5, 1, 2],
'averageImpliedVariance': [5, 1, 2],
'averageRealizedVolatility': [5, 1, 2],
'impliedVolatilityByDeltaStrike': [5, 1, 2],
'fundamentalMetric': [5, 1, 2]
}
df = MarketDataResponseFrame(data=d, index=_index * 3)
df.dataset_ids = _test_datasets
return df
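# Equity implied-vol fixture: an intraday 'Last' point for real-time queries, otherwise three daily points ending
# yesterday; the *_last_err and *_last_empty variants below exercise failure and empty responses on the 'Last' query.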
def mock_eq_vol(_cls, q):
queries = q.get('queries', [])
if len(queries) > 0 and 'Last' in queries[0]['measures']:
idx = [pd.Timestamp(datetime.datetime.now(pytz.UTC))]
return MarketDataResponseFrame({'impliedVolatility': [3]}, index=idx)
d = {
'impliedVolatility': [5, 1, 2],
}
end = datetime.datetime.now(pytz.UTC).date() - datetime.timedelta(days=1)
df = MarketDataResponseFrame(data=d, index=pd.date_range(end=end, periods=3, freq='D'))
df.dataset_ids = _test_datasets
return df
def mock_eq_vol_last_err(_cls, q):
queries = q.get('queries', [])
if len(queries) > 0 and 'Last' in queries[0]['measures']:
raise MqValueError('error while getting last')
d = {
'impliedVolatility': [5, 1, 2],
}
end = datetime.date.today() - datetime.timedelta(days=1)
df = MarketDataResponseFrame(data=d, index=pd.date_range(end=end, periods=3, freq='D'))
df.dataset_ids = _test_datasets
return df
def mock_eq_vol_last_empty(_cls, q):
queries = q.get('queries', [])
if len(queries) > 0 and 'Last' in queries[0]['measures']:
return MarketDataResponseFrame()
d = {
'impliedVolatility': [5, 1, 2],
}
end = datetime.date.today() - datetime.timedelta(days=1)
df = MarketDataResponseFrame(data=d, index=pd.date_range(end=end, periods=3, freq='D'))
df.dataset_ids = _test_datasets
return df
def mock_eq_norm(_cls, _q):
d = {
'relativeStrike': [-4.0, 4.0, 0],
'impliedVolatility': [5, 1, 2]
}
df = MarketDataResponseFrame(data=d, index=_index * 3)
df.dataset_ids = _test_datasets
return df
def mock_eq_spot(_cls, _q):
d = {
'relativeStrike': [0.75, 1.25, 1.0],
'impliedVolatility': [5, 1, 2]
}
df = MarketDataResponseFrame(data=d, index=_index * 3)
df.dataset_ids = _test_datasets
return df
def mock_inc(_cls, _q):
d = {
'relativeStrike': [0.25, 0.75],
'impliedVolatility': [5, 1]
}
df = MarketDataResponseFrame(data=d, index=_index * 2)
df.dataset_ids = _test_datasets
return df
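# Central-bank meeting fixtures for asset MARFAGXDQRWM07Y2: a single meeting forward, the spot meeting, both
# together, and the OIS spot rate.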
def mock_meeting_expectation():
data_dict = MarketDataResponseFrame({'date': [dt.date(2019, 12, 6)],
'assetId': ['MARFAGXDQRWM07Y2'],
'location': ['NYC'],
'rateType': ['Meeting Forward'],
'startingDate': [dt.date(2020, 1, 29)],
'endingDate': [dt.date(2020, 1, 29)],
'meetingNumber': [2],
'valuationDate': [dt.date(2019, 12, 6)],
'meetingDate': [dt.date(2020, 1, 23)],
'value': [-0.004550907771]
})
data_dict.dataset_ids = _test_datasets
return data_dict
def mock_meeting_spot():
data_dict = MarketDataResponseFrame({'date': [dt.date(2019, 12, 6)],
'assetId': ['MARFAGXDQRWM07Y2'],
'location': ['NYC'],
'rateType': ['Meeting Forward'],
'startingDate': [dt.date(2019, 10, 30)],
'endingDate': [dt.date(2019, 12, 18)],
'meetingNumber': [0],
'valuationDate': [dt.date(2019, 12, 6)],
'meetingDate': [dt.date(2019, 10, 24)],
'value': [-0.004522570525]
})
data_dict.dataset_ids = _test_datasets
return data_dict
def mock_meeting_absolute():
data_dict = MarketDataResponseFrame({'date': [datetime.date(2019, 12, 6), datetime.date(2019, 12, 6)],
'assetId': ['MARFAGXDQRWM07Y2', 'MARFAGXDQRWM07Y2'],
'location': ['NYC', 'NYC'],
'rateType': ['Meeting Forward', 'Meeting Forward'],
'startingDate': [datetime.date(2019, 10, 30), datetime.date(2020, 1, 29)],
'endingDate': [datetime.date(2019, 10, 30), datetime.date(2020, 1, 29)],
'meetingNumber': [0, 2],
'valuationDate': [datetime.date(2019, 12, 6), datetime.date(2019, 12, 6)],
'meetingDate': [datetime.date(2019, 10, 24), datetime.date(2020, 1, 23)],
'value': [-0.004522570525, -0.004550907771]
})
data_dict.dataset_ids = _test_datasets
return data_dict
def mock_ois_spot():
data_dict = MarketDataResponseFrame({'date': [datetime.date(2019, 12, 6)],
'assetId': ['MARFAGXDQRWM07Y2'],
'location': ['NYC'],
'rateType': ['Spot'],
'startingDate': [datetime.date(2019, 12, 6)],
'endingDate': [datetime.date(2019, 12, 7)],
'meetingNumber': [-1],
'valuationDate': [datetime.date(2019, 12, 6)],
'meetingDate': [datetime.date(2019, 12, 6)],
'value': [-0.00455]
})
data_dict.dataset_ids = _test_datasets
return data_dict
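# ESG fixture: score and percentile columns for the ES/G metrics plus the disclosure percentage.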
def mock_esg(_cls, _q):
d = {
"esNumericScore": [2, 4, 6],
"esNumericPercentile": [81.2, 75.4, 65.7],
"esPolicyScore": [2, 4, 6],
"esPolicyPercentile": [81.2, 75.4, 65.7],
"esScore": [2, 4, 6],
"esPercentile": [81.2, 75.4, 65.7],
"esProductImpactScore": [2, 4, 6],
"esProductImpactPercentile": [81.2, 75.4, 65.7],
"gScore": [2, 4, 6],
"gPercentile": [81.2, 75.4, 65.7],
"esMomentumScore": [2, 4, 6],
"esMomentumPercentile": [81.2, 75.4, 65.7],
"gRegionalScore": [2, 4, 6],
"gRegionalPercentile": [81.2, 75.4, 65.7],
"controversyScore": [2, 4, 6],
"controversyPercentile": [81.2, 75.4, 65.7],
"esDisclosurePercentage": [49.2, 55.7, 98.4]
}
df = MarketDataResponseFrame(data=d, index=_index * 3)
df.dataset_ids = _test_datasets
return df
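# Index composition fixture: three constituents (MA1, MA2, MA3) with net weights 0.6, 0.3 and 0.1 as of 2020-01-01.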
def mock_index_positions_data(
asset_id,
start_date,
end_date,
fields=None,
position_type=None
):
return [
{'underlyingAssetId': 'MA3',
'netWeight': 0.1,
'positionType': 'close',
'assetId': 'MA890',
'positionDate': '2020-01-01'
},
{'underlyingAssetId': 'MA1',
'netWeight': 0.6,
'positionType': 'close',
'assetId': 'MA890',
'positionDate': '2020-01-01'
},
{'underlyingAssetId': 'MA2',
'netWeight': 0.3,
'positionType': 'close',
'assetId': 'MA890',
'positionDate': '2020-01-01'
}
]
def mock_rating(_cls, _q):
d = {
'rating': ['Buy', 'Sell', 'Buy', 'Neutral'],
'convictionList': [1, 0, 0, 0]
}
df = MarketDataResponseFrame(data=d, index=pd.to_datetime([datetime.date(2020, 8, 13), datetime.date(2020, 8, 14),
datetime.date(2020, 8, 17), datetime.date(2020, 8, 18)]))
df.dataset_ids = _test_datasets
return df
def mock_gsdeer_gsfeer(_cls, assetId, start_date):
d = {
'gsdeer': [1, 1.2, 1.1],
'gsfeer': [2, 1.8, 1.9],
'year': [2000, 2010, 2020],
'quarter': ['Q1', 'Q2', 'Q3']
}
df = MarketDataResponseFrame(data=d, index=_index * 3)
return df
def mock_factor_profile(_cls, _q):
d = {
'growthScore': [0.238, 0.234, 0.234, 0.230],
'financialReturnsScore': [0.982, 0.982, 0.982, 0.982],
'multipleScore': [0.204, 0.192, 0.190, 0.190],
'integratedScore': [0.672, 0.676, 0.676, 0.674]
}
df = MarketDataResponseFrame(data=d, index=pd.to_datetime([datetime.date(2020, 8, 13), datetime.date(2020, 8, 14),
datetime.date(2020, 8, 17), datetime.date(2020, 8, 18)]))
df.dataset_ids = _test_datasets
return df
def mock_commodity_forecast(_cls, _q):
d = {
'forecastPeriod': ['3m', '3m', '3m', '3m'],
'forecastType': ['spotReturn', 'spotReturn', 'spotReturn', 'spotReturn'],
'commodityForecast': [1700, 1400, 1500, 1600]
}
df = MarketDataResponseFrame(data=d, index=pd.to_datetime([datetime.date(2020, 8, 13), datetime.date(2020, 8, 14),
datetime.date(2020, 8, 17), datetime.date(2020, 8, 18)]))
df.dataset_ids = _test_datasets
return df
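# The tests below share one pattern: patch GsDataApi.get_market_data (or a lower-level helper) with one of the
# fixtures above via Replacer, call the measure, compare the result against a hand-built pandas Series, and finally
# call replace.restore(). A minimal sketch of the pattern (not an additional test):
#     replace = Replacer()
#     replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_eq)
#     actual = tm.skew(mock_spx, '1m', tm.SkewReference.DELTA, 25)
#     replace.restore()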
def test_skew():
replace = Replacer()
mock_spx = Index('MA890', AssetClass.Equity, 'SPX')
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_eq)
actual = tm.skew(mock_spx, '1m', tm.SkewReference.DELTA, 25)
assert_series_equal(pd.Series([2.0], index=_index, name='impliedVolatility'), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_eq_norm)
actual = tm.skew(mock_spx, '1m', tm.SkewReference.NORMALIZED, 4)
assert_series_equal(pd.Series([2.0], index=_index, name='impliedVolatility'), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_eq_spot)
actual = tm.skew(mock_spx, '1m', tm.SkewReference.SPOT, 25)
assert_series_equal(pd.Series([2.0], index=_index, name='impliedVolatility'), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
mock = replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', Mock())
mock.return_value = mock_empty_market_data_response()
actual = tm.skew(mock_spx, '1m', tm.SkewReference.SPOT, 25)
assert actual.empty
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_inc)
with pytest.raises(MqError):
tm.skew(mock_spx, '1m', tm.SkewReference.DELTA, 25)
replace.restore()
with pytest.raises(MqError):
tm.skew(mock_spx, '1m', None, 25)
def test_skew_fx():
replace = Replacer()
cross = Cross('MAA0NE9QX2ABETG6', 'USD/EUR')
xrefs = replace('gs_quant.timeseries.measures.GsAssetApi.get_asset_xrefs', Mock())
xrefs.return_value = [GsTemporalXRef(dt.date(2019, 1, 1), dt.date(2952, 12, 31), XRef(bbid='EURUSD', ))]
replace('gs_quant.markets.securities.SecurityMaster.get_asset', Mock()).return_value = cross
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_fx_delta)
mock = cross
actual = tm.skew(mock, '1m', tm.SkewReference.DELTA, 25)
assert_series_equal(pd.Series([2.0], index=_index, name='impliedVolatility'), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
with pytest.raises(MqError):
tm.skew(mock, '1m', tm.SkewReference.DELTA, 25, real_time=True)
with pytest.raises(MqError):
tm.skew(mock, '1m', tm.SkewReference.SPOT, 25)
with pytest.raises(MqError):
tm.skew(mock, '1m', tm.SkewReference.FORWARD, 25)
with pytest.raises(MqError):
tm.skew(mock, '1m', tm.SkewReference.NORMALIZED, 25)
with pytest.raises(MqError):
tm.skew(mock, '1m', None, 25)
replace.restore()
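# implied_volatility appends the intraday 'Last' point from the mock to the three historical daily points.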
def test_implied_vol():
replace = Replacer()
mock_spx = Index('MA890', AssetClass.Equity, 'SPX')
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_eq_vol)
idx = pd.date_range(end=datetime.datetime.now(pytz.UTC).date(), periods=4, freq='D')
actual = tm.implied_volatility(mock_spx, '1m', tm.VolReference.DELTA_CALL, 25)
assert_series_equal(pd.Series([5, 1, 2, 3], index=idx, name='impliedVolatility'), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
actual = tm.implied_volatility(mock_spx, '1m', tm.VolReference.DELTA_PUT, 75)
assert_series_equal(pd.Series([5, 1, 2, 3], index=idx, name='impliedVolatility'), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
with pytest.raises(MqError):
tm.implied_volatility(mock_spx, '1m', tm.VolReference.DELTA_NEUTRAL)
with pytest.raises(MqError):
tm.implied_volatility(mock_spx, '1m', tm.VolReference.DELTA_CALL)
replace.restore()
def test_implied_vol_no_last():
replace = Replacer()
mock_spx = Index('MA890', AssetClass.Equity, 'SPX')
idx = pd.date_range(end=datetime.date.today() - datetime.timedelta(days=1), periods=3, freq='D')
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_eq_vol_last_err)
actual = tm.implied_volatility(mock_spx, '1m', tm.VolReference.DELTA_CALL, 25)
assert_series_equal(pd.Series([5, 1, 2], index=idx, name='impliedVolatility'), pd.Series(actual))
actual = tm.implied_volatility(mock_spx, '1m', tm.VolReference.DELTA_PUT, 75)
assert_series_equal(pd.Series([5, 1, 2], index=idx, name='impliedVolatility'), pd.Series(actual))
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_eq_vol_last_empty)
actual = tm.implied_volatility(mock_spx, '1m', tm.VolReference.DELTA_CALL, 25)
assert_series_equal(pd.Series([5, 1, 2], index=idx, name='impliedVolatility'), pd.Series(actual))
actual = tm.implied_volatility(mock_spx, '1m', tm.VolReference.DELTA_PUT, 75)
assert_series_equal(pd.Series([5, 1, 2], index=idx, name='impliedVolatility'), pd.Series(actual))
replace.restore()
def test_implied_vol_fx():
replace = Replacer()
mock = Cross('MAA0NE9QX2ABETG6', 'USD/EUR')
xrefs = replace('gs_quant.timeseries.measures.GsAssetApi.get_asset_xrefs', Mock())
xrefs.return_value = [GsTemporalXRef(dt.date(2019, 1, 1), dt.date(2952, 12, 31), XRef(bbid='EURUSD', ))]
replace('gs_quant.markets.securities.SecurityMaster.get_asset', Mock()).return_value = mock
# for different delta strikes
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_fx_vol)
actual = tm.implied_volatility(mock, '1m', tm.VolReference.DELTA_CALL, 25)
expected = pd.Series([5, 1, 2, 3], index=pd.date_range('2019-01-01', periods=4, freq='D'), name='impliedVolatility')
assert_series_equal(expected, pd.Series(actual))
assert actual.dataset_ids == _test_datasets
actual = tm.implied_volatility(mock, '1m', tm.VolReference.DELTA_PUT, 25)
assert_series_equal(expected, pd.Series(actual))
assert actual.dataset_ids == _test_datasets
actual = tm.implied_volatility(mock, '1m', tm.VolReference.DELTA_NEUTRAL)
assert_series_equal(expected, pd.Series(actual))
assert actual.dataset_ids == _test_datasets
actual = tm.implied_volatility(mock, '1m', tm.VolReference.FORWARD, 100)
assert_series_equal(expected, pd.Series(actual))
assert actual.dataset_ids == _test_datasets
actual = tm.implied_volatility(mock, '1m', tm.VolReference.SPOT, 100)
assert_series_equal(expected, pd.Series(actual))
assert actual.dataset_ids == _test_datasets
    # error cases: DELTA_CALL without a relative strike, NORMALIZED reference, and SPOT/FORWARD with a
    # strike other than 100
with pytest.raises(MqError):
tm.implied_volatility(mock, '1m', tm.VolReference.DELTA_CALL)
with pytest.raises(MqError):
tm.implied_volatility(mock, '1m', tm.VolReference.NORMALIZED, 25)
with pytest.raises(MqError):
tm.implied_volatility(mock, '1m', tm.VolReference.SPOT, 25)
with pytest.raises(MqError):
tm.implied_volatility(mock, '1m', tm.VolReference.FORWARD, 25)
replace.restore()
def test_fx_forecast():
replace = Replacer()
mock = Cross('MAA0NE9QX2ABETG6', 'USD/EUR')
xrefs = replace('gs_quant.timeseries.measures.GsAssetApi.get_asset_xrefs', Mock())
xrefs.return_value = [GsTemporalXRef(dt.date(2019, 1, 1), dt.date(2952, 12, 31), XRef(bbid='EURUSD', ))]
replace('gs_quant.markets.securities.SecurityMaster.get_asset', Mock()).return_value = mock
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_fx_forecast)
actual = tm.fx_forecast(mock, '12m')
assert_series_equal(pd.Series([1.1, 1.1, 1.1], index=_index * 3, name='fxForecast'), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
actual = tm.fx_forecast(mock, '3m')
assert_series_equal(pd.Series([1.1, 1.1, 1.1], index=_index * 3, name='fxForecast'), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
with pytest.raises(NotImplementedError):
tm.fx_forecast(mock, '3m', real_time=True)
replace.restore()
def test_fx_forecast_inverse():
replace = Replacer()
get_cross = replace('gs_quant.timeseries.measures.cross_to_usd_based_cross', Mock())
get_cross.return_value = "MATGYV0J9MPX534Z"
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_fx_forecast)
mock = Cross("MAYJPCVVF2RWXCES", 'USD/JPY')
actual = tm.fx_forecast(mock, '3m')
assert_series_equal(pd.Series([1 / 1.1, 1 / 1.1, 1 / 1.1], index=_index * 3, name='fxForecast'),
pd.Series(actual))
assert actual.dataset_ids == _test_datasets
replace.restore()
def test_vol_smile():
replace = Replacer()
mock_spx = Index('MA890', AssetClass.Equity, 'SPX')
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_eq)
actual = tm.vol_smile(mock_spx, '1m', tm.VolSmileReference.FORWARD, '5d')
assert_series_equal(pd.Series([5, 1, 2], index=[0.75, 0.25, 0.5]), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
actual = tm.vol_smile(mock_spx, '1m', tm.VolSmileReference.SPOT, '5d')
assert_series_equal(pd.Series([5, 1, 2], index=[0.75, 0.25, 0.5]), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
market_mock = replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', Mock())
market_mock.return_value = mock_empty_market_data_response()
actual = tm.vol_smile(mock_spx, '1m', tm.VolSmileReference.SPOT, '1d')
assert actual.empty
assert actual.dataset_ids == ()
market_mock.assert_called_once()
with pytest.raises(NotImplementedError):
tm.vol_smile(mock_spx, '1m', tm.VolSmileReference.SPOT, '1d', real_time=True)
replace.restore()
def test_impl_corr():
replace = Replacer()
mock_spx = Index('MA890', AssetClass.Equity, 'SPX')
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_eq)
actual = tm.implied_correlation(mock_spx, '1m', tm.EdrDataReference.DELTA_CALL, 25)
assert_series_equal(pd.Series([5, 1, 2], index=_index * 3, name='impliedCorrelation'), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
actual = tm.implied_correlation(mock_spx, '1m', tm.EdrDataReference.DELTA_PUT, 75)
assert_series_equal(pd.Series([5, 1, 2], index=_index * 3, name='impliedCorrelation'), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
with pytest.raises(NotImplementedError):
tm.implied_correlation(..., '1m', tm.EdrDataReference.DELTA_PUT, 75, real_time=True)
with pytest.raises(MqError):
tm.implied_correlation(..., '1m', tm.EdrDataReference.DELTA_CALL, 50, '')
replace.restore()
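# N-name implied correlation: constituent vols and index weights are loaded from the CSV resources and the result
# is compared against a precomputed output series.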
def test_impl_corr_n():
spx = Index('MA4B66MW5E27U8P32SB', AssetClass.Equity, 'SPX')
with pytest.raises(MqValueError):
tm.implied_correlation(spx, '1m', tm.EdrDataReference.DELTA_CALL, 0.5,
composition_date=datetime.date.today())
with pytest.raises(MqValueError):
tm.implied_correlation(spx, '1m', tm.EdrDataReference.DELTA_CALL, 0.5, 200)
resources = os.path.join(os.path.dirname(__file__), '..', 'resources')
i_vol = pd.read_csv(os.path.join(resources, 'SPX_50_icorr_in.csv'))
i_vol.index = pd.to_datetime(i_vol['date'])
weights = pd.read_csv(os.path.join(resources, 'SPX_50_weights.csv'))
weights.set_index('underlyingAssetId', inplace=True)
replace = Replacer()
market_data = replace('gs_quant.timeseries.econometrics.GsDataApi.get_market_data', Mock())
market_data.return_value = i_vol
constituents = replace('gs_quant.timeseries.measures._get_index_constituent_weights', Mock())
constituents.return_value = weights
expected = pd.read_csv(os.path.join(resources, 'SPX_50_icorr_out.csv'))
expected.index = pd.to_datetime(expected['date'])
expected = expected['value']
actual = tm.implied_correlation(spx, '1m', tm.EdrDataReference.DELTA_CALL, 0.5, 50, datetime.date(2020, 8, 31),
source='PlotTool')
pd.testing.assert_series_equal(actual, expected, check_names=False)
replace.restore()
def test_real_corr():
spx = Index('MA4B66MW5E27U8P32SB', AssetClass.Equity, 'SPX')
with pytest.raises(NotImplementedError):
tm.realized_correlation(spx, '1m', real_time=True)
replace = Replacer()
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_eq)
actual = tm.realized_correlation(spx, '1m')
assert_series_equal(pd.Series([3.14, 2.71828, 1.44], index=_index * 3), pd.Series(actual), check_names=False)
assert actual.dataset_ids == _test_datasets
replace.restore()
def test_real_corr_missing():
spx = Index('MA4B66MW5E27U8P32SB', AssetClass.Equity, 'SPX')
d = {
'assetId': ['MA4B66MW5E27U8P32SB'] * 3,
'spot': [3000, 3100, 3050],
}
df = MarketDataResponseFrame(data=d, index=pd.date_range('2020-08-01', periods=3, freq='D'))
resources = os.path.join(os.path.dirname(__file__), '..', 'resources')
weights = pd.read_csv(os.path.join(resources, 'SPX_50_weights.csv'))
weights.set_index('underlyingAssetId', inplace=True)
replace = Replacer()
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', lambda *args, **kwargs: df)
constituents = replace('gs_quant.timeseries.measures._get_index_constituent_weights', Mock())
constituents.return_value = weights
with pytest.raises(MqValueError):
tm.realized_correlation(spx, '1m', 50)
replace.restore()
def test_real_corr_n():
spx = Index('MA4B66MW5E27U8P32SB', AssetClass.Equity, 'SPX')
with pytest.raises(MqValueError):
tm.realized_correlation(spx, '1m', composition_date=datetime.date.today())
with pytest.raises(MqValueError):
tm.realized_correlation(spx, '1m', 200)
resources = os.path.join(os.path.dirname(__file__), '..', 'resources')
r_vol = pd.read_csv(os.path.join(resources, 'SPX_50_rcorr_in.csv'))
r_vol.index = pd.to_datetime(r_vol['date'])
weights = pd.read_csv(os.path.join(resources, 'SPX_50_weights.csv'))
weights.set_index('underlyingAssetId', inplace=True)
replace = Replacer()
market_data = replace('gs_quant.timeseries.econometrics.GsDataApi.get_market_data', Mock())
market_data.return_value = r_vol
constituents = replace('gs_quant.timeseries.measures._get_index_constituent_weights', Mock())
constituents.return_value = weights
expected = pd.read_csv(os.path.join(resources, 'SPX_50_rcorr_out.csv'))
expected.index = pd.to_datetime(expected['date'])
expected = expected['value']
actual = tm.realized_correlation(spx, '1m', 50, datetime.date(2020, 8, 31), source='PlotTool')
pd.testing.assert_series_equal(actual, expected, check_names=False)
replace.restore()
def test_cds_implied_vol():
replace = Replacer()
mock_cds = Index('MA890', AssetClass.Equity, 'CDS')
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_eq)
actual = tm.cds_implied_volatility(mock_cds, '1m', '5y', tm.CdsVolReference.DELTA_CALL, 10)
assert_series_equal(pd.Series([5, 1, 2], index=_index * 3, name='impliedVolatilityByDeltaStrike'),
pd.Series(actual))
assert actual.dataset_ids == _test_datasets
actual = tm.cds_implied_volatility(mock_cds, '1m', '5y', tm.CdsVolReference.FORWARD, 100)
assert_series_equal(pd.Series([5, 1, 2], index=_index * 3, name='impliedVolatilityByDeltaStrike'),
pd.Series(actual))
assert actual.dataset_ids == _test_datasets
with pytest.raises(NotImplementedError):
tm.cds_implied_volatility(..., '1m', '5y', tm.CdsVolReference.DELTA_PUT, 75, real_time=True)
replace.restore()
def test_avg_impl_vol():
replace = Replacer()
mock_spx = Index('MA890', AssetClass.Equity, 'SPX')
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_eq)
actual = tm.average_implied_volatility(mock_spx, '1m', tm.EdrDataReference.DELTA_CALL, 25)
assert_series_equal(pd.Series([5, 1, 2], index=_index * 3, name='averageImpliedVolatility'), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
actual = tm.average_implied_volatility(mock_spx, '1m', tm.EdrDataReference.DELTA_PUT, 75)
assert_series_equal(pd.Series([5, 1, 2], index=_index * 3, name='averageImpliedVolatility'), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
replace.restore()
df1 = pd.DataFrame(data={'impliedVolatility': [1, 2, 3], 'assetId': ['MA1', 'MA1', 'MA1']},
index=pd.date_range(start='2020-01-01', periods=3))
df2 = pd.DataFrame(data={'impliedVolatility': [2, 3, 4], 'assetId': ['MA2', 'MA2', 'MA2']},
index=pd.date_range(start='2020-01-01', periods=3))
df3 = pd.DataFrame(data={'impliedVolatility': [2, 5], 'assetId': ['MA3', 'MA3']},
index=pd.date_range(start='2020-01-01', periods=2))
replace('gs_quant.api.gs.assets.GsAssetApi.get_asset_positions_data', mock_index_positions_data)
market_data_mock = replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', Mock())
mock_implied_vol = MarketDataResponseFrame(pd.concat([df1, df2, df3], join='inner'))
mock_implied_vol.dataset_ids = _test_datasets
market_data_mock.return_value = mock_implied_vol
actual = tm.average_implied_volatility(mock_spx, '1m', tm.EdrDataReference.DELTA_CALL, 25, 3, '1d')
assert_series_equal(pd.Series([1.4, 2.6, 3.33333],
index=pd.date_range(start='2020-01-01', periods=3), name='averageImpliedVolatility'),
pd.Series(actual))
assert actual.dataset_ids == _test_datasets
with pytest.raises(NotImplementedError):
tm.average_implied_volatility(..., '1m', tm.EdrDataReference.DELTA_PUT, 75, real_time=True)
with pytest.raises(MqValueError):
tm.average_implied_volatility(mock_spx, '1m', tm.EdrDataReference.DELTA_PUT, 75, top_n_of_index=None,
composition_date='1d')
with pytest.raises(NotImplementedError):
tm.average_implied_volatility(mock_spx, '1m', tm.EdrDataReference.DELTA_PUT, 75, top_n_of_index=101)
replace.restore()
def test_avg_realized_vol():
replace = Replacer()
mock_spx = Index('MA890', AssetClass.Equity, 'SPX')
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_eq)
actual = tm.average_realized_volatility(mock_spx, '1m')
assert_series_equal(pd.Series([5, 1, 2], index=_index * 3, name='averageRealizedVolatility'), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
replace.restore()
df1 = pd.DataFrame(data={'spot': [1, 2, 3], 'assetId': ['MA1', 'MA1', 'MA1']},
index=pd.date_range(start='2020-01-01', periods=3))
df2 = pd.DataFrame(data={'spot': [2, 3, 4], 'assetId': ['MA2', 'MA2', 'MA2']},
index=pd.date_range(start='2020-01-01', periods=3))
df3 = pd.DataFrame(data={'spot': [2, 5], 'assetId': ['MA3', 'MA3']},
index=pd.date_range(start='2020-01-01', periods=2))
mock_spot = MarketDataResponseFrame(pd.concat([df1, df2, df3], join='inner'))
mock_spot.dataset_ids = _test_datasets
replace('gs_quant.api.gs.assets.GsAssetApi.get_asset_positions_data', mock_index_positions_data)
market_data_mock = replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', Mock())
market_data_mock.return_value = mock_spot
actual = tm.average_realized_volatility(mock_spx, '2d', Returns.SIMPLE, 3, '1d')
assert_series_equal(pd.Series([392.874026], index=pd.date_range(start='2020-01-03', periods=1),
name='averageRealizedVolatility'),
pd.Series(actual))
assert actual.dataset_ids == _test_datasets
with pytest.raises(NotImplementedError):
tm.average_realized_volatility(mock_spx, '1w', real_time=True)
with pytest.raises(MqValueError):
tm.average_realized_volatility(mock_spx, '1w', composition_date='1d')
with pytest.raises(NotImplementedError):
tm.average_realized_volatility(mock_spx, '1w', Returns.LOGARITHMIC)
with pytest.raises(NotImplementedError):
tm.average_realized_volatility(mock_spx, '1w', Returns.SIMPLE, 201)
replace.restore()
empty_positions_data_mock = replace('gs_quant.api.gs.assets.GsAssetApi.get_asset_positions_data', Mock())
empty_positions_data_mock.return_value = []
with pytest.raises(MqValueError):
tm.average_realized_volatility(mock_spx, '1w', Returns.SIMPLE, 5)
replace.restore()
def test_avg_impl_var():
replace = Replacer()
mock_spx = Index('MA890', AssetClass.Equity, 'SPX')
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_eq)
actual = tm.average_implied_variance(mock_spx, '1m', tm.EdrDataReference.DELTA_CALL, 25)
assert_series_equal(pd.Series([5, 1, 2], index=_index * 3, name='averageImpliedVariance'), pd.Series(actual))
actual = tm.average_implied_variance(mock_spx, '1m', tm.EdrDataReference.DELTA_PUT, 75)
assert actual.dataset_ids == _test_datasets
assert_series_equal(pd.Series([5, 1, 2], index=_index * 3, name='averageImpliedVariance'), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
with pytest.raises(NotImplementedError):
tm.average_implied_variance(..., '1m', tm.EdrDataReference.DELTA_PUT, 75, real_time=True)
replace.restore()
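# basis_swap_spread: invalid tenors and benchmark combinations raise MqValueError, then the happy path is checked
# against mock_curr for two benchmark/tenor setups.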
def test_basis_swap_spread(mocker):
replace = Replacer()
args = dict(swap_tenor='10y', spread_benchmark_type=None, spread_tenor=None,
reference_benchmark_type=None, reference_tenor=None, forward_tenor='0b', real_time=False)
mock_nok = Currency('MA891', 'NOK')
xrefs = replace('gs_quant.timeseries.measures.Asset.get_identifier', Mock())
xrefs.return_value = 'NOK'
args['asset'] = mock_nok
with pytest.raises(NotImplementedError):
tm_rates.basis_swap_spread(**args)
mock_usd = Currency('MAZ7RWC904JYHYPS', 'USD')
args['asset'] = mock_usd
xrefs = replace('gs_quant.timeseries.measures.Asset.get_identifier', Mock())
xrefs.return_value = 'USD'
with pytest.raises(NotImplementedError):
tm_rates.basis_swap_spread(..., '1y', real_time=True)
args['swap_tenor'] = '5yr'
with pytest.raises(MqValueError):
tm_rates.basis_swap_spread(**args)
args['swap_tenor'] = '6y'
args['spread_tenor'] = '5yr'
with pytest.raises(MqValueError):
tm_rates.basis_swap_spread(**args)
args['spread_tenor'] = '3m'
args['reference_tenor'] = '5yr'
with pytest.raises(MqValueError):
tm_rates.basis_swap_spread(**args)
args['reference_tenor'] = '6m'
args['forward_tenor'] = '5yr'
with pytest.raises(MqValueError):
tm_rates.basis_swap_spread(**args)
args['forward_tenor'] = None
args['spread_benchmark_type'] = BenchmarkType.STIBOR
with pytest.raises(MqValueError):
tm_rates.basis_swap_spread(**args)
args['spread_benchmark_type'] = BenchmarkType.LIBOR
args['reference_benchmark_type'] = 'libor_3m'
with pytest.raises(MqValueError):
tm_rates.basis_swap_spread(**args)
args['reference_benchmark_type'] = BenchmarkType.LIBOR
xrefs = replace('gs_quant.timeseries.measures.Asset.get_identifier', Mock())
xrefs.return_value = 'USD'
identifiers = replace('gs_quant.timeseries.measures_rates._get_tdapi_rates_assets', Mock())
identifiers.return_value = {'MAQB1PGEJFCET3GG'}
mocker.patch.object(GsDataApi, 'get_market_data', return_value=mock_curr(None, None))
actual = tm_rates.basis_swap_spread(**args)
expected = tm.ExtendedSeries([1, 2, 3], index=_index * 3, name='basisSwapRate')
expected.dataset_ids = _test_datasets
assert_series_equal(expected, actual)
assert actual.dataset_ids == expected.dataset_ids
args['reference_benchmark_type'] = BenchmarkType.SOFR
args['reference_tenor'] = '1y'
args['reference_benchmark_type'] = BenchmarkType.LIBOR
args['reference_tenor'] = '3m'
xrefs = replace('gs_quant.timeseries.measures.Asset.get_identifier', Mock())
xrefs.return_value = 'USD'
identifiers = replace('gs_quant.timeseries.measures_rates._get_tdapi_rates_assets', Mock())
identifiers.return_value = {'MA06ATQ9CM0DCZFC'}
mocker.patch.object(GsDataApi, 'get_market_data', return_value=mock_curr(None, None))
actual = tm_rates.basis_swap_spread(**args)
expected = tm.ExtendedSeries([1, 2, 3], index=_index * 3, name='basisSwapRate')
expected.dataset_ids = _test_datasets
assert_series_equal(expected, actual)
assert actual.dataset_ids == expected.dataset_ids
replace.restore()
def test_swap_rate(mocker):
replace = Replacer()
args = dict(swap_tenor='10y', benchmark_type=None, floating_rate_tenor=None, forward_tenor='0b', real_time=False)
mock_usd = Currency('MAZ7RWC904JYHYPS', 'USD')
args['asset'] = mock_usd
xrefs = replace('gs_quant.timeseries.measures.Asset.get_identifier', Mock())
xrefs.return_value = 'USD'
args['swap_tenor'] = '5yr'
with pytest.raises(MqValueError):
tm_rates.swap_rate(**args)
args['swap_tenor'] = '10y'
args['floating_rate_tenor'] = '5yr'
with pytest.raises(MqValueError):
tm_rates.swap_rate(**args)
args['floating_rate_tenor'] = '1y'
args['forward_tenor'] = '5yr'
with pytest.raises(MqValueError):
tm_rates.swap_rate(**args)
args['forward_tenor'] = None
args['benchmark_type'] = BenchmarkType.STIBOR
with pytest.raises(MqValueError):
tm_rates.swap_rate(**args)
args['benchmark_type'] = 'sonia'
with pytest.raises(MqValueError):
tm_rates.swap_rate(**args)
args['benchmark_type'] = 'fed_funds'
xrefs = replace('gs_quant.timeseries.measures.Asset.get_identifier', Mock())
xrefs.return_value = 'USD'
identifiers = replace('gs_quant.timeseries.measures_rates._get_tdapi_rates_assets', Mock())
identifiers.return_value = {'MAZ7RWC904JYHYPS'}
mocker.patch.object(GsDataApi, 'get_market_data', return_value=mock_curr(None, None))
actual = tm_rates.swap_rate(**args)
expected = tm.ExtendedSeries([1, 2, 3], index=_index * 3, name='swapRate')
expected.dataset_ids = _test_datasets
assert_series_equal(expected, actual)
assert actual.dataset_ids == _test_datasets
xrefs = replace('gs_quant.timeseries.measures.Asset.get_identifier', Mock())
xrefs.return_value = 'EUR'
identifiers = replace('gs_quant.timeseries.measures_rates._get_tdapi_rates_assets', Mock())
identifiers.return_value = {'MAJNQPFGN1EBDHAE'}
mocker.patch.object(GsDataApi, 'get_market_data', return_value=mock_curr(None, None))
args['asset'] = Currency('MAJNQPFGN1EBDHAE', 'EUR')
args['benchmark_type'] = 'estr'
actual = tm_rates.swap_rate(**args)
expected = tm.ExtendedSeries([1, 2, 3], index=_index * 3, name='swapRate')
expected.dataset_ids = _test_datasets
assert_series_equal(expected, actual)
assert actual.dataset_ids == _test_datasets
replace.restore()
def test_swap_annuity(mocker):
replace = Replacer()
args = dict(swap_tenor='10y', benchmark_type=None, floating_rate_tenor=None, forward_tenor='0b', real_time=False)
    mock_pln = Currency('MA891', 'PLN')
xrefs = replace('gs_quant.timeseries.measures.Asset.get_identifier', Mock())
xrefs.return_value = 'PLN'
    args['asset'] = mock_pln
with pytest.raises(NotImplementedError):
tm_rates.swap_annuity(**args)
mock_usd = Currency('MAZ7RWC904JYHYPS', 'USD')
args['asset'] = mock_usd
xrefs = replace('gs_quant.timeseries.measures.Asset.get_identifier', Mock())
xrefs.return_value = 'USD'
with pytest.raises(NotImplementedError):
tm_rates.swap_annuity(..., '1y', real_time=True)
args['swap_tenor'] = '5yr'
with pytest.raises(MqValueError):
tm_rates.swap_annuity(**args)
args['swap_tenor'] = '10y'
args['floating_rate_tenor'] = '5yr'
with pytest.raises(MqValueError):
tm_rates.swap_annuity(**args)
args['floating_rate_tenor'] = '1y'
args['forward_tenor'] = '5yr'
with pytest.raises(MqValueError):
tm_rates.swap_annuity(**args)
args['forward_tenor'] = None
args['benchmark_type'] = BenchmarkType.STIBOR
with pytest.raises(MqValueError):
tm_rates.swap_annuity(**args)
args['benchmark_type'] = BenchmarkType.SOFR
xrefs = replace('gs_quant.timeseries.measures.Asset.get_identifier', Mock())
xrefs.return_value = 'USD'
identifiers = replace('gs_quant.timeseries.measures_rates._get_tdapi_rates_assets', Mock())
identifiers.return_value = {'MAZ7RWC904JYHYPS'}
mocker.patch.object(GsDataApi, 'get_market_data', return_value=mock_curr(None, None))
actual = tm_rates.swap_annuity(**args)
expected = abs(tm.ExtendedSeries([1.0, 2.0, 3.0], index=_index * 3, name='swapAnnuity') * 1e4 / 1e8)
expected.dataset_ids = _test_datasets
assert_series_equal(expected, actual)
assert actual.dataset_ids == expected.dataset_ids
replace.restore()
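# Term-structure tests: termination/effective tenors in the mocked frame are mapped onto forward dates and
# filtered by the active DataContext.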
def test_swap_term_structure():
replace = Replacer()
args = dict(benchmark_type=None, floating_rate_tenor=None, tenor_type=tm_rates._SwapTenorType.FORWARD_TENOR,
tenor='0b', real_time=False)
    mock_pln = Currency('MA891', 'PLN')
xrefs = replace('gs_quant.timeseries.measures.Asset.get_identifier', Mock())
xrefs.return_value = 'PLN'
    args['asset'] = mock_pln
with pytest.raises(NotImplementedError):
tm_rates.swap_term_structure(**args)
mock_usd = Currency('MAZ7RWC904JYHYPS', 'USD')
args['asset'] = mock_usd
xrefs = replace('gs_quant.timeseries.measures.Asset.get_identifier', Mock())
xrefs.return_value = 'USD'
with pytest.raises(NotImplementedError):
tm_rates.swap_term_structure(..., '1y', real_time=True)
args['floating_rate_tenor'] = '5yr'
with pytest.raises(MqValueError):
tm_rates.swap_term_structure(**args)
args['floating_rate_tenor'] = '3m'
args['tenor_type'] = 'expiry'
with pytest.raises(MqValueError):
tm_rates.swap_term_structure(**args)
args['tenor_type'] = None
args['tenor'] = '5yr'
with pytest.raises(MqValueError):
tm_rates.swap_term_structure(**args)
args['tenor'] = None
args['benchmark_type'] = BenchmarkType.STIBOR
with pytest.raises(MqValueError):
tm_rates.swap_term_structure(**args)
args['benchmark_type'] = BenchmarkType.LIBOR
bd_mock = replace('gs_quant.data.dataset.Dataset.get_data', Mock())
bd_mock.return_value = pd.DataFrame(data=dict(date="2020-04-10", exchange="NYC", description="Good Friday"),
index=[pd.Timestamp('2020-04-10')])
args['pricing_date'] = datetime.date(2020, 4, 10)
with pytest.raises(MqValueError):
tm_rates.swap_term_structure(**args)
args['pricing_date'] = None
xrefs = replace('gs_quant.timeseries.measures.Asset.get_identifier', Mock())
xrefs.return_value = 'USD'
identifiers_empty = replace('gs_quant.timeseries.measures.GsAssetApi.get_many_assets', Mock())
identifiers_empty.return_value = {}
with pytest.raises(MqValueError):
tm_rates.swap_term_structure(**args)
identifiers = replace('gs_quant.timeseries.measures.GsAssetApi.get_many_assets', Mock())
mock_asset = Currency('USD', name='USD')
mock_asset.id = 'MAEMPCXQG3T716EX'
mock_asset.exchange = 'OTC'
identifiers.return_value = [mock_asset]
d = {
'terminationTenor': ['1y', '2y', '3y', '4y'], 'swapRate': [1, 2, 3, 4],
'assetId': ['MAEMPCXQG3T716EX', 'MAFRSWPAF5QPNTP2', 'MA88BXZ3TCTXTFW1', 'MAC4KAG9B9ZAZHFT']
}
pricing_date_mock = replace('gs_quant.timeseries.measures_rates._range_from_pricing_date', Mock())
pricing_date_mock.return_value = [datetime.date(2019, 1, 1), datetime.date(2019, 1, 1)]
bd_mock.return_value = pd.DataFrame()
market_data_mock = replace('gs_quant.timeseries.measures_rates._market_data_timed', Mock())
market_data_mock.return_value = pd.DataFrame()
df = pd.DataFrame(data=d, index=_index * 4)
assert tm_rates.swap_term_structure(**args).empty
market_data_mock.return_value = df
with DataContext('2019-01-01', '2025-01-01'):
actual = tm_rates.swap_term_structure(**args)
actual.dataset_ids = _test_datasets
expected = tm.ExtendedSeries([1, 2, 3, 4], index=pd.to_datetime(['2020-01-01', '2021-01-01', '2021-12-31',
'2022-12-30']))
expected.dataset_ids = _test_datasets
assert_series_equal(expected, actual, check_names=False)
assert actual.dataset_ids == expected.dataset_ids
df = pd.DataFrame(data={'effectiveTenor': ['1y'], 'swapRate': [1], 'assetId': ['MAEMPCXQG3T716EX']}, index=_index)
market_data_mock.return_value = df
args['tenor_type'] = 'swap_tenor'
args['tenor'] = '5y'
with DataContext('2019-01-01', '2025-01-01'):
actual = tm_rates.swap_term_structure(**args)
actual.dataset_ids = _test_datasets
expected = tm.ExtendedSeries([1], index=pd.to_datetime(['2020-01-01']))
expected.dataset_ids = _test_datasets
assert_series_equal(expected, actual, check_names=False)
assert actual.dataset_ids == expected.dataset_ids
d = {
'effectiveTenor': ['1y', '2y', '3y', '4y'], 'swapRate': [1, 2, 3, 4],
'assetId': ['MAEMPCXQG3T716EX', 'MAFRSWPAF5QPNTP2', 'MA88BXZ3TCTXTFW1', 'MAC4KAG9B9ZAZHFT']
}
df = pd.DataFrame(data=d, index=_index * 4)
market_data_mock.return_value = df
args['tenor_type'] = 'swap_tenor'
args['tenor'] = '5yr'
with pytest.raises(MqValueError):
tm_rates.swap_term_structure(**args)
args['tenor'] = '5y'
market_data_mock.return_value = pd.DataFrame()
df = pd.DataFrame(data=d, index=_index * 4)
assert tm_rates.swap_term_structure(**args).empty
market_data_mock.return_value = df
with DataContext('2019-01-01', '2025-01-01'):
actual = tm_rates.swap_term_structure(**args)
actual.dataset_ids = _test_datasets
expected = tm.ExtendedSeries([1, 2, 3, 4], index=pd.to_datetime(['2020-01-01', '2021-01-01', '2021-12-31',
'2022-12-30']))
expected.dataset_ids = _test_datasets
assert_series_equal(expected, actual, check_names=False)
assert actual.dataset_ids == expected.dataset_ids
replace.restore()
def test_basis_swap_term_structure():
replace = Replacer()
range_mock = replace('gs_quant.timeseries.measures_rates._range_from_pricing_date', Mock())
range_mock.return_value = [datetime.date(2019, 1, 1), datetime.date(2019, 1, 1)]
args = dict(spread_benchmark_type=None, spread_tenor=None,
reference_benchmark_type=None, reference_tenor=None, tenor_type=tm_rates._SwapTenorType.FORWARD_TENOR,
tenor='0b', real_time=False)
mock_nok = Currency('MA891', 'NOK')
xrefs = replace('gs_quant.timeseries.measures.Asset.get_identifier', Mock())
xrefs.return_value = 'NOK'
args['asset'] = mock_nok
with pytest.raises(NotImplementedError):
tm_rates.basis_swap_term_structure(**args)
mock_usd = Currency('MAZ7RWC904JYHYPS', 'USD')
args['asset'] = mock_usd
xrefs = replace('gs_quant.timeseries.measures.Asset.get_identifier', Mock())
xrefs.return_value = 'USD'
with pytest.raises(NotImplementedError):
tm_rates.basis_swap_term_structure(..., '1y', real_time=True)
args['spread_tenor'] = '5yr'
with pytest.raises(MqValueError):
tm_rates.basis_swap_term_structure(**args)
args['spread_tenor'] = '3m'
args['reference_tenor'] = '5yr'
with pytest.raises(MqValueError):
tm_rates.basis_swap_term_structure(**args)
args['reference_tenor'] = '6m'
args['tenor_type'] = 'expiry'
with pytest.raises(MqValueError):
tm_rates.basis_swap_term_structure(**args)
args['tenor_type'] = 'forward_tenor'
args['tenor'] = '5yr'
with pytest.raises(MqValueError):
tm_rates.basis_swap_term_structure(**args)
args['tenor'] = None
args['spread_benchmark_type'] = BenchmarkType.STIBOR
with pytest.raises(MqValueError):
tm_rates.basis_swap_term_structure(**args)
args['spread_benchmark_type'] = BenchmarkType.LIBOR
args['reference_benchmark_type'] = BenchmarkType.STIBOR
with pytest.raises(MqValueError):
tm_rates.basis_swap_term_structure(**args)
args['reference_benchmark_type'] = BenchmarkType.LIBOR
bd_mock = replace('gs_quant.data.dataset.Dataset.get_data', Mock())
bd_mock.return_value = pd.DataFrame(data=dict(date="2020-04-10", exchange="NYC", description="Good Friday"),
index=[pd.Timestamp('2020-04-10')])
args['pricing_date'] = datetime.date(2020, 4, 10)
with pytest.raises(MqValueError):
tm_rates.basis_swap_term_structure(**args)
args['pricing_date'] = None
xrefs = replace('gs_quant.timeseries.measures.Asset.get_identifier', Mock())
xrefs.return_value = 'USD'
identifiers_empty = replace('gs_quant.timeseries.measures.GsAssetApi.get_many_assets', Mock())
identifiers_empty.return_value = {}
with pytest.raises(MqValueError):
tm_rates.basis_swap_term_structure(**args)
identifiers = replace('gs_quant.timeseries.measures.GsAssetApi.get_many_assets', Mock())
mock_asset = Currency('USD', name='USD')
mock_asset.id = 'MAEMPCXQG3T716EX'
mock_asset.exchange = 'OTC'
identifiers.return_value = [mock_asset]
d = {
'terminationTenor': ['1y', '2y', '3y', '4y'], 'basisSwapRate': [1, 2, 3, 4],
'assetId': ['MAEMPCXQG3T716EX', 'MAFRSWPAF5QPNTP2', 'MA88BXZ3TCTXTFW1', 'MAC4KAG9B9ZAZHFT']
}
pricing_date_mock = replace('gs_quant.timeseries.measures_rates._range_from_pricing_date', Mock())
pricing_date_mock.return_value = [datetime.date(2019, 1, 1), datetime.date(2019, 1, 1)]
bd_mock.return_value = pd.DataFrame()
market_data_mock = replace('gs_quant.timeseries.measures_rates._market_data_timed', Mock())
market_data_mock.return_value = pd.DataFrame()
assert tm_rates.basis_swap_term_structure(**args).empty
df = pd.DataFrame(data=d, index=_index * 4)
market_data_mock.return_value = df
with DataContext('2019-01-01', '2025-01-01'):
actual = tm_rates.basis_swap_term_structure(**args)
actual.dataset_ids = _test_datasets
expected = tm.ExtendedSeries([1, 2, 3, 4], index=pd.to_datetime(['2020-01-01', '2021-01-01', '2021-12-31',
'2022-12-30']))
expected.dataset_ids = _test_datasets
assert_series_equal(expected, actual, check_names=False)
assert actual.dataset_ids == expected.dataset_ids
d = {
'effectiveTenor': ['1y', '2y', '3y', '4y'], 'basisSwapRate': [1, 2, 3, 4],
'assetId': ['MAEMPCXQG3T716EX', 'MAFRSWPAF5QPNTP2', 'MA88BXZ3TCTXTFW1', 'MAC4KAG9B9ZAZHFT']
}
bd_mock.return_value = pd.DataFrame()
market_data_mock = replace('gs_quant.timeseries.measures_rates._market_data_timed', Mock())
df = pd.DataFrame(data=d, index=_index * 4)
market_data_mock.return_value = df
args['tenor_type'] = tm_rates._SwapTenorType.SWAP_TENOR
args['tenor'] = '5y'
with DataContext('2019-01-01', '2025-01-01'):
actual = tm_rates.basis_swap_term_structure(**args)
actual.dataset_ids = _test_datasets
expected = tm.ExtendedSeries([1, 2, 3, 4], index=pd.to_datetime(['2020-01-01', '2021-01-01', '2021-12-31',
'2022-12-30']))
expected.dataset_ids = _test_datasets
assert_series_equal(expected, actual, check_names=False)
assert actual.dataset_ids == expected.dataset_ids
df = pd.DataFrame(data={'effectiveTenor': ['1y'], 'basisSwapRate': [1], 'assetId': ['MAEMPCXQG3T716EX']},
index=_index)
market_data_mock.return_value = df
with DataContext('2019-01-01', '2025-01-01'):
actual = tm_rates.basis_swap_term_structure(**args)
actual.dataset_ids = _test_datasets
expected = tm.ExtendedSeries([1], index=pd.to_datetime(['2020-01-01']))
expected.dataset_ids = _test_datasets
assert_series_equal(expected, actual, check_names=False)
assert actual.dataset_ids == expected.dataset_ids
replace.restore()
def test_cap_floor_vol():
replace = Replacer()
mock_usd = Currency('MA890', 'USD')
xrefs = replace('gs_quant.timeseries.measures.GsAssetApi.get_asset_xrefs', Mock())
xrefs.return_value = [GsTemporalXRef(dt.date(2019, 1, 1), dt.date(2952, 12, 31), XRef(bbid='USD', ))]
identifiers = replace('gs_quant.timeseries.measures.GsAssetApi.map_identifiers', Mock())
identifiers.return_value = {'USD-LIBOR-BBA': 'MA123'}
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_curr)
actual = tm.cap_floor_vol(mock_usd, '5y', 50)
assert_series_equal(pd.Series([1, 2, 3], index=_index * 3, name='capFloorVol'), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
with pytest.raises(NotImplementedError):
tm.cap_floor_vol(..., '5y', 50, real_time=True)
replace.restore()
def test_cap_floor_atm_fwd_rate():
replace = Replacer()
mock_usd = Currency('MA890', 'USD')
xrefs = replace('gs_quant.timeseries.measures.GsAssetApi.get_asset_xrefs', Mock())
xrefs.return_value = [GsTemporalXRef(dt.date(2019, 1, 1), dt.date(2952, 12, 31), XRef(bbid='USD', ))]
identifiers = replace('gs_quant.timeseries.measures.GsAssetApi.map_identifiers', Mock())
identifiers.return_value = {'USD-LIBOR-BBA': 'MA123'}
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_curr)
actual = tm.cap_floor_atm_fwd_rate(mock_usd, '5y')
assert_series_equal(pd.Series([1, 2, 3], index=_index * 3, name='capFloorAtmFwdRate'), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
with pytest.raises(NotImplementedError):
tm.cap_floor_atm_fwd_rate(..., '5y', real_time=True)
replace.restore()
def test_spread_option_vol():
replace = Replacer()
mock_usd = Currency('MA890', 'USD')
xrefs = replace('gs_quant.timeseries.measures.GsAssetApi.get_asset_xrefs', Mock())
xrefs.return_value = [GsTemporalXRef(dt.date(2019, 1, 1), dt.date(2952, 12, 31), XRef(bbid='USD', ))]
identifiers = replace('gs_quant.timeseries.measures.GsAssetApi.map_identifiers', Mock())
identifiers.return_value = {'USD-LIBOR-BBA': 'MA123'}
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_curr)
actual = tm.spread_option_vol(mock_usd, '3m', '10y', '5y', 50)
assert_series_equal(pd.Series([1, 2, 3], index=_index * 3, name='spreadOptionVol'), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
with pytest.raises(NotImplementedError):
tm.spread_option_vol(..., '3m', '10y', '5y', 50, real_time=True)
replace.restore()
def test_spread_option_atm_fwd_rate():
replace = Replacer()
mock_usd = Currency('MA890', 'USD')
xrefs = replace('gs_quant.timeseries.measures.GsAssetApi.get_asset_xrefs', Mock())
xrefs.return_value = [GsTemporalXRef(dt.date(2019, 1, 1), dt.date(2952, 12, 31), XRef(bbid='USD', ))]
identifiers = replace('gs_quant.timeseries.measures.GsAssetApi.map_identifiers', Mock())
identifiers.return_value = {'USD-LIBOR-BBA': 'MA123'}
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_curr)
actual = tm.spread_option_atm_fwd_rate(mock_usd, '3m', '10y', '5y')
assert_series_equal(pd.Series([1, 2, 3], index=_index * 3, name='spreadOptionAtmFwdRate'), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
with pytest.raises(NotImplementedError):
tm.spread_option_atm_fwd_rate(..., '3m', '10y', '5y', real_time=True)
replace.restore()
def test_zc_inflation_swap_rate():
replace = Replacer()
mock_gbp = Currency('MA890', 'GBP')
xrefs = replace('gs_quant.timeseries.measures.GsAssetApi.get_asset_xrefs', Mock())
xrefs.return_value = [GsTemporalXRef(dt.date(2019, 1, 1), dt.date(2952, 12, 31), XRef(bbid='GBP', ))]
identifiers = replace('gs_quant.timeseries.measures.GsAssetApi.map_identifiers', Mock())
identifiers.return_value = {'CPI-UKRPI': 'MA123'}
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_curr)
actual = tm.zc_inflation_swap_rate(mock_gbp, '1y')
assert_series_equal(pd.Series([1, 2, 3], index=_index * 3, name='inflationSwapRate'), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
with pytest.raises(NotImplementedError):
tm.zc_inflation_swap_rate(..., '1y', real_time=True)
replace.restore()
def test_basis():
replace = Replacer()
mock_jpyusd = Cross('MA890', 'USD/JPY')
xrefs = replace('gs_quant.timeseries.measures.GsAssetApi.get_asset_xrefs', Mock())
xrefs.return_value = [GsTemporalXRef(dt.date(2019, 1, 1), dt.date(2952, 12, 31), XRef(bbid='JPYUSD', ))]
identifiers = replace('gs_quant.timeseries.measures.GsAssetApi.map_identifiers', Mock())
identifiers.return_value = {'USD-3m/JPY-3m': 'MA123'}
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_cross)
actual = tm.basis(mock_jpyusd, '1y')
assert_series_equal(pd.Series([1, 2, 3], index=_index * 3, name='basis'), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
with pytest.raises(NotImplementedError):
tm.basis(..., '1y', real_time=True)
replace.restore()
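# _to_offset converts tenor strings such as '3d', '9w', '2m' and '10y' into pandas DateOffsets; an unknown unit
# raises ValueError.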
def test_td():
cases = {'3d': pd.DateOffset(days=3), '9w': pd.DateOffset(weeks=9), '2m': pd.DateOffset(months=2),
'10y': pd.DateOffset(years=10)
}
for k, v in cases.items():
actual = tm._to_offset(k)
assert v == actual, f'expected {v}, got actual {actual}'
with pytest.raises(ValueError):
tm._to_offset('5z')
def test_pricing_range():
import datetime
given = datetime.date(2019, 4, 20)
s, e = tm._range_from_pricing_date('NYSE', given)
assert s == e == given
class MockDate(datetime.date):
@classmethod
def today(cls):
return cls(2019, 5, 25)
# mock
replace = Replacer()
cbd = replace('gs_quant.timeseries.measures._get_custom_bd', Mock())
cbd.return_value = pd.tseries.offsets.BusinessDay()
today = replace('gs_quant.timeseries.measures.pd.Timestamp.today', Mock())
today.return_value = pd.Timestamp(2019, 5, 25)
gold = datetime.date
datetime.date = MockDate
# cases
s, e = tm._range_from_pricing_date('ANY')
assert s == pd.Timestamp(2019, 5, 24)
assert e == pd.Timestamp(2019, 5, 24)
s, e = tm._range_from_pricing_date('ANY', '3m')
assert s == pd.Timestamp(2019, 2, 22)
assert e == pd.Timestamp(2019, 2, 24)
s, e = tm._range_from_pricing_date('ANY', '3b')
assert s == e == pd.Timestamp(2019, 5, 22)
# restore
datetime.date = gold
replace.restore()
def test_var_swap_tenors():
session = GsSession.get(Environment.DEV, token='<PASSWORD>')
replace = Replacer()
get_mock = replace('gs_quant.session.GsSession._get', Mock())
get_mock.return_value = {
'data': [
{
'dataField': 'varSwap',
'filteredFields': [
{
'field': 'tenor',
'values': ['abc', 'xyc']
}
]
}
]
}
with session:
actual = tm._var_swap_tenors(Index('MAXXX', AssetClass.Equity, 'XXX'))
assert actual == ['abc', 'xyc']
get_mock.return_value = {
'data': []
}
with pytest.raises(MqError):
with session:
tm._var_swap_tenors(Index('MAXXX', AssetClass.Equity, 'XXX'))
replace.restore()
def test_tenor_to_month():
with pytest.raises(MqError):
tm._tenor_to_month('1d')
with pytest.raises(MqError):
tm._tenor_to_month('2w')
assert tm._tenor_to_month('3m') == 3
assert tm._tenor_to_month('4y') == 48
def test_month_to_tenor():
assert tm._month_to_tenor(36) == '3y'
assert tm._month_to_tenor(18) == '18m'
def test_forward_var_term():
idx = pd.DatetimeIndex([datetime.date(2020, 4, 1), datetime.date(2020, 4, 2)] * 6)
data = {
'varSwap': [1.1, 1, 2.1, 2, 3.1, 3, 4.1, 4, 5.1, 5, 6.1, 6],
'tenor': ['1w', '1w', '1m', '1m', '5w', '5w', '2m', '2m', '3m', '3m', '5m', '5m']
}
out = MarketDataResponseFrame(data=data, index=idx)
out.dataset_ids = _test_datasets
replace = Replacer()
market_mock = replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', Mock())
market_mock.return_value = out
# Equity
expected = pd.Series([np.nan, 5.29150, 6.55744], name='forwardVarTerm',
index=pd.DatetimeIndex(['2020-05-01', '2020-06-02', '2020-07-02'], name='expirationDate'))
with DataContext('2020-01-01', '2020-07-31'):
actual = tm.forward_var_term(Index('MA123', AssetClass.Equity, '123'), datetime.date(2020, 4, 2))
assert_series_equal(expected, pd.Series(actual))
assert actual.dataset_ids == _test_datasets
market_mock.assert_called_once()
# FX
expected_fx = pd.Series([np.nan, 5.29150, 6.55744, 7.24569], name='forwardVarTerm',
index=pd.DatetimeIndex(['2020-05-01', '2020-06-02', '2020-07-02', '2020-09-02'],
name='expirationDate'))
with DataContext('2020-01-01', '2020-09-02'):
actual_fx = tm.forward_var_term(Cross('ABCDE', 'EURUSD'))
assert_series_equal(expected_fx, pd.Series(actual_fx))
assert actual_fx.dataset_ids == _test_datasets
# no data
market_mock.reset_mock()
market_mock.return_value = mock_empty_market_data_response()
actual = tm.forward_var_term(Index('MA123', AssetClass.Equity, '123'))
assert actual.empty
# real-time
with pytest.raises(NotImplementedError):
tm.forward_var_term(..., real_time=True)
replace.restore()
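# Var-swap fixtures: a daily history plus an intraday 'Last' point; the _fwd and _1t variants add (or omit) the
# forward-tenor rows.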
def _mock_var_swap_data(_cls, q):
queries = q.get('queries', [])
if len(queries) > 0 and 'Last' in queries[0]['measures']:
return MarketDataResponseFrame({'varSwap': [4]}, index=[pd.Timestamp('2019-01-04T12:00:00Z')])
idx = pd.date_range(start="2019-01-01", periods=3, freq="D")
data = {
'varSwap': [1, 2, 3]
}
out = MarketDataResponseFrame(data=data, index=idx)
out.dataset_ids = _test_datasets
return out
def test_var_swap():
replace = Replacer()
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', _mock_var_swap_data)
expected = pd.Series([1, 2, 3, 4], name='varSwap', index=pd.date_range("2019-01-01", periods=4, freq="D"))
actual = tm.var_swap(Index('MA123', AssetClass.Equity, '123'), '1m')
assert_series_equal(expected, pd.Series(actual))
assert actual.dataset_ids == _test_datasets
market_mock = replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', Mock())
market_mock.return_value = mock_empty_market_data_response()
actual = tm.var_swap(Index('MA123', AssetClass.Equity, '123'), '1m')
assert actual.empty
replace.restore()
def _mock_var_swap_fwd(_cls, q):
queries = q.get('queries', [])
if len(queries) > 0 and 'Last' in queries[0]['measures']:
return MarketDataResponseFrame({'varSwap': [4, 4.5], 'tenor': ['1y', '13m']},
index=[pd.Timestamp('2019-01-04T12:00:00Z')] * 2)
idx = pd.date_range(start="2019-01-01", periods=3, freq="D")
d1 = {
'varSwap': [1, 2, 3],
'tenor': ['1y'] * 3
}
d2 = {
'varSwap': [1.5, 2.5, 3.5],
'tenor': ['13m'] * 3
}
df1 = MarketDataResponseFrame(data=d1, index=idx)
df2 = MarketDataResponseFrame(data=d2, index=idx)
out = pd.concat([df1, df2])
out.dataset_ids = _test_datasets
return out
def _mock_var_swap_1t(_cls, q):
queries = q.get('queries', [])
if len(queries) > 0 and 'Last' in queries[0]['measures']:
return MarketDataResponseFrame({'varSwap': [4, 4.5], 'tenor': ['1y', '13m']},
index=[pd.Timestamp('2019-01-04T12:00:00Z')])
idx = pd.date_range(start="2019-01-01", periods=3, freq="D")
d1 = {
'varSwap': [1, 2, 3],
'tenor': ['1y'] * 3
}
df1 = MarketDataResponseFrame(data=d1, index=idx)
df1.dataset_ids = _test_datasets
return df1
def test_var_swap_fwd():
# bad input
with pytest.raises(MqError):
tm.var_swap(Index('MA123', AssetClass.Equity, '123'), '1m', 500)
# regular
replace = Replacer()
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', _mock_var_swap_fwd)
tenors_mock = replace('gs_quant.timeseries.measures._var_swap_tenors', Mock())
tenors_mock.return_value = ['1m', '1y', '13m']
expected = pd.Series([4.1533, 5.7663, 7.1589, 8.4410], name='varSwap',
index=pd.date_range(start="2019-01-01", periods=4, freq="D"))
actual = tm.var_swap(Index('MA123', AssetClass.Equity, '123'), '1m', '1y')
assert_series_equal(expected, pd.Series(actual))
assert actual.dataset_ids == _test_datasets
# no data
market_mock = replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', Mock())
market_mock.return_value = mock_empty_market_data_response()
actual = tm.var_swap(Index('MA123', AssetClass.Equity, '123'), '1m', '1y')
assert actual.empty
assert actual.dataset_ids == ()
# no data for a tenor
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', _mock_var_swap_1t)
actual = tm.var_swap(Index('MA123', AssetClass.Equity, '123'), '1m', '1y')
assert actual.empty
assert actual.dataset_ids == ()
# no such tenors
tenors_mock.return_value = []
actual = tm.var_swap(Index('MA123', AssetClass.Equity, '123'), '1m', '1y')
assert actual.empty
assert actual.dataset_ids == ()
# finish
replace.restore()
def _var_term_typical():
assert DataContext.current_is_set
data = {
'tenor': ['1w', '2w', '1y', '2y'],
'varSwap': [1, 2, 3, 4]
}
out = MarketDataResponseFrame(data=data, index=pd.DatetimeIndex(['2018-01-01'] * 4))
out.dataset_ids = _test_datasets
replace = Replacer()
market_mock = replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', Mock())
market_mock.return_value = out
actual = tm.var_term(Index('MA123', AssetClass.Equity, '123'))
idx = pd.DatetimeIndex(['2018-01-08', '2018-01-15', '2019-01-01', '2020-01-01'], name='varSwap')
expected = pd.Series([1, 2, 3, 4], name='varSwap', index=idx)
expected = expected.loc[DataContext.current.start_date: DataContext.current.end_date]
if expected.empty:
assert actual.empty
else:
assert_series_equal(expected, pd.Series(actual), check_names=False)
assert actual.dataset_ids == _test_datasets
market_mock.assert_called_once()
replace.restore()
return actual
def _var_term_empty():
replace = Replacer()
market_mock = replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', Mock())
market_mock.return_value = mock_empty_market_data_response()
actual = tm.var_term(Index('MAXYZ', AssetClass.Equity, 'XYZ'))
assert actual.empty
assert actual.dataset_ids == ()
market_mock.assert_called_once()
replace.restore()
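# Forward-starting variant: var_swap itself is mocked per tenor and var_term
# should take the latest value of each series, indexed by the forward expiry.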
def _var_term_fwd():
idx = pd.date_range('2018-01-01', periods=2, freq='D')
def mock_var_swap(_asset, tenor, _forward_start_date, **_kwargs):
if tenor == '1m':
series = tm.ExtendedSeries([1, 2], idx, name='varSwap')
series.dataset_ids = _test_datasets
elif tenor == '2m':
series = tm.ExtendedSeries([3, 4], idx, name='varSwap')
series.dataset_ids = _test_datasets
else:
series = tm.ExtendedSeries()
series.dataset_ids = ()
return series
replace = Replacer()
market_mock = replace('gs_quant.timeseries.measures.var_swap', Mock())
market_mock.side_effect = mock_var_swap
tenors_mock = replace('gs_quant.timeseries.measures._var_swap_tenors', Mock())
tenors_mock.return_value = ['1m', '2m', '3m']
actual = tm.var_term(Index('MA123', AssetClass.Equity, '123'), forward_start_date='1m')
idx = pd.DatetimeIndex(['2018-02-02', '2018-03-02'], name='varSwap')
expected = pd.Series([2, 4], name='varSwap', index=idx)
expected = expected.loc[DataContext.current.start_date: DataContext.current.end_date]
if expected.empty:
assert actual.empty
else:
assert_series_equal(expected, pd.Series(actual), check_names=False)
assert actual.dataset_ids == _test_datasets
market_mock.assert_called()
replace.restore()
return actual
def test_var_term():
with DataContext('2018-01-01', '2019-01-01'):
_var_term_typical()
_var_term_empty()
_var_term_fwd()
with DataContext('2019-01-01', '2019-07-04'):
_var_term_fwd()
with DataContext('2018-01-16', '2018-12-31'):
out = _var_term_typical()
assert out.empty
assert out.dataset_ids == _test_datasets
with pytest.raises(MqError):
tm.var_term(..., pricing_date=300)
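# forward_vol strips the implied vol between two expiries from the spot vol
# term structure; the expected values below follow from the mocked marks.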
def test_forward_vol():
idx = pd.DatetimeIndex([datetime.date(2020, 5, 1), datetime.date(2020, 5, 2)] * 4)
data = {
'impliedVolatility': [2.1, 2, 3.1, 3, 4.1, 4, 5.1, 5],
'tenor': ['1m', '1m', '2m', '2m', '3m', '3m', '4m', '4m']
}
out = MarketDataResponseFrame(data=data, index=idx)
out.dataset_ids = _test_datasets
replace = Replacer()
market_mock = replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', Mock())
market_mock.return_value = out
# Equity
expected = pd.Series([5.58659, 5.47723], name='forwardVol',
index=pd.to_datetime(['2020-05-01', '2020-05-02']))
with DataContext('2020-01-01', '2020-09-01'):
actual = tm.forward_vol(Index('MA123', AssetClass.Equity, '123'), '1m', '2m', tm.VolReference.SPOT, 100)
assert_series_equal(expected, pd.Series(actual))
assert actual.dataset_ids == _test_datasets
market_mock.assert_called_once()
# FX
cross_mock = replace('gs_quant.timeseries.measures.cross_stored_direction_for_fx_vol', Mock())
cross_mock.return_value = 'EURUSD'
with DataContext('2020-01-01', '2020-09-01'):
actual_fx = tm.forward_vol(Cross('ABCDE', 'EURUSD'), '1m', '2m', tm.VolReference.SPOT, 100)
assert_series_equal(expected, pd.Series(actual_fx))
assert actual_fx.dataset_ids == _test_datasets
# no data
market_mock.reset_mock()
market_mock.return_value = mock_empty_market_data_response()
actual = tm.forward_vol(Index('MA123', AssetClass.Equity, '123'), '1m', '2m', tm.VolReference.SPOT, 100)
assert actual.empty
# no data for required tenor
market_mock.reset_mock()
market_mock.return_value = MarketDataResponseFrame(data={'impliedVolatility': [2.1, 3.1, 5.1],
'tenor': ['1m', '2m', '4m']},
index=[datetime.date(2020, 5, 1)] * 3)
actual = tm.forward_vol(Index('MA123', AssetClass.Equity, '123'), '1m', '2m', tm.VolReference.SPOT, 100)
assert actual.empty
# real-time
with pytest.raises(NotImplementedError):
tm.forward_vol(..., '1m', '2m', tm.VolReference.SPOT, 100, real_time=True)
replace.restore()
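# Term-structure analogue: forward vols for every listed expiry as of the given
# pricing date, indexed by expirationDate and clipped to the DataContext window.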
def test_forward_vol_term():
idx = pd.DatetimeIndex([datetime.date(2020, 4, 1), datetime.date(2020, 4, 2)] * 6)
data = {
'impliedVolatility': [1.1, 1, 2.1, 2, 3.1, 3, 4.1, 4, 5.1, 5, 6.1, 6],
'tenor': ['1w', '1w', '1m', '1m', '5w', '5w', '2m', '2m', '3m', '3m', '5m', '5m']
}
out = MarketDataResponseFrame(data=data, index=idx)
out.dataset_ids = _test_datasets
replace = Replacer()
market_mock = replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', Mock())
market_mock.return_value = out
# Equity
expected = pd.Series([np.nan, 5.29150, 6.55744], name='forwardVolTerm',
index=pd.DatetimeIndex(['2020-05-01', '2020-06-02', '2020-07-02'], name='expirationDate'))
with DataContext('2020-01-01', '2020-07-31'):
actual = tm.forward_vol_term(Index('MA123', AssetClass.Equity, '123'), tm.VolReference.SPOT, 100,
datetime.date(2020, 4, 2))
assert_series_equal(expected, pd.Series(actual))
assert actual.dataset_ids == _test_datasets
market_mock.assert_called_once()
# FX
cross_mock = replace('gs_quant.timeseries.measures.cross_stored_direction_for_fx_vol', Mock())
cross_mock.return_value = 'EURUSD'
expected_fx = pd.Series([np.nan, 5.29150, 6.55744, 7.24569], name='forwardVolTerm',
index=pd.DatetimeIndex(['2020-05-01', '2020-06-02', '2020-07-02', '2020-09-02'],
name='expirationDate'))
with DataContext('2020-01-01', '2020-09-02'):
actual_fx = tm.forward_vol_term(Cross('ABCDE', 'EURUSD'), tm.VolReference.SPOT, 100)
assert_series_equal(expected_fx, pd.Series(actual_fx))
assert actual_fx.dataset_ids == _test_datasets
# no data
market_mock.reset_mock()
market_mock.return_value = mock_empty_market_data_response()
actual = tm.forward_vol_term(Index('MA123', AssetClass.Equity, '123'), tm.VolReference.SPOT, 100)
assert actual.empty
# real-time
with pytest.raises(NotImplementedError):
tm.forward_vol_term(..., tm.VolReference.SPOT, 100, real_time=True)
replace.restore()
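# vol_term helpers mirror the var_term ones above, operating on impliedVolatility.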
def _vol_term_typical(reference, value):
assert DataContext.current_is_set
data = {
'tenor': ['1w', '2w', '1y', '2y'],
'impliedVolatility': [1, 2, 3, 4]
}
out = MarketDataResponseFrame(data=data, index=pd.DatetimeIndex(['2018-01-01'] * 4))
out.dataset_ids = _test_datasets
replace = Replacer()
market_mock = replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', Mock())
market_mock.return_value = out
actual = tm.vol_term(Index('MA123', AssetClass.Equity, '123'), reference, value)
idx = pd.DatetimeIndex(['2018-01-08', '2018-01-15', '2019-01-01', '2020-01-01'], name='expirationDate')
expected = pd.Series([1, 2, 3, 4], name='impliedVolatility', index=idx)
expected = expected.loc[DataContext.current.start_date: DataContext.current.end_date]
if expected.empty:
assert actual.empty
else:
assert_series_equal(expected, pd.Series(actual))
assert actual.dataset_ids == _test_datasets
market_mock.assert_called_once()
replace.restore()
return actual
def _vol_term_empty():
replace = Replacer()
market_mock = replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', Mock())
market_mock.return_value = MarketDataResponseFrame()
actual = tm.vol_term(Index('MAXYZ', AssetClass.Equity, 'XYZ'), tm.VolReference.DELTA_CALL, 777)
assert actual.empty
assert actual.dataset_ids == ()
market_mock.assert_called_once()
replace.restore()
def test_vol_term():
with DataContext('2018-01-01', '2019-01-01'):
_vol_term_typical(tm.VolReference.SPOT, 100)
_vol_term_typical(tm.VolReference.NORMALIZED, 4)
_vol_term_typical(tm.VolReference.DELTA_PUT, 50)
_vol_term_empty()
with DataContext('2018-01-16', '2018-12-31'):
out = _vol_term_typical(tm.VolReference.SPOT, 100)
assert out.empty
assert out.dataset_ids == _test_datasets
with pytest.raises(NotImplementedError):
tm.vol_term(..., tm.VolReference.SPOT, 100, real_time=True)
with pytest.raises(MqError):
tm.vol_term(Index('MA123', AssetClass.Equity, '123'), tm.VolReference.DELTA_NEUTRAL, 0)
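# FX variant: the cross is resolved via cross_stored_direction_for_fx_vol and
# only delta strike references are accepted (spot/normalized/delta-neutral raise).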
def _vol_term_fx(reference, value):
assert DataContext.current_is_set
data = {
'tenor': ['1w', '2w', '1y', '2y'],
'impliedVolatility': [1, 2, 3, 4]
}
out = MarketDataResponseFrame(data=data, index=pd.DatetimeIndex(['2018-01-01'] * 4))
out.dataset_ids = _test_datasets
replace = Replacer()
market_mock = replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', Mock())
market_mock.return_value = out
cross_mock = replace('gs_quant.timeseries.measures.cross_stored_direction_for_fx_vol', Mock())
cross_mock.return_value = 'EURUSD'
actual = tm.vol_term(Cross('ABCDE', 'EURUSD'), reference, value)
idx = pd.DatetimeIndex(['2018-01-08', '2018-01-15', '2019-01-01', '2020-01-01'], name='expirationDate')
expected = pd.Series([1, 2, 3, 4], name='impliedVolatility', index=idx)
expected = expected.loc[DataContext.current.start_date: DataContext.current.end_date]
if expected.empty:
assert actual.empty
else:
assert_series_equal(expected, pd.Series(actual))
assert actual.dataset_ids == _test_datasets
market_mock.assert_called_once()
replace.restore()
return actual
def test_vol_term_fx():
with pytest.raises(MqError):
tm.vol_term(Cross('MABLUE', 'BLUE'), tm.VolReference.SPOT, 50)
with pytest.raises(MqError):
tm.vol_term(Cross('MABLUE', 'BLUE'), tm.VolReference.NORMALIZED, 1)
with pytest.raises(MqError):
tm.vol_term(Cross('MABLUE', 'BLUE'), tm.VolReference.DELTA_NEUTRAL, 1)
with DataContext('2018-01-01', '2019-01-01'):
_vol_term_fx(tm.VolReference.DELTA_CALL, 50)
with DataContext('2018-01-01', '2019-01-01'):
_vol_term_fx(tm.VolReference.DELTA_PUT, 50)
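# fwd_term: same re-indexing and DataContext clipping, applied to the forward curve.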
def _fwd_term_typical():
assert DataContext.current_is_set
data = {
'tenor': ['1w', '2w', '1y', '2y'],
'forward': [1, 2, 3, 4]
}
out = MarketDataResponseFrame(data=data, index=pd.DatetimeIndex(['2018-01-01'] * 4))
out.dataset_ids = _test_datasets
replace = Replacer()
market_mock = replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', Mock())
market_mock.return_value = out
actual = tm.fwd_term(Index('MA123', AssetClass.Equity, '123'))
idx = pd.DatetimeIndex(['2018-01-08', '2018-01-15', '2019-01-01', '2020-01-01'], name='expirationDate')
expected = pd.Series([1, 2, 3, 4], name='forward', index=idx)
expected = expected.loc[DataContext.current.start_date: DataContext.current.end_date]
if expected.empty:
assert actual.empty
else:
assert_series_equal(expected, pd.Series(actual))
assert actual.dataset_ids == _test_datasets
market_mock.assert_called_once()
replace.restore()
return actual
def _fwd_term_empty():
replace = Replacer()
market_mock = replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', Mock())
market_mock.return_value = mock_empty_market_data_response()
actual = tm.fwd_term(Index('MAXYZ', AssetClass.Equity, 'XYZ'))
assert actual.empty
assert actual.dataset_ids == ()
market_mock.assert_called_once()
replace.restore()
def test_fwd_term():
with DataContext('2018-01-01', '2019-01-01'):
_fwd_term_typical()
_fwd_term_empty()
with DataContext('2018-01-16', '2018-12-31'):
out = _fwd_term_typical()
assert out.empty
assert out.dataset_ids == _test_datasets
with pytest.raises(NotImplementedError):
tm.fwd_term(..., real_time=True)
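# Commod bucketized prices: per-ISO/bucket averages for the single date
# 2019-05-01, precomputed from the mock_commod fixture ('2x16h' and monthly
# granularity have no data and should return empty series).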
def test_bucketize_price():
target = {
'7x24': [27.323461],
'offpeak': [26.004816],
'peak': [27.982783],
'7x8': [26.004816],
'2x16h': [],
'monthly': [],
'CAISO 7x24': [26.953743375],
'CAISO peak': [29.547952562499997],
'MISO 7x24': [27.076390749999998],
'MISO offpeak': [25.263605624999997],
}
replace = Replacer()
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_commod)
mock_pjm = Index('MA001', AssetClass.Commod, 'PJM')
mock_caiso = Index('MA002', AssetClass.Commod, 'CAISO')
mock_miso = Index('MA003', AssetClass.Commod, 'MISO')
with DataContext(datetime.date(2019, 5, 1), datetime.date(2019, 5, 1)):
bbid_mock = replace('gs_quant.timeseries.measures.Asset.get_identifier', Mock())
bbid_mock.return_value = 'MISO'
actual = tm.bucketize_price(mock_miso, 'LMP', bucket='7x24')
assert_series_equal(pd.Series(target['MISO 7x24'],
index=[datetime.date(2019, 5, 1)],
name='price'),
pd.Series(actual))
actual = tm.bucketize_price(mock_miso, 'LMP', bucket='offpeak')
assert_series_equal(pd.Series(target['MISO offpeak'],
index=[datetime.date(2019, 5, 1)],
name='price'),
pd.Series(actual))
bbid_mock.return_value = 'CAISO'
actual = tm.bucketize_price(mock_caiso, 'LMP', bucket='7x24')
assert_series_equal(pd.Series(target['CAISO 7x24'],
index=[datetime.date(2019, 5, 1)],
name='price'),
pd.Series(actual))
actual = tm.bucketize_price(mock_caiso, 'LMP', bucket='peak')
assert_series_equal(pd.Series(target['CAISO peak'],
index=[datetime.date(2019, 5, 1)],
name='price'),
pd.Series(actual))
bbid_mock.return_value = 'PJM'
actual = tm.bucketize_price(mock_pjm, 'LMP', bucket='7x24')
assert_series_equal(pd.Series(target['7x24'],
index=[datetime.date(2019, 5, 1)],
name='price'),
pd.Series(actual))
actual = tm.bucketize_price(mock_pjm, 'LMP', bucket='offpeak')
assert_series_equal(pd.Series(target['offpeak'],
index=[datetime.date(2019, 5, 1)],
name='price'),
pd.Series(actual))
actual = tm.bucketize_price(mock_pjm, 'LMP', bucket='peak')
assert_series_equal(pd.Series(target['peak'],
index=[datetime.date(2019, 5, 1)],
name='price'),
pd.Series(actual))
actual = tm.bucketize_price(mock_pjm, 'LMP', bucket='7x8')
assert_series_equal(pd.Series(target['7x8'],
index=[datetime.date(2019, 5, 1)],
name='price'),
pd.Series(actual))
actual = tm.bucketize_price(mock_pjm, 'LMP', bucket='2x16h')
assert_series_equal(pd.Series(target['2x16h'],
index=[],
name='price'),
pd.Series(actual))
actual = tm.bucketize_price(mock_pjm, 'LMP', granularity='m', bucket='7X24')
assert_series_equal(pd.Series(target['monthly'],
index=[],
name='price'),
pd.Series(actual))
with pytest.raises(ValueError):
tm.bucketize_price(mock_pjm, 'LMP', bucket='7X24', real_time=True)
with pytest.raises(ValueError):
tm.bucketize_price(mock_pjm, 'LMP', bucket='weekday')
with pytest.raises(ValueError):
tm.bucketize_price(mock_caiso, 'LMP', bucket='weekday')
with pytest.raises(ValueError):
tm.bucketize_price(mock_pjm, 'LMP', granularity='yearly')
replace.restore()
# No market data
market_mock = replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', Mock())
market_mock.return_value = mock_empty_market_data_response()
with DataContext(datetime.date(2019, 1, 2), datetime.date(2019, 1, 2)):
bbid_mock = replace('gs_quant.timeseries.measures.Asset.get_identifier', Mock())
bbid_mock.return_value = 'MISO'
actual = tm.bucketize_price(mock_miso, 'LMP', bucket='7x24')
assert_series_equal(pd.Series(dtype='float64'), pd.Series(actual))
replace.restore()
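# US power forwards: quarter and month-range contracts are combined via a
# weighted average of the underlying contract months; targets come from the
# mock_forward_price fixture, and invalid codes or real_time raise ValueError.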
def test_forward_price():
# US Power
target = {
'7x24': [19.46101],
'peak': [23.86745],
'J20 7x24': [18.11768888888889],
'J20-K20 7x24': [19.283921311475414],
'J20-K20 offpeak': [15.82870707070707],
'J20-K20 7x8': [13.020144262295084],
}
replace = Replacer()
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_forward_price)
mock_spp = Index('MA001', AssetClass.Commod, 'SPP')
with DataContext(datetime.date(2019, 1, 2), datetime.date(2019, 1, 2)):
bbid_mock = replace('gs_quant.timeseries.measures.Asset.get_identifier', Mock())
bbid_mock.return_value = 'SPP'
actual = tm.forward_price(mock_spp,
price_method='LMP',
contract_range='2Q20',
bucket='7x24'
)
assert_series_equal(pd.Series(target['7x24'],
index=[datetime.date(2019, 1, 2)],
name='price'),
pd.Series(actual))
actual = tm.forward_price(mock_spp,
price_method='LMP',
contract_range='J20',
bucket='7x24'
)
assert_series_equal(pd.Series(target['J20 7x24'],
index=[datetime.date(2019, 1, 2)],
name='price'),
pd.Series(actual))
actual = tm.forward_price(mock_spp,
price_method='LMP',
contract_range='2Q20',
bucket='PEAK'
)
assert_series_equal(pd.Series(target['peak'],
index=[datetime.date(2019, 1, 2)],
name='price'),
pd.Series(actual))
actual = tm.forward_price(mock_spp,
price_method='LMP',
contract_range='J20-K20',
bucket='7x24'
)
assert_series_equal(pd.Series(target['J20-K20 7x24'],
index=[datetime.date(2019, 1, 2)],
name='price'),
pd.Series(actual))
actual = tm.forward_price(mock_spp,
price_method='LMP',
contract_range='J20-K20',
bucket='offpeak'
)
assert_series_equal(pd.Series(target['J20-K20 offpeak'],
index=[datetime.date(2019, 1, 2)],
name='price'),
pd.Series(actual))
actual = tm.forward_price(mock_spp,
price_method='LMP',
contract_range='J20-K20',
bucket='7x8'
)
assert_series_equal(pd.Series(target['J20-K20 7x8'],
index=[datetime.date(2019, 1, 2)],
name='price'),
pd.Series(actual))
actual = tm.forward_price(mock_spp,
price_method='lmp',
contract_range='2Q20',
bucket='7x24'
)
assert_series_equal(pd.Series(target['7x24'],
index=[datetime.date(2019, 1, 2)],
name='price'),
pd.Series(actual))
with pytest.raises(ValueError):
tm.forward_price(mock_spp,
price_method='LMP',
contract_range='5Q20',
bucket='PEAK'
)
with pytest.raises(ValueError):
tm.forward_price(mock_spp,
price_method='LMP',
contract_range='Invalid',
bucket='PEAK'
)
with pytest.raises(ValueError):
tm.forward_price(mock_spp,
price_method='LMP',
contract_range='3H20',
bucket='7x24'
)
with pytest.raises(ValueError):
tm.forward_price(mock_spp,
price_method='LMP',
contract_range='F20-I20',
bucket='7x24'
)
with pytest.raises(ValueError):
tm.forward_price(mock_spp,
price_method='LMP',
contract_range='2H20',
bucket='7x24',
real_time=True
)
replace.restore()
replace = Replacer()
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_missing_bucket_forward_price)
bbid_mock = replace('gs_quant.timeseries.measures.Asset.get_identifier', Mock())
bbid_mock.return_value = 'SPP'
# Should return empty series as mark for '7x8' bucket is missing
actual = tm.forward_price(mock_spp,
price_method='LMP',
contract_range='2Q20',
bucket='7x24'
)
assert_series_equal(pd.Series(dtype='float64'), pd.Series(actual), check_names=False)
actual = tm.forward_price(mock_spp,
price_method='LMP',
contract_range='2Q20',
bucket='PEAK'
)
assert_series_equal(pd.Series(target['peak'],
index=[datetime.date(2019, 1, 2)],
name='price'),
pd.Series(actual))
actual = tm.forward_price(mock_spp,
price_method='LMP',
contract_range='J20-K20',
bucket='7x24'
)
assert_series_equal(pd.Series(target['J20-K20 7x24'],
index=[datetime.date(2019, 1, 2)],
name='price'),
pd.Series(actual))
replace.restore()
# No market data
market_mock = replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', Mock())
market_mock.return_value = mock_empty_market_data_response()
bbid_mock = replace('gs_quant.timeseries.measures.Asset.get_identifier', Mock())
bbid_mock.return_value = 'SPP'
with DataContext(datetime.date(2019, 1, 2), datetime.date(2019, 1, 2)):
actual = tm.forward_price(mock_spp,
price_method='LMP',
contract_range='2Q20',
bucket='7x24'
)
assert_series_equal(pd.Series(dtype='float64'), pd.Series(actual))
replace.restore()
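# Natural gas hub forwards use the GDD price method and the same contract-code
# parsing (single month 'F21' or strip 'F21-G21').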
def test_natgas_forward_price():
replace = Replacer()
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_natgas_forward_price)
mock = CommodityNaturalGasHub('MA001', 'AGT')
with DataContext(datetime.date(2019, 1, 2), datetime.date(2019, 1, 2)):
actual = pd.Series(tm.forward_price(mock,
price_method='GDD',
contract_range='F21'))
expected = pd.Series([2.880], index=[datetime.date(2019, 1, 2)], name='price')
assert_series_equal(expected, actual)
actual = pd.Series(tm.forward_price(mock,
price_method='GDD',
contract_range='F21-G21'))
expected = pd.Series([2.8629152542372878], index=[datetime.date(2019, 1, 2)], name='price')
assert_series_equal(expected, actual)
with pytest.raises(ValueError):
tm.forward_price(mock,
price_method='GDD',
contract_range='F21-I21')
with pytest.raises(ValueError):
tm.forward_price(mock,
price_method='GDD',
contract_range='I21')
replace.restore()
# No market data
market_mock = replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', Mock())
market_mock.return_value = mock_empty_market_data_response()
with DataContext(datetime.date(2019, 1, 2), datetime.date(2019, 1, 2)):
actual = tm.forward_price(mock,
price_method='GDD',
contract_range='F21')
assert_series_equal(pd.Series(dtype='float64'), pd.Series(actual))
replace.restore()
def test_get_iso_data():
tz_map = {'MISO': 'US/Central', 'CAISO': 'US/Pacific'}
for key in tz_map:
assert (tm._get_iso_data(key)[0] == tz_map[key])
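# _string_to_date_interval parses contract codes (month-letter/year, Cal, quarter,
# half-year and month-name forms) into start/end dates, or an error string.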
def test_string_to_date_interval():
assert (tm._string_to_date_interval("K20")['start_date'] == datetime.date(2020, 5, 1))
assert (tm._string_to_date_interval("K20")['end_date'] == datetime.date(2020, 5, 31))
assert (tm._string_to_date_interval("k20")['start_date'] == datetime.date(2020, 5, 1))
assert (tm._string_to_date_interval("k20")['end_date'] == datetime.date(2020, 5, 31))
assert (tm._string_to_date_interval("Cal22")['start_date'] == datetime.date(2022, 1, 1))
assert (tm._string_to_date_interval("Cal22")['end_date'] == datetime.date(2022, 12, 31))
assert (tm._string_to_date_interval("Cal2012")['start_date'] == datetime.date(2012, 1, 1))
assert (tm._string_to_date_interval("Cal2012")['end_date'] == datetime.date(2012, 12, 31))
assert (tm._string_to_date_interval("Cal53")['start_date'] == datetime.date(1953, 1, 1))
assert (tm._string_to_date_interval("Cal53")['end_date'] == datetime.date(1953, 12, 31))
assert (tm._string_to_date_interval("2010")['start_date'] == datetime.date(2010, 1, 1))
assert (tm._string_to_date_interval("2010")['end_date'] == datetime.date(2010, 12, 31))
assert (tm._string_to_date_interval("3Q20")['start_date'] == datetime.date(2020, 7, 1))
assert (tm._string_to_date_interval("3Q20")['end_date'] == datetime.date(2020, 9, 30))
assert (tm._string_to_date_interval("2h2021")['start_date'] == datetime.date(2021, 7, 1))
assert (tm._string_to_date_interval("2h2021")['end_date'] == datetime.date(2021, 12, 31))
assert (tm._string_to_date_interval("3q20")['start_date'] == datetime.date(2020, 7, 1))
assert (tm._string_to_date_interval("3q20")['end_date'] == datetime.date(2020, 9, 30))
assert (tm._string_to_date_interval("2H2021")['start_date'] == datetime.date(2021, 7, 1))
assert (tm._string_to_date_interval("2H2021")['end_date'] == datetime.date(2021, 12, 31))
assert (tm._string_to_date_interval("Mar2021")['start_date'] == datetime.date(2021, 3, 1))
assert (tm._string_to_date_interval("Mar2021")['end_date'] == datetime.date(2021, 3, 31))
assert (tm._string_to_date_interval("March2021")['start_date'] == datetime.date(2021, 3, 1))
assert (tm._string_to_date_interval("March2021")['end_date'] == datetime.date(2021, 3, 31))
assert (tm._string_to_date_interval("5Q20") == "Invalid Quarter")
assert (tm._string_to_date_interval("HH2021") == "Invalid num")
assert (tm._string_to_date_interval("3H2021") == "Invalid Half Year")
assert (tm._string_to_date_interval("Cal2a") == "Invalid year")
assert (tm._string_to_date_interval("Marc201") == "Invalid date code")
assert (tm._string_to_date_interval("M1a2021") == "Invalid date code")
assert (tm._string_to_date_interval("Marcha2021") == "Invalid date code")
assert (tm._string_to_date_interval("I20") == "Invalid month")
assert (tm._string_to_date_interval("20") == "Unknown date code")
def test_implied_vol_commod():
target = {
'F21': [2.880],
'F21-H21': [2.815756],
}
replace = Replacer()
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_implied_volatility)
mock = Index('MA001', AssetClass.Commod, 'Option NG Exchange')
with DataContext(datetime.date(2019, 1, 2), datetime.date(2019, 1, 2)):
actual = tm.implied_volatility(mock,
tenor='F21-H21')
assert_series_equal(pd.Series(target['F21-H21'],
index=[datetime.date(2019, 1, 2)],
name='price'),
pd.Series(actual))
replace.restore()
def test_fair_price():
target = {
'F21': [2.880],
'F21-H21': [2.815756],
}
replace = Replacer()
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_fair_price)
mock = Index('MA001', AssetClass.Commod, 'Swap NG Exchange')
mock2 = Swap('MA002', AssetClass.Commod, 'Swap Oil')
with DataContext(datetime.date(2019, 1, 2), datetime.date(2019, 1, 2)):
actual = tm.fair_price(mock,
tenor='F21')
assert_series_equal(pd.Series(target['F21'],
index=[datetime.date(2019, 1, 2)],
name='price'),
pd.Series(actual))
with pytest.raises(ValueError):
tm.fair_price(mock,
tenor=None)
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_fair_price_swap)
with DataContext(datetime.date(2019, 1, 2), datetime.date(2019, 1, 2)):
actual = tm.fair_price(mock2)
assert_series_equal(pd.Series([2.880],
index=[pd.Timestamp('2019-01-02')],
name='fairPrice'),
pd.Series(actual),
)
replace.restore()
def test_weighted_average_valuation_curve_for_calendar_strip():
target = {
'F21': [2.880],
'F21-H21': [2.815756],
}
replace = Replacer()
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_fair_price)
mock = Index('MA001', AssetClass.Commod, 'Swap NG Exchange')
with DataContext(datetime.date(2019, 1, 2), datetime.date(2019, 1, 2)):
actual = tm._weighted_average_valuation_curve_for_calendar_strip(mock,
contract_range='F21',
query_type=QueryType.FAIR_PRICE,
measure_field='fairPrice'
)
assert_series_equal(pd.Series(target['F21'],
index=[datetime.date(2019, 1, 2)],
name='price'),
pd.Series(actual))
actual = tm._weighted_average_valuation_curve_for_calendar_strip(mock,
contract_range='F21-H21',
query_type=QueryType.FAIR_PRICE,
measure_field='fairPrice'
)
assert_series_equal(pd.Series(target['F21-H21'],
index=[datetime.date(2019, 1, 2)],
name='price'),
pd.Series(actual))
with pytest.raises(ValueError):
tm._weighted_average_valuation_curve_for_calendar_strip(mock,
contract_range='Invalid',
query_type=QueryType.FAIR_PRICE,
measure_field='fairPrice'
)
with pytest.raises(ValueError):
tm._weighted_average_valuation_curve_for_calendar_strip(mock,
contract_range='F20-I20',
query_type=QueryType.FAIR_PRICE,
measure_field='fairPrice'
)
with pytest.raises(ValueError):
tm._weighted_average_valuation_curve_for_calendar_strip(mock,
contract_range='3H20',
query_type=QueryType.PRICE,
measure_field='fairPrice'
)
replace.restore()
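# Fundamental metric measures: each should pass the mocked 'fundamentalMetric'
# series through unchanged and reject real_time=True.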
def test_fundamental_metrics():
replace = Replacer()
mock_spx = Index('MA890', AssetClass.Equity, 'SPX')
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_eq)
period = '1y'
direction = tm.FundamentalMetricPeriodDirection.FORWARD
actual = tm.dividend_yield(mock_spx, period, direction)
assert_series_equal(pd.Series([5, 1, 2], index=_index * 3, name='fundamentalMetric'), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
with pytest.raises(NotImplementedError):
tm.dividend_yield(..., period, direction, real_time=True)
actual = tm.earnings_per_share(mock_spx, period, direction)
assert_series_equal(pd.Series([5, 1, 2], index=_index * 3, name='fundamentalMetric'), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
with pytest.raises(NotImplementedError):
tm.earnings_per_share(..., period, direction, real_time=True)
actual = tm.earnings_per_share_positive(mock_spx, period, direction)
assert_series_equal(pd.Series([5, 1, 2], index=_index * 3, name='fundamentalMetric'), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
with pytest.raises(NotImplementedError):
tm.earnings_per_share_positive(..., period, direction, real_time=True)
actual = tm.net_debt_to_ebitda(mock_spx, period, direction)
assert_series_equal(pd.Series([5, 1, 2], index=_index * 3, name='fundamentalMetric'), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
with pytest.raises(NotImplementedError):
tm.net_debt_to_ebitda(..., period, direction, real_time=True)
actual = tm.price_to_book(mock_spx, period, direction)
assert_series_equal(pd.Series([5, 1, 2], index=_index * 3, name='fundamentalMetric'), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
with pytest.raises(NotImplementedError):
tm.price_to_book(..., period, direction, real_time=True)
actual = tm.price_to_cash(mock_spx, period, direction)
assert_series_equal(pd.Series([5, 1, 2], index=_index * 3, name='fundamentalMetric'), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
with pytest.raises(NotImplementedError):
tm.price_to_cash(..., period, direction, real_time=True)
actual = tm.price_to_earnings(mock_spx, period, direction)
assert_series_equal(pd.Series([5, 1, 2], index=_index * 3, name='fundamentalMetric'), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
with pytest.raises(NotImplementedError):
tm.price_to_earnings(..., period, direction, real_time=True)
actual = tm.price_to_earnings_positive(mock_spx, period, direction)
assert_series_equal(pd.Series([5, 1, 2], index=_index * 3, name='fundamentalMetric'), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
with pytest.raises(NotImplementedError):
tm.price_to_earnings_positive(..., period, direction, real_time=True)
actual = tm.price_to_sales(mock_spx, period, direction)
assert_series_equal(pd.Series([5, 1, 2], index=_index * 3, name='fundamentalMetric'), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
with pytest.raises(NotImplementedError):
tm.price_to_sales(..., period, direction, real_time=True)
actual = tm.return_on_equity(mock_spx, period, direction)
assert_series_equal(pd.Series([5, 1, 2], index=_index * 3, name='fundamentalMetric'), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
with pytest.raises(NotImplementedError):
tm.return_on_equity(..., period, direction, real_time=True)
actual = tm.sales_per_share(mock_spx, period, direction)
assert_series_equal(pd.Series([5, 1, 2], index=_index * 3, name='fundamentalMetric'), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
with pytest.raises(NotImplementedError):
tm.sales_per_share(..., period, direction, real_time=True)
replace.restore()
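# Central bank measures read the CENTRAL_BANK_WATCH dataset via Dataset.get_data
# (not GsDataApi.get_market_data), hence the dataset-level mocks below.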
def test_central_bank_swap_rate(mocker):
target = {
'meeting_absolute': -0.004550907771,
'meeting_relative': -0.00002833724599999969,
'eoy_absolute': -0.003359767756,
'eoy_relative': 0.001162802769,
'spot': -0.00455
}
mock_eur = Currency('MARFAGXDQRWM07Y2', 'EUR')
with DataContext(dt.date(2019, 12, 6), dt.date(2019, 12, 6)):
replace = Replacer()
xrefs = replace('gs_quant.timeseries.measures.GsAssetApi.get_asset_xrefs', Mock())
xrefs.return_value = [GsTemporalXRef(dt.date(2019, 1, 1), dt.date(2952, 12, 31), XRef(bbid='EUR', ))]
mocker.patch.object(GsAssetApi, 'map_identifiers', side_effect=map_identifiers_default_mocker)
mock_get_data = replace('gs_quant.data.dataset.Dataset.get_data', Mock())
mock_get_data.return_value = mock_meeting_absolute()
actual_abs = tm.central_bank_swap_rate(mock_eur, tm.MeetingType.MEETING_FORWARD, 'absolute',
dt.date(2019, 12, 6))
assert (target['meeting_absolute'] == actual_abs.loc[dt.date(2020, 1, 23)])
assert actual_abs.dataset_ids == ('CENTRAL_BANK_WATCH',)
actual_rel = tm.central_bank_swap_rate(mock_eur, tm.MeetingType.MEETING_FORWARD, 'relative',
dt.date(2019, 12, 6))
assert (target['meeting_relative'] == actual_rel.loc[dt.date(2020, 1, 23)])
assert actual_rel.dataset_ids == ('CENTRAL_BANK_WATCH',)
mock_get_data.return_value = mock_ois_spot()
actual_spot = tm.central_bank_swap_rate(mock_eur, tm.MeetingType.SPOT, 'absolute', dt.date(2019, 12, 6))
assert (target['spot'] == actual_spot.loc[dt.date(2019, 12, 6)])
assert actual_spot.dataset_ids == ('CENTRAL_BANK_WATCH',)
with pytest.raises(MqError):
tm.central_bank_swap_rate(mock_eur, 'meeting_forward')
with pytest.raises(MqError):
tm.central_bank_swap_rate(mock_eur, tm.MeetingType.MEETING_FORWARD, 'normalized', '2019-09-01')
with pytest.raises(MqError):
tm.central_bank_swap_rate(mock_eur, tm.MeetingType.MEETING_FORWARD, 'absolute', 5)
with pytest.raises(MqError):
tm.central_bank_swap_rate(mock_eur, tm.MeetingType.MEETING_FORWARD, 'absolute', '01-09-2019')
with pytest.raises(MqError):
tm.central_bank_swap_rate(mock_eur, tm.MeetingType.SPOT, 'relative')
with pytest.raises(NotImplementedError):
tm.central_bank_swap_rate(mock_eur, tm.MeetingType.SPOT, 'absolute', real_time=True)
replace.restore()
def test_policy_rate_expectation(mocker):
target = {
'meeting_number_absolute': -0.004550907771,
'meeting_number_relative': -0.000028337246,
'meeting_date_relative': -0.000028337246,
'meeting_number_spot': -0.004522570525
}
mock_eur = Currency('MARFAGXDQRWM07Y2', 'EUR')
with DataContext(dt.date(2019, 12, 6), dt.date(2019, 12, 6)):
replace = Replacer()
xrefs = replace('gs_quant.timeseries.measures.GsAssetApi.get_asset_xrefs', Mock())
xrefs.return_value = [GsTemporalXRef(dt.date(2019, 1, 1), dt.date(2952, 12, 31), XRef(bbid='EUR', ))]
mocker.patch.object(GsAssetApi, 'map_identifiers', side_effect=map_identifiers_default_mocker)
mocker.patch.object(Dataset, 'get_data', side_effect=get_data_policy_rate_expectation_mocker)
actual_num = tm.policy_rate_expectation(mock_eur, tm.MeetingType.MEETING_FORWARD, 'absolute', 2)
assert (target['meeting_number_absolute'] == actual_num.loc[dt.date(2019, 12, 6)])
assert actual_num.dataset_ids == ('CENTRAL_BANK_WATCH',)
actual_date = tm.policy_rate_expectation(mock_eur, tm.MeetingType.MEETING_FORWARD, 'absolute',
dt.date(2020, 1, 23))
assert (target['meeting_number_absolute'] == actual_date.loc[dt.date(2019, 12, 6)])
assert actual_date.dataset_ids == ('CENTRAL_BANK_WATCH',)
actual_num = tm.policy_rate_expectation(mock_eur, tm.MeetingType.MEETING_FORWARD, 'relative', 2)
assert_allclose([target['meeting_number_relative']], [actual_num.loc[dt.date(2019, 12, 6)]],
rtol=1e-9, atol=1e-15)
assert actual_num.dataset_ids == ('CENTRAL_BANK_WATCH',)
actual_num = tm.policy_rate_expectation(mock_eur, tm.MeetingType.MEETING_FORWARD, 'absolute', 0)
assert (target['meeting_number_spot'] == actual_num.loc[dt.date(2019, 12, 6)])
assert actual_num.dataset_ids == ('CENTRAL_BANK_WATCH',)
actual_date = tm.policy_rate_expectation(mock_eur, tm.MeetingType.MEETING_FORWARD, 'absolute', '2019-10-24')
assert (target['meeting_number_spot'] == actual_date.loc[dt.date(2019, 12, 6)])
assert actual_date.dataset_ids == ('CENTRAL_BANK_WATCH',)
mocker.patch.object(Dataset, 'get_data', side_effect=[mock_meeting_expectation(),
mock_empty_market_data_response()])
with pytest.raises(MqError):
tm.policy_rate_expectation(mock_eur, tm.MeetingType.MEETING_FORWARD, 'relative', 2)
with pytest.raises(MqError):
tm.policy_rate_expectation(mock_eur, tm.MeetingType.SPOT)
with pytest.raises(MqError):
tm.policy_rate_expectation(mock_eur, tm.MeetingType.MEETING_FORWARD, 'relative', '5')
with pytest.raises(MqError):
tm.policy_rate_expectation(mock_eur, tm.MeetingType.MEETING_FORWARD, 'absolute', 5.5)
with pytest.raises(MqError):
tm.policy_rate_expectation(mock_eur, tm.MeetingType.MEETING_FORWARD, 'absolute', '01-09-2019')
with pytest.raises(MqError):
tm.policy_rate_expectation(mock_eur, tm.MeetingType.MEETING_FORWARD, 'normalized', dt.date(2019, 9, 1))
with pytest.raises(MqError):
tm.policy_rate_expectation(mock_eur, tm.MeetingType.MEETING_FORWARD, 'relative', -2)
with pytest.raises(NotImplementedError):
tm.policy_rate_expectation(mock_eur, tm.MeetingType.SPOT, 'absolute', real_time=True)
mock_get_data = replace('gs_quant.data.dataset.Dataset.get_data', Mock())
mock_get_data.return_value = pd.DataFrame()
with pytest.raises(MqError):
tm.policy_rate_expectation(mock_eur, tm.MeetingType.MEETING_FORWARD, 'absolute', 2)
replace.restore()
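# realized_volatility should reduce to econometrics.volatility applied to the
# underlying spot series with the same window and return type.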
def test_realized_volatility():
from gs_quant.timeseries.econometrics import volatility, Returns
from gs_quant.timeseries.statistics import generate_series
random = generate_series(100).rename('spot')
window = 10
type_ = Returns.SIMPLE
replace = Replacer()
market_data = replace('gs_quant.timeseries.measures._market_data_timed', Mock())
return_value = MarketDataResponseFrame(random)
return_value.dataset_ids = _test_datasets
market_data.return_value = return_value
expected = volatility(random, window, type_)
actual = tm.realized_volatility(Cross('MA123', 'ABCXYZ'), window, type_)
assert_series_equal(expected, pd.Series(actual))
assert actual.dataset_ids == _test_datasets
replace.restore()
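# ESG headline metrics: each EsgMetric member should surface the matching field
# (esNumericScore, gScore, controversyPercentile, ...) from the mocked response.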
def test_esg_headline_metric():
replace = Replacer()
mock_aapl = Stock('MA4B66MW5E27U9VBB94', 'AAPL')
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_esg)
actual = tm.esg_headline_metric(mock_aapl, tm.EsgMetric.ENVIRONMENTAL_SOCIAL_NUMERIC_SCORE)
assert_series_equal(pd.Series([2, 4, 6], index=_index * 3, name='esNumericScore'), pd.Series(actual))
actual = tm.esg_headline_metric(mock_aapl, tm.EsgMetric.ENVIRONMENTAL_SOCIAL_POLICY_SCORE)
assert_series_equal(pd.Series([2, 4, 6], index=_index * 3, name='esPolicyScore'), pd.Series(actual))
actual = tm.esg_headline_metric(mock_aapl, tm.EsgMetric.ENVIRONMENTAL_SOCIAL_AGGREGATE_SCORE)
assert_series_equal(pd.Series([2, 4, 6], index=_index * 3, name='esScore'), pd.Series(actual))
actual = tm.esg_headline_metric(mock_aapl, tm.EsgMetric.ENVIRONMENTAL_SOCIAL_PRODUCT_IMPACT_SCORE)
assert_series_equal(pd.Series([2, 4, 6], index=_index * 3, name='esProductImpactScore'), pd.Series(actual))
actual = tm.esg_headline_metric(mock_aapl, tm.EsgMetric.GOVERNANCE_AGGREGATE_SCORE)
assert_series_equal(pd.Series([2, 4, 6], index=_index * 3, name='gScore'), pd.Series(actual))
actual = tm.esg_headline_metric(mock_aapl, tm.EsgMetric.ENVIRONMENTAL_SOCIAL_MOMENTUM_SCORE)
assert_series_equal(pd.Series([2, 4, 6], index=_index * 3, name='esMomentumScore'), pd.Series(actual))
actual = tm.esg_headline_metric(mock_aapl, tm.EsgMetric.GOVERNANCE_REGIONAL_SCORE)
assert_series_equal(pd.Series([2, 4, 6], index=_index * 3, name='gRegionalScore'), pd.Series(actual))
actual = tm.esg_headline_metric(mock_aapl, tm.EsgMetric.CONTROVERSY_SCORE)
assert_series_equal(pd.Series([2, 4, 6], index=_index * 3, name='controversyScore'), pd.Series(actual))
actual = tm.esg_headline_metric(mock_aapl, tm.EsgMetric.ENVIRONMENTAL_SOCIAL_NUMERIC_PERCENTILE)
assert_series_equal(pd.Series([81.2, 75.4, 65.7], index=_index * 3, name='esNumericPercentile'), pd.Series(actual))
actual = tm.esg_headline_metric(mock_aapl, tm.EsgMetric.ENVIRONMENTAL_SOCIAL_POLICY_PERCENTILE)
assert_series_equal(pd.Series([81.2, 75.4, 65.7], index=_index * 3, name='esPolicyPercentile'), pd.Series(actual))
actual = tm.esg_headline_metric(mock_aapl, tm.EsgMetric.ENVIRONMENTAL_SOCIAL_AGGREGATE_PERCENTILE)
assert_series_equal(pd.Series([81.2, 75.4, 65.7], index=_index * 3, name='esPercentile'), pd.Series(actual))
actual = tm.esg_headline_metric(mock_aapl, tm.EsgMetric.ENVIRONMENTAL_SOCIAL_PRODUCT_IMPACT_PERCENTILE)
assert_series_equal(pd.Series([81.2, 75.4, 65.7], index=_index * 3, name='esProductImpactPercentile'),
pd.Series(actual))
actual = tm.esg_headline_metric(mock_aapl, tm.EsgMetric.GOVERNANCE_AGGREGATE_PERCENTILE)
assert_series_equal(pd.Series([81.2, 75.4, 65.7], index=_index * 3, name='gPercentile'),
pd.Series(actual))
actual = tm.esg_headline_metric(mock_aapl, tm.EsgMetric.ENVIRONMENTAL_SOCIAL_MOMENTUM_PERCENTILE)
assert_series_equal(pd.Series([81.2, 75.4, 65.7], index=_index * 3, name='esMomentumPercentile'),
pd.Series(actual))
actual = tm.esg_headline_metric(mock_aapl, tm.EsgMetric.GOVERNANCE_REGIONAL_PERCENTILE)
assert_series_equal(pd.Series([81.2, 75.4, 65.7], index=_index * 3, name='gRegionalPercentile'),
pd.Series(actual))
actual = tm.esg_headline_metric(mock_aapl, tm.EsgMetric.CONTROVERSY_PERCENTILE)
assert_series_equal(pd.Series([81.2, 75.4, 65.7], index=_index * 3, name='controversyPercentile'),
pd.Series(actual))
actual = tm.esg_headline_metric(mock_aapl, tm.EsgMetric.ENVIRONMENTAL_SOCIAL_DISCLOSURE)
assert_series_equal(pd.Series([49.2, 55.7, 98.4], index=_index * 3, name='esDisclosurePercentage'),
pd.Series(actual))
with pytest.raises(NotImplementedError):
tm.esg_headline_metric(mock_aapl, tm.EsgMetric.ENVIRONMENTAL_SOCIAL_NUMERIC_SCORE, real_time=True)
replace.restore()
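# GIR ratings come back as a small integer series (-1/0/1 per date) from the
# mocked rating history.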
def test_gir_rating():
replace = Replacer()
mock_aapl = Stock('MA4B66MW5E27U9VBB94', 'AAPL')
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_rating)
actual = tm.gir_rating(mock_aapl, tm._RatingMetric.RATING)
assert_series_equal(pd.Series([1, -1, 1, 0], index=pd.to_datetime([datetime.date(2020, 8, 13),
datetime.date(2020, 8, 14),
datetime.date(2020, 8, 17),
datetime.date(2020, 8, 18)]),
name='rating'), pd.Series(actual))
replace.restore()
import datetime
from datetime import timedelta
from distutils.version import LooseVersion
from io import BytesIO
import os
import re
from warnings import catch_warnings, simplefilter
import numpy as np
import pytest
from pandas.compat import is_platform_little_endian, is_platform_windows
import pandas.util._test_decorators as td
from pandas.core.dtypes.common import is_categorical_dtype
import pandas as pd
from pandas import (
Categorical,
CategoricalIndex,
DataFrame,
DatetimeIndex,
Index,
Int64Index,
MultiIndex,
RangeIndex,
Series,
Timestamp,
bdate_range,
concat,
date_range,
isna,
timedelta_range,
)
from pandas.tests.io.pytables.common import (
_maybe_remove,
create_tempfile,
ensure_clean_path,
ensure_clean_store,
safe_close,
safe_remove,
tables,
)
import pandas.util.testing as tm
from pandas.io.pytables import (
ClosedFileError,
HDFStore,
PossibleDataLossError,
Term,
read_hdf,
)
from pandas.io import pytables as pytables # noqa: E402 isort:skip
from pandas.io.pytables import TableIterator # noqa: E402 isort:skip
_default_compressor = "blosc"
ignore_natural_naming_warning = pytest.mark.filterwarnings(
"ignore:object name:tables.exceptions.NaturalNameWarning"
)
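# Core HDFStore behaviour: construction, round-trips, key/contains semantics,
# open modes, softlinks, put/append and compression settings.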
@pytest.mark.single
class TestHDFStore:
def test_format_kwarg_in_constructor(self, setup_path):
# GH 13291
with ensure_clean_path(setup_path) as path:
with pytest.raises(ValueError):
HDFStore(path, format="table")
def test_context(self, setup_path):
path = create_tempfile(setup_path)
try:
with HDFStore(path) as tbl:
raise ValueError("blah")
except ValueError:
pass
finally:
safe_remove(path)
try:
with HDFStore(path) as tbl:
tbl["a"] = tm.makeDataFrame()
with HDFStore(path) as tbl:
assert len(tbl) == 1
assert type(tbl["a"]) == DataFrame
finally:
safe_remove(path)
def test_conv_read_write(self, setup_path):
path = create_tempfile(setup_path)
try:
def roundtrip(key, obj, **kwargs):
obj.to_hdf(path, key, **kwargs)
return read_hdf(path, key)
o = tm.makeTimeSeries()
tm.assert_series_equal(o, roundtrip("series", o))
o = tm.makeStringSeries()
tm.assert_series_equal(o, roundtrip("string_series", o))
o = tm.makeDataFrame()
tm.assert_frame_equal(o, roundtrip("frame", o))
# table
df = DataFrame(dict(A=range(5), B=range(5)))
df.to_hdf(path, "table", append=True)
result = read_hdf(path, "table", where=["index>2"])
tm.assert_frame_equal(df[df.index > 2], result)
finally:
safe_remove(path)
def test_long_strings(self, setup_path):
# GH6166
df = DataFrame(
{"a": tm.rands_array(100, size=10)}, index=tm.rands_array(100, size=10)
)
with ensure_clean_store(setup_path) as store:
store.append("df", df, data_columns=["a"])
result = store.select("df")
tm.assert_frame_equal(df, result)
def test_api(self, setup_path):
# GH4584
# API issue when to_hdf doesn't accept append AND format args
with ensure_clean_path(setup_path) as path:
df = tm.makeDataFrame()
df.iloc[:10].to_hdf(path, "df", append=True, format="table")
df.iloc[10:].to_hdf(path, "df", append=True, format="table")
tm.assert_frame_equal(read_hdf(path, "df"), df)
# append to False
df.iloc[:10].to_hdf(path, "df", append=False, format="table")
df.iloc[10:].to_hdf(path, "df", append=True, format="table")
tm.assert_frame_equal(read_hdf(path, "df"), df)
with ensure_clean_path(setup_path) as path:
df = tm.makeDataFrame()
df.iloc[:10].to_hdf(path, "df", append=True)
df.iloc[10:].to_hdf(path, "df", append=True, format="table")
tm.assert_frame_equal(read_hdf(path, "df"), df)
# append to False
df.iloc[:10].to_hdf(path, "df", append=False, format="table")
df.iloc[10:].to_hdf(path, "df", append=True)
tm.assert_frame_equal(read_hdf(path, "df"), df)
with ensure_clean_path(setup_path) as path:
df = tm.makeDataFrame()
df.to_hdf(path, "df", append=False, format="fixed")
tm.assert_frame_equal(read_hdf(path, "df"), df)
df.to_hdf(path, "df", append=False, format="f")
tm.assert_frame_equal(read_hdf(path, "df"), df)
df.to_hdf(path, "df", append=False)
tm.assert_frame_equal(read_hdf(path, "df"), df)
df.to_hdf(path, "df")
tm.assert_frame_equal(read_hdf(path, "df"), df)
with ensure_clean_store(setup_path) as store:
path = store._path
df = tm.makeDataFrame()
_maybe_remove(store, "df")
store.append("df", df.iloc[:10], append=True, format="table")
store.append("df", df.iloc[10:], append=True, format="table")
tm.assert_frame_equal(store.select("df"), df)
# append to False
_maybe_remove(store, "df")
store.append("df", df.iloc[:10], append=False, format="table")
store.append("df", df.iloc[10:], append=True, format="table")
tm.assert_frame_equal(store.select("df"), df)
# formats
_maybe_remove(store, "df")
store.append("df", df.iloc[:10], append=False, format="table")
store.append("df", df.iloc[10:], append=True, format="table")
tm.assert_frame_equal(store.select("df"), df)
_maybe_remove(store, "df")
store.append("df", df.iloc[:10], append=False, format="table")
store.append("df", df.iloc[10:], append=True, format=None)
tm.assert_frame_equal(store.select("df"), df)
with ensure_clean_path(setup_path) as path:
# Invalid.
df = tm.makeDataFrame()
with pytest.raises(ValueError):
df.to_hdf(path, "df", append=True, format="f")
with pytest.raises(ValueError):
df.to_hdf(path, "df", append=True, format="fixed")
with pytest.raises(TypeError):
df.to_hdf(path, "df", append=True, format="foo")
with pytest.raises(TypeError):
df.to_hdf(path, "df", append=False, format="bar")
# File path doesn't exist
path = ""
with pytest.raises(FileNotFoundError):
read_hdf(path, "df")
def test_api_default_format(self, setup_path):
# default_format option
with ensure_clean_store(setup_path) as store:
df = tm.makeDataFrame()
pd.set_option("io.hdf.default_format", "fixed")
_maybe_remove(store, "df")
store.put("df", df)
assert not store.get_storer("df").is_table
with pytest.raises(ValueError):
store.append("df2", df)
pd.set_option("io.hdf.default_format", "table")
_maybe_remove(store, "df")
store.put("df", df)
assert store.get_storer("df").is_table
_maybe_remove(store, "df2")
store.append("df2", df)
assert store.get_storer("df").is_table
pd.set_option("io.hdf.default_format", None)
with ensure_clean_path(setup_path) as path:
df = tm.makeDataFrame()
pd.set_option("io.hdf.default_format", "fixed")
df.to_hdf(path, "df")
with HDFStore(path) as store:
assert not store.get_storer("df").is_table
with pytest.raises(ValueError):
df.to_hdf(path, "df2", append=True)
pd.set_option("io.hdf.default_format", "table")
df.to_hdf(path, "df3")
with HDFStore(path) as store:
assert store.get_storer("df3").is_table
df.to_hdf(path, "df4", append=True)
with HDFStore(path) as store:
assert store.get_storer("df4").is_table
pd.set_option("io.hdf.default_format", None)
def test_keys(self, setup_path):
with ensure_clean_store(setup_path) as store:
store["a"] = tm.makeTimeSeries()
store["b"] = tm.makeStringSeries()
store["c"] = tm.makeDataFrame()
assert len(store) == 3
expected = {"/a", "/b", "/c"}
assert set(store.keys()) == expected
assert set(store) == expected
def test_keys_ignore_hdf_softlink(self, setup_path):
# GH 20523
# Puts a softlink into HDF file and rereads
with ensure_clean_store(setup_path) as store:
df = DataFrame(dict(A=range(5), B=range(5)))
store.put("df", df)
assert store.keys() == ["/df"]
store._handle.create_soft_link(store._handle.root, "symlink", "df")
# Should ignore the softlink
assert store.keys() == ["/df"]
def test_iter_empty(self, setup_path):
with ensure_clean_store(setup_path) as store:
# GH 12221
assert list(store) == []
def test_repr(self, setup_path):
with ensure_clean_store(setup_path) as store:
repr(store)
store.info()
store["a"] = tm.makeTimeSeries()
store["b"] = tm.makeStringSeries()
store["c"] = tm.makeDataFrame()
df = tm.makeDataFrame()
df["obj1"] = "foo"
df["obj2"] = "bar"
df["bool1"] = df["A"] > 0
df["bool2"] = df["B"] > 0
df["bool3"] = True
df["int1"] = 1
df["int2"] = 2
df["timestamp1"] = Timestamp("20010102")
df["timestamp2"] = Timestamp("20010103")
df["datetime1"] = datetime.datetime(2001, 1, 2, 0, 0)
df["datetime2"] = datetime.datetime(2001, 1, 3, 0, 0)
df.loc[3:6, ["obj1"]] = np.nan
df = df._consolidate()._convert(datetime=True)
with catch_warnings(record=True):
simplefilter("ignore", pd.errors.PerformanceWarning)
store["df"] = df
# make a random group in hdf space
store._handle.create_group(store._handle.root, "bah")
assert store.filename in repr(store)
assert store.filename in str(store)
store.info()
# storers
with ensure_clean_store(setup_path) as store:
df = tm.makeDataFrame()
store.append("df", df)
s = store.get_storer("df")
repr(s)
str(s)
@ignore_natural_naming_warning
def test_contains(self, setup_path):
with ensure_clean_store(setup_path) as store:
store["a"] = tm.makeTimeSeries()
store["b"] = tm.makeDataFrame()
store["foo/bar"] = tm.makeDataFrame()
assert "a" in store
assert "b" in store
assert "c" not in store
assert "foo/bar" in store
assert "/foo/bar" in store
assert "/foo/b" not in store
assert "bar" not in store
# gh-2694: tables.NaturalNameWarning
with catch_warnings(record=True):
store["node())"] = tm.makeDataFrame()
assert "node())" in store
def test_versioning(self, setup_path):
with ensure_clean_store(setup_path) as store:
store["a"] = tm.makeTimeSeries()
store["b"] = tm.makeDataFrame()
df = tm.makeTimeDataFrame()
_maybe_remove(store, "df1")
store.append("df1", df[:10])
store.append("df1", df[10:])
assert store.root.a._v_attrs.pandas_version == "0.15.2"
assert store.root.b._v_attrs.pandas_version == "0.15.2"
assert store.root.df1._v_attrs.pandas_version == "0.15.2"
# write a file and wipe its versioning
_maybe_remove(store, "df2")
store.append("df2", df)
# this is an error because its table_type is appendable, but it has no
# version info
store.get_node("df2")._v_attrs.pandas_version = None
with pytest.raises(Exception):
store.select("df2")
def test_mode(self, setup_path):
df = tm.makeTimeDataFrame()
def check(mode):
with ensure_clean_path(setup_path) as path:
# constructor
if mode in ["r", "r+"]:
with pytest.raises(IOError):
HDFStore(path, mode=mode)
else:
store = HDFStore(path, mode=mode)
assert store._handle.mode == mode
store.close()
with ensure_clean_path(setup_path) as path:
# context
if mode in ["r", "r+"]:
with pytest.raises(IOError):
with HDFStore(path, mode=mode) as store: # noqa
pass
else:
with HDFStore(path, mode=mode) as store:
assert store._handle.mode == mode
with ensure_clean_path(setup_path) as path:
# conv write
if mode in ["r", "r+"]:
with pytest.raises(IOError):
df.to_hdf(path, "df", mode=mode)
df.to_hdf(path, "df", mode="w")
else:
df.to_hdf(path, "df", mode=mode)
# conv read
if mode in ["w"]:
with pytest.raises(ValueError):
read_hdf(path, "df", mode=mode)
else:
result = read_hdf(path, "df", mode=mode)
tm.assert_frame_equal(result, df)
def check_default_mode():
# read_hdf uses default mode
with ensure_clean_path(setup_path) as path:
df.to_hdf(path, "df", mode="w")
result = read_hdf(path, "df")
tm.assert_frame_equal(result, df)
check("r")
check("r+")
check("a")
check("w")
check_default_mode()
def test_reopen_handle(self, setup_path):
with ensure_clean_path(setup_path) as path:
store = HDFStore(path, mode="a")
store["a"] = tm.makeTimeSeries()
# invalid mode change
with pytest.raises(PossibleDataLossError):
store.open("w")
store.close()
assert not store.is_open
# truncation ok here
store.open("w")
assert store.is_open
assert len(store) == 0
store.close()
assert not store.is_open
store = HDFStore(path, mode="a")
store["a"] = tm.makeTimeSeries()
# reopen as read
store.open("r")
assert store.is_open
assert len(store) == 1
assert store._mode == "r"
store.close()
assert not store.is_open
# reopen as append
store.open("a")
assert store.is_open
assert len(store) == 1
assert store._mode == "a"
store.close()
assert not store.is_open
# reopen as append (again)
store.open("a")
assert store.is_open
assert len(store) == 1
assert store._mode == "a"
store.close()
assert not store.is_open
def test_open_args(self, setup_path):
with ensure_clean_path(setup_path) as path:
df = tm.makeDataFrame()
# create an in memory store
store = HDFStore(
path, mode="a", driver="H5FD_CORE", driver_core_backing_store=0
)
store["df"] = df
store.append("df2", df)
tm.assert_frame_equal(store["df"], df)
tm.assert_frame_equal(store["df2"], df)
store.close()
# the file should not have actually been written
assert not os.path.exists(path)
def test_flush(self, setup_path):
with ensure_clean_store(setup_path) as store:
store["a"] = tm.makeTimeSeries()
store.flush()
store.flush(fsync=True)
def test_get(self, setup_path):
with ensure_clean_store(setup_path) as store:
store["a"] = tm.makeTimeSeries()
left = store.get("a")
right = store["a"]
tm.assert_series_equal(left, right)
left = store.get("/a")
right = store["/a"]
tm.assert_series_equal(left, right)
with pytest.raises(KeyError, match="'No object named b in the file'"):
store.get("b")
@pytest.mark.parametrize(
"where, expected",
[
(
"/",
{
"": ({"first_group", "second_group"}, set()),
"/first_group": (set(), {"df1", "df2"}),
"/second_group": ({"third_group"}, {"df3", "s1"}),
"/second_group/third_group": (set(), {"df4"}),
},
),
(
"/second_group",
{
"/second_group": ({"third_group"}, {"df3", "s1"}),
"/second_group/third_group": (set(), {"df4"}),
},
),
],
)
def test_walk(self, where, expected, setup_path):
# GH10143
objs = {
"df1": pd.DataFrame([1, 2, 3]),
"df2": pd.DataFrame([4, 5, 6]),
"df3": pd.DataFrame([6, 7, 8]),
"df4": pd.DataFrame([9, 10, 11]),
"s1": pd.Series([10, 9, 8]),
# Next 3 items aren't pandas objects and should be ignored
"a1": np.array([[1, 2, 3], [4, 5, 6]]),
"tb1": np.array([(1, 2, 3), (4, 5, 6)], dtype="i,i,i"),
"tb2": np.array([(7, 8, 9), (10, 11, 12)], dtype="i,i,i"),
}
with ensure_clean_store("walk_groups.hdf", mode="w") as store:
store.put("/first_group/df1", objs["df1"])
store.put("/first_group/df2", objs["df2"])
store.put("/second_group/df3", objs["df3"])
store.put("/second_group/s1", objs["s1"])
store.put("/second_group/third_group/df4", objs["df4"])
# Create non-pandas objects
store._handle.create_array("/first_group", "a1", objs["a1"])
store._handle.create_table("/first_group", "tb1", obj=objs["tb1"])
store._handle.create_table("/second_group", "tb2", obj=objs["tb2"])
assert len(list(store.walk(where=where))) == len(expected)
for path, groups, leaves in store.walk(where=where):
assert path in expected
expected_groups, expected_frames = expected[path]
assert expected_groups == set(groups)
assert expected_frames == set(leaves)
for leaf in leaves:
frame_path = "/".join([path, leaf])
obj = store.get(frame_path)
if "df" in leaf:
tm.assert_frame_equal(obj, objs[leaf])
else:
tm.assert_series_equal(obj, objs[leaf])
def test_getattr(self, setup_path):
with ensure_clean_store(setup_path) as store:
s = tm.makeTimeSeries()
store["a"] = s
# test attribute access
result = store.a
tm.assert_series_equal(result, s)
result = getattr(store, "a")
tm.assert_series_equal(result, s)
df = tm.makeTimeDataFrame()
store["df"] = df
result = store.df
tm.assert_frame_equal(result, df)
# errors
for x in ["d", "mode", "path", "handle", "complib"]:
with pytest.raises(AttributeError):
getattr(store, x)
# not stores
for x in ["mode", "path", "handle", "complib"]:
getattr(store, "_{x}".format(x=x))
def test_put(self, setup_path):
with ensure_clean_store(setup_path) as store:
ts = tm.makeTimeSeries()
df = tm.makeTimeDataFrame()
store["a"] = ts
store["b"] = df[:10]
store["foo/bar/bah"] = df[:10]
store["foo"] = df[:10]
store["/foo"] = df[:10]
store.put("c", df[:10], format="table")
# not OK, not a table
with pytest.raises(ValueError):
store.put("b", df[10:], append=True)
# node does not currently exist, test _is_table_type returns False
# in this case
_maybe_remove(store, "f")
with pytest.raises(ValueError):
store.put("f", df[10:], append=True)
# can't put to a table (use append instead)
with pytest.raises(ValueError):
store.put("c", df[10:], append=True)
# overwrite table
store.put("c", df[:10], format="table", append=False)
tm.assert_frame_equal(df[:10], store["c"])
def test_put_string_index(self, setup_path):
with ensure_clean_store(setup_path) as store:
index = Index(
["I am a very long string index: {i}".format(i=i) for i in range(20)]
)
s = Series(np.arange(20), index=index)
df = DataFrame({"A": s, "B": s})
store["a"] = s
tm.assert_series_equal(store["a"], s)
store["b"] = df
tm.assert_frame_equal(store["b"], df)
# mixed length
index = Index(
["abcdefghijklmnopqrstuvwxyz1234567890"]
+ ["I am a very long string index: {i}".format(i=i) for i in range(20)]
)
s = Series(np.arange(21), index=index)
df = DataFrame({"A": s, "B": s})
store["a"] = s
tm.assert_series_equal(store["a"], s)
store["b"] = df
tm.assert_frame_equal(store["b"], df)
def test_put_compression(self, setup_path):
with ensure_clean_store(setup_path) as store:
df = tm.makeTimeDataFrame()
store.put("c", df, format="table", complib="zlib")
tm.assert_frame_equal(store["c"], df)
# can't compress if format='fixed'
with pytest.raises(ValueError):
store.put("b", df, format="fixed", complib="zlib")
@td.skip_if_windows_python_3
def test_put_compression_blosc(self, setup_path):
df = tm.makeTimeDataFrame()
with ensure_clean_store(setup_path) as store:
# can't compress if format='fixed'
with pytest.raises(ValueError):
store.put("b", df, format="fixed", complib="blosc")
store.put("c", df, format="table", complib="blosc")
tm.assert_frame_equal(store["c"], df)
def test_complibs_default_settings(self, setup_path):
# GH15943
df = tm.makeDataFrame()
# Set complevel and check if complib is automatically set to
# default value
with ensure_clean_path(setup_path) as tmpfile:
df.to_hdf(tmpfile, "df", complevel=9)
result = pd.read_hdf(tmpfile, "df")
tm.assert_frame_equal(result, df)
with tables.open_file(tmpfile, mode="r") as h5file:
for node in h5file.walk_nodes(where="/df", classname="Leaf"):
assert node.filters.complevel == 9
assert node.filters.complib == "zlib"
# Set complib and check to see if compression is disabled
with ensure_clean_path(setup_path) as tmpfile:
df.to_hdf(tmpfile, "df", complib="zlib")
result = pd.read_hdf(tmpfile, "df")
tm.assert_frame_equal(result, df)
with tables.open_file(tmpfile, mode="r") as h5file:
for node in h5file.walk_nodes(where="/df", classname="Leaf"):
assert node.filters.complevel == 0
assert node.filters.complib is None
# Check if not setting complib or complevel results in no compression
with ensure_clean_path(setup_path) as tmpfile:
df.to_hdf(tmpfile, "df")
result = pd.read_hdf(tmpfile, "df")
tm.assert_frame_equal(result, df)
with tables.open_file(tmpfile, mode="r") as h5file:
for node in h5file.walk_nodes(where="/df", classname="Leaf"):
assert node.filters.complevel == 0
assert node.filters.complib is None
# Check if file-defaults can be overridden on a per table basis
with ensure_clean_path(setup_path) as tmpfile:
store = pd.HDFStore(tmpfile)
store.append("dfc", df, complevel=9, complib="blosc")
store.append("df", df)
store.close()
with tables.open_file(tmpfile, mode="r") as h5file:
for node in h5file.walk_nodes(where="/df", classname="Leaf"):
assert node.filters.complevel == 0
assert node.filters.complib is None
for node in h5file.walk_nodes(where="/dfc", classname="Leaf"):
assert node.filters.complevel == 9
assert node.filters.complib == "blosc"
def test_complibs(self, setup_path):
# GH14478
df = tm.makeDataFrame()
        # Build the list of all (complib, complevel) tuples to test
all_complibs = tables.filters.all_complibs
        # Remove lzo if it's not available on this platform
if not tables.which_lib_version("lzo"):
all_complibs.remove("lzo")
        # Remove bzip2 if it's not available on this platform
if not tables.which_lib_version("bzip2"):
all_complibs.remove("bzip2")
all_levels = range(0, 10)
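        # complevel 0 disables compression entirely, so the stored complib is
        # expected to come back as None regardless of the library requested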
all_tests = [(lib, lvl) for lib in all_complibs for lvl in all_levels]
for (lib, lvl) in all_tests:
with ensure_clean_path(setup_path) as tmpfile:
gname = "foo"
# Write and read file to see if data is consistent
df.to_hdf(tmpfile, gname, complib=lib, complevel=lvl)
result = pd.read_hdf(tmpfile, gname)
tm.assert_frame_equal(result, df)
# Open file and check metadata
# for correct amount of compression
h5table = tables.open_file(tmpfile, mode="r")
for node in h5table.walk_nodes(where="/" + gname, classname="Leaf"):
assert node.filters.complevel == lvl
if lvl == 0:
assert node.filters.complib is None
else:
assert node.filters.complib == lib
h5table.close()
def test_put_integer(self, setup_path):
# non-date, non-string index
df = DataFrame(np.random.randn(50, 100))
self._check_roundtrip(df, tm.assert_frame_equal, setup_path)
@td.xfail_non_writeable
def test_put_mixed_type(self, setup_path):
df = tm.makeTimeDataFrame()
df["obj1"] = "foo"
df["obj2"] = "bar"
df["bool1"] = df["A"] > 0
df["bool2"] = df["B"] > 0
df["bool3"] = True
df["int1"] = 1
df["int2"] = 2
df["timestamp1"] = Timestamp("20010102")
df["timestamp2"] = Timestamp("20010103")
df["datetime1"] = datetime.datetime(2001, 1, 2, 0, 0)
df["datetime2"] = datetime.datetime(2001, 1, 3, 0, 0)
df.loc[3:6, ["obj1"]] = np.nan
df = df._consolidate()._convert(datetime=True)
with ensure_clean_store(setup_path) as store:
_maybe_remove(store, "df")
# PerformanceWarning
with catch_warnings(record=True):
simplefilter("ignore", pd.errors.PerformanceWarning)
store.put("df", df)
expected = store.get("df")
tm.assert_frame_equal(expected, df)
@pytest.mark.filterwarnings(
"ignore:object name:tables.exceptions.NaturalNameWarning"
)
def test_append(self, setup_path):
with ensure_clean_store(setup_path) as store:
            # this is allowed, but you almost always don't want to do it
            # (tables.NaturalNameWarning)
with catch_warnings(record=True):
df = tm.makeTimeDataFrame()
_maybe_remove(store, "df1")
store.append("df1", df[:10])
store.append("df1", df[10:])
tm.assert_frame_equal(store["df1"], df)
_maybe_remove(store, "df2")
store.put("df2", df[:10], format="table")
store.append("df2", df[10:])
tm.assert_frame_equal(store["df2"], df)
_maybe_remove(store, "df3")
store.append("/df3", df[:10])
store.append("/df3", df[10:])
tm.assert_frame_equal(store["df3"], df)
                # this is allowed, but you almost always don't want to do it
                # (tables.NaturalNameWarning)
_maybe_remove(store, "/df3 foo")
store.append("/df3 foo", df[:10])
store.append("/df3 foo", df[10:])
tm.assert_frame_equal(store["df3 foo"], df)
                # dtype issues - mixed type in a single object column
df = DataFrame(data=[[1, 2], [0, 1], [1, 2], [0, 0]])
df["mixed_column"] = "testing"
df.loc[2, "mixed_column"] = np.nan
_maybe_remove(store, "df")
store.append("df", df)
tm.assert_frame_equal(store["df"], df)
# uints - test storage of uints
uint_data = DataFrame(
{
"u08": Series(
np.random.randint(0, high=255, size=5), dtype=np.uint8
),
"u16": Series(
np.random.randint(0, high=65535, size=5), dtype=np.uint16
),
"u32": Series(
np.random.randint(0, high=2 ** 30, size=5), dtype=np.uint32
),
"u64": Series(
[2 ** 58, 2 ** 59, 2 ** 60, 2 ** 61, 2 ** 62],
dtype=np.uint64,
),
},
index=np.arange(5),
)
_maybe_remove(store, "uints")
store.append("uints", uint_data)
tm.assert_frame_equal(store["uints"], uint_data)
# uints - test storage of uints in indexable columns
_maybe_remove(store, "uints")
# 64-bit indices not yet supported
store.append("uints", uint_data, data_columns=["u08", "u16", "u32"])
tm.assert_frame_equal(store["uints"], uint_data)
def test_append_series(self, setup_path):
with ensure_clean_store(setup_path) as store:
# basic
ss = tm.makeStringSeries()
ts = tm.makeTimeSeries()
ns = Series(np.arange(100))
store.append("ss", ss)
result = store["ss"]
tm.assert_series_equal(result, ss)
assert result.name is None
store.append("ts", ts)
result = store["ts"]
tm.assert_series_equal(result, ts)
assert result.name is None
ns.name = "foo"
store.append("ns", ns)
result = store["ns"]
tm.assert_series_equal(result, ns)
assert result.name == ns.name
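            # a named Series stored in table format can be queried on its
            # values via its name, so "foo" is usable in the where clauses below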
# select on the values
expected = ns[ns > 60]
result = store.select("ns", "foo>60")
tm.assert_series_equal(result, expected)
# select on the index and values
expected = ns[(ns > 70) & (ns.index < 90)]
result = store.select("ns", "foo>70 and index<90")
tm.assert_series_equal(result, expected)
# multi-index
mi = DataFrame(np.random.randn(5, 1), columns=["A"])
mi["B"] = np.arange(len(mi))
mi["C"] = "foo"
mi.loc[3:5, "C"] = "bar"
mi.set_index(["C", "B"], inplace=True)
s = mi.stack()
s.index = s.index.droplevel(2)
store.append("mi", s)
tm.assert_series_equal(store["mi"], s)
def test_store_index_types(self, setup_path):
# GH5386
# test storing various index types
with ensure_clean_store(setup_path) as store:
def check(format, index):
df = DataFrame(np.random.randn(10, 2), columns=list("AB"))
df.index = index(len(df))
_maybe_remove(store, "df")
store.put("df", df, format=format)
tm.assert_frame_equal(df, store["df"])
for index in [
tm.makeFloatIndex,
tm.makeStringIndex,
tm.makeIntIndex,
tm.makeDateIndex,
]:
check("table", index)
check("fixed", index)
# period index currently broken for table
            # see GH7796 FIXME
check("fixed", tm.makePeriodIndex)
# check('table',tm.makePeriodIndex)
# unicode
index = tm.makeUnicodeIndex
check("table", index)
check("fixed", index)
@pytest.mark.skipif(
        not is_platform_little_endian(), reason="platform is not little endian"
)
def test_encoding(self, setup_path):
with ensure_clean_store(setup_path) as store:
df = DataFrame(dict(A="foo", B="bar"), index=range(5))
df.loc[2, "A"] = np.nan
df.loc[3, "B"] = np.nan
_maybe_remove(store, "df")
store.append("df", df, encoding="ascii")
tm.assert_frame_equal(store["df"], df)
expected = df.reindex(columns=["A"])
result = store.select("df", Term("columns=A", encoding="ascii"))
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"val",
[
[b"E\xc9, 17", b"", b"a", b"b", b"c"],
[b"E\xc9, 17", b"a", b"b", b"c"],
[b"EE, 17", b"", b"a", b"b", b"c"],
[b"E\xc9, 17", b"\xf8\xfc", b"a", b"b", b"c"],
[b"", b"a", b"b", b"c"],
[b"\xf8\xfc", b"a", b"b", b"c"],
[b"A\xf8\xfc", b"", b"a", b"b", b"c"],
[np.nan, b"", b"b", b"c"],
[b"A\xf8\xfc", np.nan, b"", b"b", b"c"],
],
)
@pytest.mark.parametrize("dtype", ["category", object])
def test_latin_encoding(self, setup_path, dtype, val):
enc = "latin-1"
nan_rep = ""
key = "data"
val = [x.decode(enc) if isinstance(x, bytes) else x for x in val]
ser = pd.Series(val, dtype=dtype)
with ensure_clean_path(setup_path) as store:
ser.to_hdf(store, key, format="table", encoding=enc, nan_rep=nan_rep)
retr = read_hdf(store, key)
s_nan = ser.replace(nan_rep, np.nan)
if is_categorical_dtype(s_nan):
assert is_categorical_dtype(retr)
tm.assert_series_equal(
s_nan, retr, check_dtype=False, check_categorical=False
)
else:
tm.assert_series_equal(s_nan, retr)
# FIXME: don't leave commented-out
# fails:
# for x in examples:
# roundtrip(s, nan_rep=b'\xf8\xfc')
def test_append_some_nans(self, setup_path):
with ensure_clean_store(setup_path) as store:
df = DataFrame(
{
"A": Series(np.random.randn(20)).astype("int32"),
"A1": np.random.randn(20),
"A2": np.random.randn(20),
"B": "foo",
"C": "bar",
"D": Timestamp("20010101"),
"E": datetime.datetime(2001, 1, 2, 0, 0),
},
index=np.arange(20),
)
# some nans
_maybe_remove(store, "df1")
df.loc[0:15, ["A1", "B", "D", "E"]] = np.nan
store.append("df1", df[:10])
store.append("df1", df[10:])
tm.assert_frame_equal(store["df1"], df)
# first column
df1 = df.copy()
df1.loc[:, "A1"] = np.nan
_maybe_remove(store, "df1")
store.append("df1", df1[:10])
store.append("df1", df1[10:])
tm.assert_frame_equal(store["df1"], df1)
# 2nd column
df2 = df.copy()
df2.loc[:, "A2"] = np.nan
_maybe_remove(store, "df2")
store.append("df2", df2[:10])
store.append("df2", df2[10:])
tm.assert_frame_equal(store["df2"], df2)
# datetimes
df3 = df.copy()
df3.loc[:, "E"] = np.nan
_maybe_remove(store, "df3")
store.append("df3", df3[:10])
store.append("df3", df3[10:])
tm.assert_frame_equal(store["df3"], df3)
def test_append_all_nans(self, setup_path):
with ensure_clean_store(setup_path) as store:
df = DataFrame(
{"A1": np.random.randn(20), "A2": np.random.randn(20)},
index=np.arange(20),
)
df.loc[0:15, :] = np.nan
# nan some entire rows (dropna=True)
_maybe_remove(store, "df")
store.append("df", df[:10], dropna=True)
store.append("df", df[10:], dropna=True)
tm.assert_frame_equal(store["df"], df[-4:])
# nan some entire rows (dropna=False)
_maybe_remove(store, "df2")
store.append("df2", df[:10], dropna=False)
store.append("df2", df[10:], dropna=False)
tm.assert_frame_equal(store["df2"], df)
# tests the option io.hdf.dropna_table
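            # the option provides the default ``dropna`` used by append when
            # the keyword is not passed explicitly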
pd.set_option("io.hdf.dropna_table", False)
_maybe_remove(store, "df3")
store.append("df3", df[:10])
store.append("df3", df[10:])
tm.assert_frame_equal(store["df3"], df)
pd.set_option("io.hdf.dropna_table", True)
_maybe_remove(store, "df4")
store.append("df4", df[:10])
store.append("df4", df[10:])
tm.assert_frame_equal(store["df4"], df[-4:])
            # nan some entire rows (strings are still written!)
df = DataFrame(
{
"A1": np.random.randn(20),
"A2": np.random.randn(20),
"B": "foo",
"C": "bar",
},
index=np.arange(20),
)
df.loc[0:15, :] = np.nan
_maybe_remove(store, "df")
store.append("df", df[:10], dropna=True)
store.append("df", df[10:], dropna=True)
tm.assert_frame_equal(store["df"], df)
_maybe_remove(store, "df2")
store.append("df2", df[:10], dropna=False)
store.append("df2", df[10:], dropna=False)
tm.assert_frame_equal(store["df2"], df)
# nan some entire rows (but since we have dates they are still
# written!)
df = DataFrame(
{
"A1": np.random.randn(20),
"A2": np.random.randn(20),
"B": "foo",
"C": "bar",
"D": Timestamp("20010101"),
"E": datetime.datetime(2001, 1, 2, 0, 0),
},
index=np.arange(20),
)
df.loc[0:15, :] = np.nan
_maybe_remove(store, "df")
store.append("df", df[:10], dropna=True)
store.append("df", df[10:], dropna=True)
tm.assert_frame_equal(store["df"], df)
_maybe_remove(store, "df2")
store.append("df2", df[:10], dropna=False)
store.append("df2", df[10:], dropna=False)
tm.assert_frame_equal(store["df2"], df)
# Test to make sure defaults are to not drop.
# Corresponding to Issue 9382
df_with_missing = DataFrame(
{"col1": [0, np.nan, 2], "col2": [1, np.nan, np.nan]}
)
with ensure_clean_path(setup_path) as path:
df_with_missing.to_hdf(path, "df_with_missing", format="table")
reloaded = read_hdf(path, "df_with_missing")
tm.assert_frame_equal(df_with_missing, reloaded)
def test_read_missing_key_close_store(self, setup_path):
# GH 25766
with ensure_clean_path(setup_path) as path:
df = pd.DataFrame({"a": range(2), "b": range(2)})
df.to_hdf(path, "k1")
with pytest.raises(KeyError, match="'No object named k2 in the file'"):
pd.read_hdf(path, "k2")
            # smoke test to check that the file is properly closed after a
            # read with KeyError before another write
df.to_hdf(path, "k2")
def test_read_missing_key_opened_store(self, setup_path):
# GH 28699
with ensure_clean_path(setup_path) as path:
df = pd.DataFrame({"a": range(2), "b": range(2)})
df.to_hdf(path, "k1")
store = pd.HDFStore(path, "r")
with pytest.raises(KeyError, match="'No object named k2 in the file'"):
pd.read_hdf(store, "k2")
# Test that the file is still open after a KeyError and that we can
# still read from it.
pd.read_hdf(store, "k1")
def test_append_frame_column_oriented(self, setup_path):
with ensure_clean_store(setup_path) as store:
# column oriented
df = tm.makeTimeDataFrame()
_maybe_remove(store, "df1")
store.append("df1", df.iloc[:, :2], axes=["columns"])
store.append("df1", df.iloc[:, 2:])
tm.assert_frame_equal(store["df1"], df)
result = store.select("df1", "columns=A")
expected = df.reindex(columns=["A"])
tm.assert_frame_equal(expected, result)
# selection on the non-indexable
result = store.select("df1", ("columns=A", "index=df.index[0:4]"))
expected = df.reindex(columns=["A"], index=df.index[0:4])
tm.assert_frame_equal(expected, result)
# this isn't supported
with pytest.raises(TypeError):
store.select("df1", "columns=A and index>df.index[4]")
def test_append_with_different_block_ordering(self, setup_path):
# GH 4096; using same frames, but different block orderings
with ensure_clean_store(setup_path) as store:
for i in range(10):
df = DataFrame(np.random.randn(10, 2), columns=list("AB"))
df["index"] = range(10)
df["index"] += i * 10
df["int64"] = Series([1] * len(df), dtype="int64")
df["int16"] = Series([1] * len(df), dtype="int16")
if i % 2 == 0:
del df["int64"]
df["int64"] = Series([1] * len(df), dtype="int64")
if i % 3 == 0:
a = df.pop("A")
df["A"] = a
df.set_index("index", inplace=True)
store.append("df", df)
        # test a different ordering but with more fields (an invalid
        # combination)
with ensure_clean_store(setup_path) as store:
df = DataFrame(np.random.randn(10, 2), columns=list("AB"), dtype="float64")
df["int64"] = Series([1] * len(df), dtype="int64")
df["int16"] = Series([1] * len(df), dtype="int16")
store.append("df", df)
# store additional fields in different blocks
df["int16_2"] = Series([1] * len(df), dtype="int16")
with pytest.raises(ValueError):
store.append("df", df)
            # store multiple additional fields in different blocks
df["float_3"] = Series([1.0] * len(df), dtype="float64")
with pytest.raises(ValueError):
store.append("df", df)
def test_append_with_strings(self, setup_path):
with ensure_clean_store(setup_path) as store:
with catch_warnings(record=True):
def check_col(key, name, size):
assert (
getattr(store.get_storer(key).table.description, name).itemsize
== size
)
# avoid truncation on elements
df = DataFrame([[123, "asdqwerty"], [345, "dggnhebbsdfbdfb"]])
store.append("df_big", df)
tm.assert_frame_equal(store.select("df_big"), df)
check_col("df_big", "values_block_1", 15)
# appending smaller string ok
df2 = DataFrame([[124, "asdqy"], [346, "dggnhefbdfb"]])
store.append("df_big", df2)
expected = concat([df, df2])
tm.assert_frame_equal(store.select("df_big"), expected)
check_col("df_big", "values_block_1", 15)
# avoid truncation on elements
df = DataFrame([[123, "asdqwerty"], [345, "dggnhebbsdfbdfb"]])
store.append("df_big2", df, min_itemsize={"values": 50})
tm.assert_frame_equal(store.select("df_big2"), df)
check_col("df_big2", "values_block_1", 50)
# bigger string on next append
store.append("df_new", df)
df_new = DataFrame(
[[124, "abcdefqhij"], [346, "abcdefghijklmnopqrtsuvwxyz"]]
)
with pytest.raises(ValueError):
store.append("df_new", df_new)
# min_itemsize on Series index (GH 11412)
df = tm.makeMixedDataFrame().set_index("C")
store.append("ss", df["B"], min_itemsize={"index": 4})
tm.assert_series_equal(store.select("ss"), df["B"])
# same as above, with data_columns=True
store.append(
"ss2", df["B"], data_columns=True, min_itemsize={"index": 4}
)
tm.assert_series_equal(store.select("ss2"), df["B"])
# min_itemsize in index without appending (GH 10381)
store.put("ss3", df, format="table", min_itemsize={"index": 6})
# just make sure there is a longer string:
df2 = df.copy().reset_index().assign(C="longer").set_index("C")
store.append("ss3", df2)
tm.assert_frame_equal(store.select("ss3"), pd.concat([df, df2]))
# same as above, with a Series
store.put("ss4", df["B"], format="table", min_itemsize={"index": 6})
store.append("ss4", df2["B"])
tm.assert_series_equal(
store.select("ss4"), pd.concat([df["B"], df2["B"]])
)
# with nans
_maybe_remove(store, "df")
df = tm.makeTimeDataFrame()
df["string"] = "foo"
df.loc[1:4, "string"] = np.nan
df["string2"] = "bar"
df.loc[4:8, "string2"] = np.nan
df["string3"] = "bah"
df.loc[1:, "string3"] = np.nan
store.append("df", df)
result = store.select("df")
tm.assert_frame_equal(result, df)
with ensure_clean_store(setup_path) as store:
def check_col(key, name, size):
                assert (
                    getattr(store.get_storer(key).table.description, name).itemsize
                    == size
                )
df = DataFrame(dict(A="foo", B="bar"), index=range(10))
# a min_itemsize that creates a data_column
_maybe_remove(store, "df")
store.append("df", df, min_itemsize={"A": 200})
check_col("df", "A", 200)
assert store.get_storer("df").data_columns == ["A"]
# a min_itemsize that creates a data_column2
_maybe_remove(store, "df")
store.append("df", df, data_columns=["B"], min_itemsize={"A": 200})
check_col("df", "A", 200)
assert store.get_storer("df").data_columns == ["B", "A"]
# a min_itemsize that creates a data_column2
_maybe_remove(store, "df")
store.append("df", df, data_columns=["B"], min_itemsize={"values": 200})
check_col("df", "B", 200)
check_col("df", "values_block_0", 200)
assert store.get_storer("df").data_columns == ["B"]
# infer the .typ on subsequent appends
_maybe_remove(store, "df")
store.append("df", df[:5], min_itemsize=200)
store.append("df", df[5:], min_itemsize=200)
tm.assert_frame_equal(store["df"], df)
# invalid min_itemsize keys
df = DataFrame(["foo", "foo", "foo", "barh", "barh", "barh"], columns=["A"])
_maybe_remove(store, "df")
with pytest.raises(ValueError):
store.append("df", df, min_itemsize={"foo": 20, "foobar": 20})
def test_append_with_empty_string(self, setup_path):
with ensure_clean_store(setup_path) as store:
# with all empty strings (GH 12242)
df = DataFrame({"x": ["a", "b", "c", "d", "e", "f", ""]})
store.append("df", df[:-1], min_itemsize={"x": 1})
store.append("df", df[-1:], min_itemsize={"x": 1})
tm.assert_frame_equal(store.select("df"), df)
def test_to_hdf_with_min_itemsize(self, setup_path):
with ensure_clean_path(setup_path) as path:
# min_itemsize in index with to_hdf (GH 10381)
df = tm.makeMixedDataFrame().set_index("C")
df.to_hdf(path, "ss3", format="table", min_itemsize={"index": 6})
# just make sure there is a longer string:
df2 = df.copy().reset_index().assign(C="longer").set_index("C")
df2.to_hdf(path, "ss3", append=True, format="table")
tm.assert_frame_equal(pd.read_hdf(path, "ss3"), pd.concat([df, df2]))
# same as above, with a Series
df["B"].to_hdf(path, "ss4", format="table", min_itemsize={"index": 6})
df2["B"].to_hdf(path, "ss4", append=True, format="table")
tm.assert_series_equal(
pd.read_hdf(path, "ss4"), pd.concat([df["B"], df2["B"]])
)
@pytest.mark.parametrize(
"format", [pytest.param("fixed", marks=td.xfail_non_writeable), "table"]
)
def test_to_hdf_errors(self, format, setup_path):
data = ["\ud800foo"]
ser = pd.Series(data, index=pd.Index(data))
with ensure_clean_path(setup_path) as path:
# GH 20835
ser.to_hdf(path, "table", format=format, errors="surrogatepass")
result = pd.read_hdf(path, "table", errors="surrogatepass")
tm.assert_series_equal(result, ser)
def test_append_with_data_columns(self, setup_path):
with ensure_clean_store(setup_path) as store:
df = tm.makeTimeDataFrame()
df.iloc[0, df.columns.get_loc("B")] = 1.0
_maybe_remove(store, "df")
store.append("df", df[:2], data_columns=["B"])
store.append("df", df[2:])
tm.assert_frame_equal(store["df"], df)
# check that we have indices created
assert store._handle.root.df.table.cols.index.is_indexed is True
assert store._handle.root.df.table.cols.B.is_indexed is True
# data column searching
result = store.select("df", "B>0")
expected = df[df.B > 0]
tm.assert_frame_equal(result, expected)
# data column searching (with an indexable and a data_columns)
result = store.select("df", "B>0 and index>df.index[3]")
df_new = df.reindex(index=df.index[4:])
expected = df_new[df_new.B > 0]
tm.assert_frame_equal(result, expected)
# data column selection with a string data_column
df_new = df.copy()
df_new["string"] = "foo"
df_new.loc[1:4, "string"] = np.nan
df_new.loc[5:6, "string"] = "bar"
_maybe_remove(store, "df")
store.append("df", df_new, data_columns=["string"])
result = store.select("df", "string='foo'")
expected = df_new[df_new.string == "foo"]
tm.assert_frame_equal(result, expected)
# using min_itemsize and a data column
def check_col(key, name, size):
assert (
getattr(store.get_storer(key).table.description, name).itemsize
== size
)
with ensure_clean_store(setup_path) as store:
_maybe_remove(store, "df")
store.append(
"df", df_new, data_columns=["string"], min_itemsize={"string": 30}
)
check_col("df", "string", 30)
_maybe_remove(store, "df")
store.append("df", df_new, data_columns=["string"], min_itemsize=30)
check_col("df", "string", 30)
_maybe_remove(store, "df")
store.append(
"df", df_new, data_columns=["string"], min_itemsize={"values": 30}
)
check_col("df", "string", 30)
with ensure_clean_store(setup_path) as store:
df_new["string2"] = "foobarbah"
df_new["string_block1"] = "foobarbah1"
df_new["string_block2"] = "foobarbah2"
_maybe_remove(store, "df")
store.append(
"df",
df_new,
data_columns=["string", "string2"],
min_itemsize={"string": 30, "string2": 40, "values": 50},
)
check_col("df", "string", 30)
check_col("df", "string2", 40)
check_col("df", "values_block_1", 50)
with ensure_clean_store(setup_path) as store:
# multiple data columns
df_new = df.copy()
df_new.iloc[0, df_new.columns.get_loc("A")] = 1.0
df_new.iloc[0, df_new.columns.get_loc("B")] = -1.0
df_new["string"] = "foo"
sl = df_new.columns.get_loc("string")
df_new.iloc[1:4, sl] = np.nan
df_new.iloc[5:6, sl] = "bar"
df_new["string2"] = "foo"
sl = df_new.columns.get_loc("string2")
df_new.iloc[2:5, sl] = np.nan
df_new.iloc[7:8, sl] = "bar"
_maybe_remove(store, "df")
store.append("df", df_new, data_columns=["A", "B", "string", "string2"])
result = store.select(
"df", "string='foo' and string2='foo' and A>0 and B<0"
)
expected = df_new[
(df_new.string == "foo")
& (df_new.string2 == "foo")
& (df_new.A > 0)
& (df_new.B < 0)
]
tm.assert_frame_equal(result, expected, check_index_type=False)
# yield an empty frame
result = store.select("df", "string='foo' and string2='cool'")
expected = df_new[(df_new.string == "foo") & (df_new.string2 == "cool")]
tm.assert_frame_equal(result, expected, check_index_type=False)
with ensure_clean_store(setup_path) as store:
# doc example
df_dc = df.copy()
df_dc["string"] = "foo"
df_dc.loc[4:6, "string"] = np.nan
df_dc.loc[7:9, "string"] = "bar"
df_dc["string2"] = "cool"
df_dc["datetime"] = Timestamp("20010102")
df_dc = df_dc._convert(datetime=True)
df_dc.loc[3:5, ["A", "B", "datetime"]] = np.nan
_maybe_remove(store, "df_dc")
store.append(
"df_dc", df_dc, data_columns=["B", "C", "string", "string2", "datetime"]
)
result = store.select("df_dc", "B>0")
expected = df_dc[df_dc.B > 0]
tm.assert_frame_equal(result, expected, check_index_type=False)
result = store.select("df_dc", ["B > 0", "C > 0", "string == foo"])
expected = df_dc[(df_dc.B > 0) & (df_dc.C > 0) & (df_dc.string == "foo")]
tm.assert_frame_equal(result, expected, check_index_type=False)
with ensure_clean_store(setup_path) as store:
# doc example part 2
np.random.seed(1234)
index = date_range("1/1/2000", periods=8)
df_dc = DataFrame(
np.random.randn(8, 3), index=index, columns=["A", "B", "C"]
)
df_dc["string"] = "foo"
df_dc.loc[4:6, "string"] = np.nan
df_dc.loc[7:9, "string"] = "bar"
df_dc.loc[:, ["B", "C"]] = df_dc.loc[:, ["B", "C"]].abs()
df_dc["string2"] = "cool"
# on-disk operations
store.append("df_dc", df_dc, data_columns=["B", "C", "string", "string2"])
result = store.select("df_dc", "B>0")
expected = df_dc[df_dc.B > 0]
tm.assert_frame_equal(result, expected)
result = store.select("df_dc", ["B > 0", "C > 0", 'string == "foo"'])
expected = df_dc[(df_dc.B > 0) & (df_dc.C > 0) & (df_dc.string == "foo")]
tm.assert_frame_equal(result, expected)
def test_create_table_index(self, setup_path):
with ensure_clean_store(setup_path) as store:
with catch_warnings(record=True):
def col(t, column):
return getattr(store.get_storer(t).table.cols, column)
# data columns
df = tm.makeTimeDataFrame()
df["string"] = "foo"
df["string2"] = "bar"
store.append("f", df, data_columns=["string", "string2"])
assert col("f", "index").is_indexed is True
assert col("f", "string").is_indexed is True
assert col("f", "string2").is_indexed is True
                # index= restricts which columns get a PyTables index
store.append(
"f2", df, index=["string"], data_columns=["string", "string2"]
)
assert col("f2", "index").is_indexed is False
assert col("f2", "string").is_indexed is True
assert col("f2", "string2").is_indexed is False
# try to index a non-table
_maybe_remove(store, "f2")
store.put("f2", df)
with pytest.raises(TypeError):
store.create_table_index("f2")
def test_append_hierarchical(self, setup_path):
index = MultiIndex(
levels=[["foo", "bar", "baz", "qux"], ["one", "two", "three"]],
codes=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3], [0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=["foo", "bar"],
)
df = DataFrame(np.random.randn(10, 3), index=index, columns=["A", "B", "C"])
with ensure_clean_store(setup_path) as store:
store.append("mi", df)
result = store.select("mi")
tm.assert_frame_equal(result, df)
# GH 3748
result = store.select("mi", columns=["A", "B"])
expected = df.reindex(columns=["A", "B"])
tm.assert_frame_equal(result, expected)
with ensure_clean_path("test.hdf") as path:
df.to_hdf(path, "df", format="table")
result = read_hdf(path, "df", columns=["A", "B"])
expected = df.reindex(columns=["A", "B"])
tm.assert_frame_equal(result, expected)
def test_column_multiindex(self, setup_path):
# GH 4710
# recreate multi-indexes properly
index = MultiIndex.from_tuples(
[("A", "a"), ("A", "b"), ("B", "a"), ("B", "b")], names=["first", "second"]
)
df = DataFrame(np.arange(12).reshape(3, 4), columns=index)
expected = df.copy()
if isinstance(expected.index, RangeIndex):
expected.index = Int64Index(expected.index)
with ensure_clean_store(setup_path) as store:
store.put("df", df)
tm.assert_frame_equal(
store["df"], expected, check_index_type=True, check_column_type=True
)
store.put("df1", df, format="table")
tm.assert_frame_equal(
store["df1"], expected, check_index_type=True, check_column_type=True
)
with pytest.raises(ValueError):
store.put("df2", df, format="table", data_columns=["A"])
with pytest.raises(ValueError):
store.put("df3", df, format="table", data_columns=True)
# appending multi-column on existing table (see GH 6167)
with ensure_clean_store(setup_path) as store:
store.append("df2", df)
store.append("df2", df)
tm.assert_frame_equal(store["df2"], concat((df, df)))
# non_index_axes name
df = DataFrame(
np.arange(12).reshape(3, 4), columns=Index(list("ABCD"), name="foo")
)
expected = df.copy()
if isinstance(expected.index, RangeIndex):
expected.index = Int64Index(expected.index)
with ensure_clean_store(setup_path) as store:
store.put("df1", df, format="table")
tm.assert_frame_equal(
store["df1"], expected, check_index_type=True, check_column_type=True
)
def test_store_multiindex(self, setup_path):
# validate multi-index names
# GH 5527
with ensure_clean_store(setup_path) as store:
def make_index(names=None):
return MultiIndex.from_tuples(
[
(datetime.datetime(2013, 12, d), s, t)
for d in range(1, 3)
for s in range(2)
for t in range(3)
],
names=names,
)
# no names
_maybe_remove(store, "df")
df = DataFrame(np.zeros((12, 2)), columns=["a", "b"], index=make_index())
store.append("df", df)
tm.assert_frame_equal(store.select("df"), df)
# partial names
_maybe_remove(store, "df")
df = DataFrame(
np.zeros((12, 2)),
columns=["a", "b"],
index=make_index(["date", None, None]),
)
store.append("df", df)
tm.assert_frame_equal(store.select("df"), df)
# series
_maybe_remove(store, "s")
s = Series(np.zeros(12), index=make_index(["date", None, None]))
store.append("s", s)
xp = Series(np.zeros(12), index=make_index(["date", "level_1", "level_2"]))
tm.assert_series_equal(store.select("s"), xp)
# dup with column
_maybe_remove(store, "df")
df = DataFrame(
np.zeros((12, 2)),
columns=["a", "b"],
index=make_index(["date", "a", "t"]),
)
with pytest.raises(ValueError):
store.append("df", df)
# dup within level
_maybe_remove(store, "df")
df = DataFrame(
np.zeros((12, 2)),
columns=["a", "b"],
index=make_index(["date", "date", "date"]),
)
with pytest.raises(ValueError):
store.append("df", df)
            # fully named
_maybe_remove(store, "df")
df = DataFrame(
np.zeros((12, 2)),
columns=["a", "b"],
index=make_index(["date", "s", "t"]),
)
store.append("df", df)
tm.assert_frame_equal(store.select("df"), df)
def test_select_columns_in_where(self, setup_path):
# GH 6169
# recreate multi-indexes when columns is passed
# in the `where` argument
index = MultiIndex(
levels=[["foo", "bar", "baz", "qux"], ["one", "two", "three"]],
codes=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3], [0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=["foo_name", "bar_name"],
)
# With a DataFrame
df = DataFrame(np.random.randn(10, 3), index=index, columns=["A", "B", "C"])
with ensure_clean_store(setup_path) as store:
store.put("df", df, format="table")
expected = df[["A"]]
tm.assert_frame_equal(store.select("df", columns=["A"]), expected)
tm.assert_frame_equal(store.select("df", where="columns=['A']"), expected)
# With a Series
s = Series(np.random.randn(10), index=index, name="A")
with ensure_clean_store(setup_path) as store:
store.put("s", s, format="table")
tm.assert_series_equal(store.select("s", where="columns=['A']"), s)
def test_mi_data_columns(self, setup_path):
# GH 14435
idx = pd.MultiIndex.from_arrays(
[date_range("2000-01-01", periods=5), range(5)], names=["date", "id"]
)
df = pd.DataFrame({"a": [1.1, 1.2, 1.3, 1.4, 1.5]}, index=idx)
with ensure_clean_store(setup_path) as store:
store.append("df", df, data_columns=True)
actual = store.select("df", where="id == 1")
expected = df.iloc[[1], :]
tm.assert_frame_equal(actual, expected)
def test_pass_spec_to_storer(self, setup_path):
df = tm.makeDataFrame()
with ensure_clean_store(setup_path) as store:
store.put("df", df)
with pytest.raises(TypeError):
store.select("df", columns=["A"])
with pytest.raises(TypeError):
store.select("df", where=[("columns=A")])
@td.xfail_non_writeable
def test_append_misc(self, setup_path):
with ensure_clean_store(setup_path) as store:
df = tm.makeDataFrame()
store.append("df", df, chunksize=1)
result = store.select("df")
tm.assert_frame_equal(result, df)
store.append("df1", df, expectedrows=10)
result = store.select("df1")
tm.assert_frame_equal(result, df)
# more chunksize in append tests
def check(obj, comparator):
for c in [10, 200, 1000]:
with ensure_clean_store(setup_path, mode="w") as store:
store.append("obj", obj, chunksize=c)
result = store.select("obj")
comparator(result, obj)
df = tm.makeDataFrame()
df["string"] = "foo"
df["float322"] = 1.0
df["float322"] = df["float322"].astype("float32")
df["bool"] = df["float322"] > 0
df["time1"] = Timestamp("20130101")
df["time2"] = Timestamp("20130102")
check(df, tm.assert_frame_equal)
# empty frame, GH4273
with ensure_clean_store(setup_path) as store:
# 0 len
df_empty = DataFrame(columns=list("ABC"))
store.append("df", df_empty)
with pytest.raises(KeyError, match="'No object named df in the file'"):
store.select("df")
# repeated append of 0/non-zero frames
df = DataFrame(np.random.rand(10, 3), columns=list("ABC"))
store.append("df", df)
tm.assert_frame_equal(store.select("df"), df)
store.append("df", df_empty)
tm.assert_frame_equal(store.select("df"), df)
            # an empty frame can still be stored with put
df = DataFrame(columns=list("ABC"))
store.put("df2", df)
tm.assert_frame_equal(store.select("df2"), df)
def test_append_raise(self, setup_path):
with ensure_clean_store(setup_path) as store:
# test append with invalid input to get good error messages
# list in column
df = tm.makeDataFrame()
df["invalid"] = [["a"]] * len(df)
assert df.dtypes["invalid"] == np.object_
with pytest.raises(TypeError):
store.append("df", df)
# multiple invalid columns
df["invalid2"] = [["a"]] * len(df)
df["invalid3"] = [["a"]] * len(df)
with pytest.raises(TypeError):
store.append("df", df)
# datetime with embedded nans as object
df = tm.makeDataFrame()
s = Series(datetime.datetime(2001, 1, 2), index=df.index)
s = s.astype(object)
s[0:5] = np.nan
df["invalid"] = s
assert df.dtypes["invalid"] == np.object_
with pytest.raises(TypeError):
store.append("df", df)
# directly ndarray
with pytest.raises(TypeError):
store.append("df", np.arange(10))
# series directly
with pytest.raises(TypeError):
store.append("df", Series(np.arange(10)))
# appending an incompatible table
df = tm.makeDataFrame()
store.append("df", df)
df["foo"] = "foo"
with pytest.raises(ValueError):
store.append("df", df)
def test_table_index_incompatible_dtypes(self, setup_path):
df1 = DataFrame({"a": [1, 2, 3]})
df2 = DataFrame({"a": [4, 5, 6]}, index=date_range("1/1/2000", periods=3))
with ensure_clean_store(setup_path) as store:
store.put("frame", df1, format="table")
with pytest.raises(TypeError):
store.put("frame", df2, format="table", append=True)
def test_table_values_dtypes_roundtrip(self, setup_path):
with ensure_clean_store(setup_path) as store:
df1 = DataFrame({"a": [1, 2, 3]}, dtype="f8")
store.append("df_f8", df1)
tm.assert_series_equal(df1.dtypes, store["df_f8"].dtypes)
df2 = DataFrame({"a": [1, 2, 3]}, dtype="i8")
store.append("df_i8", df2)
tm.assert_series_equal(df2.dtypes, store["df_i8"].dtypes)
# incompatible dtype
with pytest.raises(ValueError):
store.append("df_i8", df1)
# check creation/storage/retrieval of float32 (a bit hacky to
            # actually create them though)
df1 = DataFrame(np.array([[1], [2], [3]], dtype="f4"), columns=["A"])
store.append("df_f4", df1)
tm.assert_series_equal(df1.dtypes, store["df_f4"].dtypes)
assert df1.dtypes[0] == "float32"
# check with mixed dtypes
df1 = DataFrame(
{
c: Series(np.random.randint(5), dtype=c)
for c in ["float32", "float64", "int32", "int64", "int16", "int8"]
}
)
df1["string"] = "foo"
df1["float322"] = 1.0
df1["float322"] = df1["float322"].astype("float32")
df1["bool"] = df1["float32"] > 0
df1["time1"] = Timestamp("20130101")
df1["time2"] = Timestamp("20130102")
store.append("df_mixed_dtypes1", df1)
result = store.select("df_mixed_dtypes1").dtypes.value_counts()
result.index = [str(i) for i in result.index]
expected = Series(
{
"float32": 2,
"float64": 1,
"int32": 1,
"bool": 1,
"int16": 1,
"int8": 1,
"int64": 1,
"object": 1,
"datetime64[ns]": 2,
}
)
result = result.sort_index()
expected = expected.sort_index()
tm.assert_series_equal(result, expected)
def test_table_mixed_dtypes(self, setup_path):
# frame
df = tm.makeDataFrame()
df["obj1"] = "foo"
df["obj2"] = "bar"
df["bool1"] = df["A"] > 0
df["bool2"] = df["B"] > 0
df["bool3"] = True
df["int1"] = 1
df["int2"] = 2
df["timestamp1"] = Timestamp("20010102")
df["timestamp2"] = Timestamp("20010103")
df["datetime1"] = datetime.datetime(2001, 1, 2, 0, 0)
df["datetime2"] = datetime.datetime(2001, 1, 3, 0, 0)
df.loc[3:6, ["obj1"]] = np.nan
df = df._consolidate()._convert(datetime=True)
with ensure_clean_store(setup_path) as store:
store.append("df1_mixed", df)
tm.assert_frame_equal(store.select("df1_mixed"), df)
def test_unimplemented_dtypes_table_columns(self, setup_path):
with ensure_clean_store(setup_path) as store:
dtypes = [("date", datetime.date(2001, 1, 2))]
            # currently unsupported dtypes
for n, f in dtypes:
df = tm.makeDataFrame()
df[n] = f
with pytest.raises(TypeError):
store.append("df1_{n}".format(n=n), df)
# frame
df = tm.makeDataFrame()
df["obj1"] = "foo"
df["obj2"] = "bar"
df["datetime1"] = datetime.date(2001, 1, 2)
df = df._consolidate()._convert(datetime=True)
with ensure_clean_store(setup_path) as store:
            # this fails because we have a date in the object block
with pytest.raises(TypeError):
store.append("df_unimplemented", df)
@td.xfail_non_writeable
@pytest.mark.skipif(
LooseVersion(np.__version__) == LooseVersion("1.15.0"),
reason=(
"Skipping pytables test when numpy version is "
"exactly equal to 1.15.0: gh-22098"
),
)
def test_calendar_roundtrip_issue(self, setup_path):
# 8591
# doc example from tseries holiday section
weekmask_egypt = "Sun Mon Tue Wed Thu"
holidays = [
"2012-05-01",
datetime.datetime(2013, 5, 1),
np.datetime64("2014-05-01"),
]
bday_egypt = pd.offsets.CustomBusinessDay(
holidays=holidays, weekmask=weekmask_egypt
)
dt = datetime.datetime(2013, 4, 30)
dts = date_range(dt, periods=5, freq=bday_egypt)
s = Series(dts.weekday, dts).map(Series("Mon Tue Wed Thu Fri Sat Sun".split()))
with ensure_clean_store(setup_path) as store:
store.put("fixed", s)
result = store.select("fixed")
tm.assert_series_equal(result, s)
store.append("table", s)
result = store.select("table")
tm.assert_series_equal(result, s)
def test_roundtrip_tz_aware_index(self, setup_path):
# GH 17618
time = pd.Timestamp("2000-01-01 01:00:00", tz="US/Eastern")
df = pd.DataFrame(data=[0], index=[time])
with ensure_clean_store(setup_path) as store:
store.put("frame", df, format="fixed")
recons = store["frame"]
tm.assert_frame_equal(recons, df)
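            # .value is the nanoseconds-since-epoch representation of the
            # (UTC-normalized) timestamp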
assert recons.index[0].value == 946706400000000000
def test_append_with_timedelta(self, setup_path):
# GH 3577
# append timedelta
df = DataFrame(
dict(
A=Timestamp("20130101"),
B=[
Timestamp("20130101") + timedelta(days=i, seconds=10)
for i in range(10)
],
)
)
df["C"] = df["A"] - df["B"]
df.loc[3:5, "C"] = np.nan
with ensure_clean_store(setup_path) as store:
# table
_maybe_remove(store, "df")
store.append("df", df, data_columns=True)
result = store.select("df")
tm.assert_frame_equal(result, df)
result = store.select("df", where="C<100000")
tm.assert_frame_equal(result, df)
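            # strings and Timedelta objects in the where clause are both
            # coerced to timedeltas for the comparison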
result = store.select("df", where="C<pd.Timedelta('-3D')")
tm.assert_frame_equal(result, df.iloc[3:])
result = store.select("df", "C<'-3D'")
tm.assert_frame_equal(result, df.iloc[3:])
# a bit hacky here as we don't really deal with the NaT properly
result = store.select("df", "C<'-500000s'")
result = result.dropna(subset=["C"])
tm.assert_frame_equal(result, df.iloc[6:])
result = store.select("df", "C<'-3.5D'")
result = result.iloc[1:]
tm.assert_frame_equal(result, df.iloc[4:])
# fixed
_maybe_remove(store, "df2")
store.put("df2", df)
result = store.select("df2")
tm.assert_frame_equal(result, df)
def test_remove(self, setup_path):
with ensure_clean_store(setup_path) as store:
ts = tm.makeTimeSeries()
df = tm.makeDataFrame()
store["a"] = ts
store["b"] = df
_maybe_remove(store, "a")
assert len(store) == 1
tm.assert_frame_equal(df, store["b"])
_maybe_remove(store, "b")
assert len(store) == 0
# nonexistence
with pytest.raises(
KeyError, match="'No object named a_nonexistent_store in the file'"
):
store.remove("a_nonexistent_store")
# pathing
store["a"] = ts
store["b/foo"] = df
_maybe_remove(store, "foo")
_maybe_remove(store, "b/foo")
assert len(store) == 1
store["a"] = ts
store["b/foo"] = df
_maybe_remove(store, "b")
assert len(store) == 1
# __delitem__
store["a"] = ts
store["b"] = df
del store["a"]
del store["b"]
assert len(store) == 0
def test_invalid_terms(self, setup_path):
with ensure_clean_store(setup_path) as store:
with catch_warnings(record=True):
df = tm.makeTimeDataFrame()
df["string"] = "foo"
df.loc[0:4, "string"] = "bar"
store.put("df", df, format="table")
# some invalid terms
with pytest.raises(TypeError):
Term()
# more invalid
with pytest.raises(ValueError):
store.select("df", "df.index[3]")
with pytest.raises(SyntaxError):
store.select("df", "index>")
# from the docs
with ensure_clean_path(setup_path) as path:
dfq = DataFrame(
np.random.randn(10, 4),
columns=list("ABCD"),
index=date_range("20130101", periods=10),
)
dfq.to_hdf(path, "dfq", format="table", data_columns=True)
# check ok
read_hdf(
path, "dfq", where="index>Timestamp('20130104') & columns=['A', 'B']"
)
read_hdf(path, "dfq", where="A>0 or C>0")
# catch the invalid reference
with ensure_clean_path(setup_path) as path:
dfq = DataFrame(
np.random.randn(10, 4),
columns=list("ABCD"),
index=date_range("20130101", periods=10),
)
dfq.to_hdf(path, "dfq", format="table")
with pytest.raises(ValueError):
read_hdf(path, "dfq", where="A>0 or C>0")
def test_same_name_scoping(self, setup_path):
with ensure_clean_store(setup_path) as store:
import pandas as pd
df = DataFrame(
np.random.randn(20, 2), index=pd.date_range("20130101", periods=20)
)
store.put("df", df, format="table")
expected = df[df.index > pd.Timestamp("20130105")]
import datetime # noqa
result = store.select("df", "index>datetime.datetime(2013,1,5)")
tm.assert_frame_equal(result, expected)
from datetime import datetime # noqa
# technically an error, but allow it
result = store.select("df", "index>datetime.datetime(2013,1,5)")
tm.assert_frame_equal(result, expected)
result = store.select("df", "index>datetime(2013,1,5)")
tm.assert_frame_equal(result, expected)
def test_series(self, setup_path):
s = tm.makeStringSeries()
self._check_roundtrip(s, tm.assert_series_equal, path=setup_path)
ts = tm.makeTimeSeries()
self._check_roundtrip(ts, tm.assert_series_equal, path=setup_path)
ts2 = Series(ts.index, Index(ts.index, dtype=object))
self._check_roundtrip(ts2, tm.assert_series_equal, path=setup_path)
ts3 = Series(ts.values, Index(np.asarray(ts.index, dtype=object), dtype=object))
self._check_roundtrip(
ts3, tm.assert_series_equal, path=setup_path, check_index_type=False
)
def test_float_index(self, setup_path):
# GH #454
index = np.random.randn(10)
s = Series(np.random.randn(10), index=index)
self._check_roundtrip(s, tm.assert_series_equal, path=setup_path)
@td.xfail_non_writeable
def test_tuple_index(self, setup_path):
# GH #492
col = np.arange(10)
idx = [(0.0, 1.0), (2.0, 3.0), (4.0, 5.0)]
data = np.random.randn(30).reshape((3, 10))
DF = DataFrame(data, index=idx, columns=col)
with catch_warnings(record=True):
simplefilter("ignore", pd.errors.PerformanceWarning)
self._check_roundtrip(DF, tm.assert_frame_equal, path=setup_path)
@td.xfail_non_writeable
@pytest.mark.filterwarnings("ignore::pandas.errors.PerformanceWarning")
def test_index_types(self, setup_path):
with catch_warnings(record=True):
values = np.random.randn(2)
func = lambda l, r: tm.assert_series_equal(
l, r, check_dtype=True, check_index_type=True, check_series_type=True
)
with catch_warnings(record=True):
ser = Series(values, [0, "y"])
self._check_roundtrip(ser, func, path=setup_path)
with catch_warnings(record=True):
ser = Series(values, [datetime.datetime.today(), 0])
self._check_roundtrip(ser, func, path=setup_path)
with catch_warnings(record=True):
ser = Series(values, ["y", 0])
self._check_roundtrip(ser, func, path=setup_path)
with catch_warnings(record=True):
ser = Series(values, [datetime.date.today(), "a"])
self._check_roundtrip(ser, func, path=setup_path)
with catch_warnings(record=True):
ser = Series(values, [0, "y"])
self._check_roundtrip(ser, func, path=setup_path)
ser = Series(values, [datetime.datetime.today(), 0])
self._check_roundtrip(ser, func, path=setup_path)
ser = Series(values, ["y", 0])
self._check_roundtrip(ser, func, path=setup_path)
ser = Series(values, [datetime.date.today(), "a"])
self._check_roundtrip(ser, func, path=setup_path)
ser = Series(values, [1.23, "b"])
self._check_roundtrip(ser, func, path=setup_path)
ser = Series(values, [1, 1.53])
self._check_roundtrip(ser, func, path=setup_path)
ser = Series(values, [1, 5])
self._check_roundtrip(ser, func, path=setup_path)
ser = Series(
values, [datetime.datetime(2012, 1, 1), datetime.datetime(2012, 1, 2)]
)
self._check_roundtrip(ser, func, path=setup_path)
def test_timeseries_preepoch(self, setup_path):
dr = bdate_range("1/1/1940", "1/1/1960")
ts = Series(np.random.randn(len(dr)), index=dr)
try:
self._check_roundtrip(ts, tm.assert_series_equal, path=setup_path)
except OverflowError:
pytest.skip("known failer on some windows platforms")
@td.xfail_non_writeable
@pytest.mark.parametrize(
"compression", [False, pytest.param(True, marks=td.skip_if_windows_python_3)]
)
def test_frame(self, compression, setup_path):
df = tm.makeDataFrame()
# put in some random NAs
df.values[0, 0] = np.nan
df.values[5, 3] = np.nan
self._check_roundtrip_table(
df, tm.assert_frame_equal, path=setup_path, compression=compression
)
self._check_roundtrip(
df, tm.assert_frame_equal, path=setup_path, compression=compression
)
tdf = tm.makeTimeDataFrame()
self._check_roundtrip(
tdf, tm.assert_frame_equal, path=setup_path, compression=compression
)
with ensure_clean_store(setup_path) as store:
# not consolidated
df["foo"] = np.random.randn(len(df))
store["df"] = df
recons = store["df"]
assert recons._data.is_consolidated()
# empty
self._check_roundtrip(df[:0], tm.assert_frame_equal, path=setup_path)
@td.xfail_non_writeable
def test_empty_series_frame(self, setup_path):
s0 = Series(dtype=object)
s1 = Series(name="myseries", dtype=object)
df0 = DataFrame()
df1 = DataFrame(index=["a", "b", "c"])
df2 = DataFrame(columns=["d", "e", "f"])
self._check_roundtrip(s0, tm.assert_series_equal, path=setup_path)
self._check_roundtrip(s1, tm.assert_series_equal, path=setup_path)
self._check_roundtrip(df0, tm.assert_frame_equal, path=setup_path)
self._check_roundtrip(df1, tm.assert_frame_equal, path=setup_path)
self._check_roundtrip(df2, tm.assert_frame_equal, path=setup_path)
@td.xfail_non_writeable
@pytest.mark.parametrize(
"dtype", [np.int64, np.float64, np.object, "m8[ns]", "M8[ns]"]
)
def test_empty_series(self, dtype, setup_path):
s = Series(dtype=dtype)
self._check_roundtrip(s, tm.assert_series_equal, path=setup_path)
def test_can_serialize_dates(self, setup_path):
rng = [x.date() for x in bdate_range("1/1/2000", "1/30/2000")]
frame = DataFrame(np.random.randn(len(rng), 4), index=rng)
self._check_roundtrip(frame, tm.assert_frame_equal, path=setup_path)
def test_store_hierarchical(self, setup_path):
index = MultiIndex(
levels=[["foo", "bar", "baz", "qux"], ["one", "two", "three"]],
codes=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3], [0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=["foo", "bar"],
)
frame = DataFrame(np.random.randn(10, 3), index=index, columns=["A", "B", "C"])
self._check_roundtrip(frame, tm.assert_frame_equal, path=setup_path)
self._check_roundtrip(frame.T, tm.assert_frame_equal, path=setup_path)
self._check_roundtrip(frame["A"], tm.assert_series_equal, path=setup_path)
# check that the names are stored
with ensure_clean_store(setup_path) as store:
store["frame"] = frame
recons = store["frame"]
tm.assert_frame_equal(recons, frame)
def test_store_index_name(self, setup_path):
df = tm.makeDataFrame()
df.index.name = "foo"
with ensure_clean_store(setup_path) as store:
store["frame"] = df
recons = store["frame"]
tm.assert_frame_equal(recons, df)
def test_store_index_name_with_tz(self, setup_path):
# GH 13884
df = pd.DataFrame({"A": [1, 2]})
df.index = pd.DatetimeIndex([1234567890123456787, 1234567890123456788])
df.index = df.index.tz_localize("UTC")
df.index.name = "foo"
with ensure_clean_store(setup_path) as store:
store.put("frame", df, format="table")
recons = store["frame"]
tm.assert_frame_equal(recons, df)
@pytest.mark.parametrize("table_format", ["table", "fixed"])
def test_store_index_name_numpy_str(self, table_format, setup_path):
# GH #13492
idx = pd.Index(
pd.to_datetime([datetime.date(2000, 1, 1), datetime.date(2000, 1, 2)]),
name="cols\u05d2",
)
idx1 = pd.Index(
pd.to_datetime([datetime.date(2010, 1, 1), datetime.date(2010, 1, 2)]),
name="rows\u05d0",
)
df = pd.DataFrame(np.arange(4).reshape(2, 2), columns=idx, index=idx1)
# This used to fail, returning numpy strings instead of python strings.
with ensure_clean_path(setup_path) as path:
df.to_hdf(path, "df", format=table_format)
df2 = read_hdf(path, "df")
tm.assert_frame_equal(df, df2, check_names=True)
assert type(df2.index.name) == str
assert type(df2.columns.name) == str
def test_store_series_name(self, setup_path):
df = tm.makeDataFrame()
series = df["A"]
with ensure_clean_store(setup_path) as store:
store["series"] = series
recons = store["series"]
tm.assert_series_equal(recons, series)
@td.xfail_non_writeable
@pytest.mark.parametrize(
"compression", [False, pytest.param(True, marks=td.skip_if_windows_python_3)]
)
def test_store_mixed(self, compression, setup_path):
def _make_one():
df = tm.makeDataFrame()
df["obj1"] = "foo"
df["obj2"] = "bar"
df["bool1"] = df["A"] > 0
df["bool2"] = df["B"] > 0
df["int1"] = 1
df["int2"] = 2
return df._consolidate()
df1 = _make_one()
df2 = _make_one()
self._check_roundtrip(df1, tm.assert_frame_equal, path=setup_path)
self._check_roundtrip(df2, tm.assert_frame_equal, path=setup_path)
with ensure_clean_store(setup_path) as store:
store["obj"] = df1
tm.assert_frame_equal(store["obj"], df1)
store["obj"] = df2
tm.assert_frame_equal(store["obj"], df2)
# check that can store Series of all of these types
self._check_roundtrip(
df1["obj1"],
tm.assert_series_equal,
path=setup_path,
compression=compression,
)
self._check_roundtrip(
df1["bool1"],
tm.assert_series_equal,
path=setup_path,
compression=compression,
)
self._check_roundtrip(
df1["int1"],
tm.assert_series_equal,
path=setup_path,
compression=compression,
)
@pytest.mark.filterwarnings(
"ignore:\\nduplicate:pandas.io.pytables.DuplicateWarning"
)
def test_select_with_dups(self, setup_path):
# single dtypes
df = DataFrame(np.random.randn(10, 4), columns=["A", "A", "B", "B"])
df.index = date_range("20130101 9:30", periods=10, freq="T")
with ensure_clean_store(setup_path) as store:
store.append("df", df)
result = store.select("df")
expected = df
tm.assert_frame_equal(result, expected, by_blocks=True)
result = store.select("df", columns=df.columns)
expected = df
tm.assert_frame_equal(result, expected, by_blocks=True)
result = store.select("df", columns=["A"])
expected = df.loc[:, ["A"]]
tm.assert_frame_equal(result, expected)
# dups across dtypes
df = concat(
[
DataFrame(np.random.randn(10, 4), columns=["A", "A", "B", "B"]),
DataFrame(
np.random.randint(0, 10, size=20).reshape(10, 2), columns=["A", "C"]
),
],
axis=1,
)
df.index = date_range("20130101 9:30", periods=10, freq="T")
with ensure_clean_store(setup_path) as store:
store.append("df", df)
result = store.select("df")
expected = df
tm.assert_frame_equal(result, expected, by_blocks=True)
result = store.select("df", columns=df.columns)
expected = df
tm.assert_frame_equal(result, expected, by_blocks=True)
expected = df.loc[:, ["A"]]
result = store.select("df", columns=["A"])
tm.assert_frame_equal(result, expected, by_blocks=True)
expected = df.loc[:, ["B", "A"]]
result = store.select("df", columns=["B", "A"])
tm.assert_frame_equal(result, expected, by_blocks=True)
# duplicates on both index and columns
with ensure_clean_store(setup_path) as store:
store.append("df", df)
store.append("df", df)
expected = df.loc[:, ["B", "A"]]
expected = concat([expected, expected])
result = store.select("df", columns=["B", "A"])
tm.assert_frame_equal(result, expected, by_blocks=True)
def test_overwrite_node(self, setup_path):
with ensure_clean_store(setup_path) as store:
store["a"] = tm.makeTimeDataFrame()
ts = tm.makeTimeSeries()
store["a"] = ts
tm.assert_series_equal(store["a"], ts)
def test_select(self, setup_path):
with ensure_clean_store(setup_path) as store:
with catch_warnings(record=True):
# select with columns=
df = tm.makeTimeDataFrame()
_maybe_remove(store, "df")
store.append("df", df)
result = store.select("df", columns=["A", "B"])
expected = df.reindex(columns=["A", "B"])
tm.assert_frame_equal(expected, result)
# equivalently
result = store.select("df", [("columns=['A', 'B']")])
expected = df.reindex(columns=["A", "B"])
tm.assert_frame_equal(expected, result)
# with a data column
_maybe_remove(store, "df")
store.append("df", df, data_columns=["A"])
result = store.select("df", ["A > 0"], columns=["A", "B"])
expected = df[df.A > 0].reindex(columns=["A", "B"])
tm.assert_frame_equal(expected, result)
                # all columns as data columns
_maybe_remove(store, "df")
store.append("df", df, data_columns=True)
result = store.select("df", ["A > 0"], columns=["A", "B"])
expected = df[df.A > 0].reindex(columns=["A", "B"])
tm.assert_frame_equal(expected, result)
# with a data column, but different columns
_maybe_remove(store, "df")
store.append("df", df, data_columns=["A"])
result = store.select("df", ["A > 0"], columns=["C", "D"])
expected = df[df.A > 0].reindex(columns=["C", "D"])
tm.assert_frame_equal(expected, result)
def test_select_dtypes(self, setup_path):
with ensure_clean_store(setup_path) as store:
# with a Timestamp data column (GH #2637)
df = DataFrame(
dict(ts=bdate_range("2012-01-01", periods=300), A=np.random.randn(300))
)
_maybe_remove(store, "df")
store.append("df", df, data_columns=["ts", "A"])
result = store.select("df", "ts>=Timestamp('2012-02-01')")
expected = df[df.ts >= Timestamp("2012-02-01")]
tm.assert_frame_equal(expected, result)
# bool columns (GH #2849)
df = DataFrame(np.random.randn(5, 2), columns=["A", "B"])
df["object"] = "foo"
df.loc[4:5, "object"] = "bar"
df["boolv"] = df["A"] > 0
_maybe_remove(store, "df")
store.append("df", df, data_columns=True)
expected = df[df.boolv == True].reindex(columns=["A", "boolv"]) # noqa
for v in [True, "true", 1]:
result = store.select(
"df", "boolv == {v!s}".format(v=v), columns=["A", "boolv"]
)
tm.assert_frame_equal(expected, result)
expected = df[df.boolv == False].reindex(columns=["A", "boolv"]) # noqa
for v in [False, "false", 0]:
result = store.select(
"df", "boolv == {v!s}".format(v=v), columns=["A", "boolv"]
)
tm.assert_frame_equal(expected, result)
# integer index
df = DataFrame(dict(A=np.random.rand(20), B=np.random.rand(20)))
_maybe_remove(store, "df_int")
store.append("df_int", df)
result = store.select("df_int", "index<10 and columns=['A']")
expected = df.reindex(index=list(df.index)[0:10], columns=["A"])
tm.assert_frame_equal(expected, result)
# float index
df = DataFrame(
dict(
A=np.random.rand(20),
B=np.random.rand(20),
index=np.arange(20, dtype="f8"),
)
)
_maybe_remove(store, "df_float")
store.append("df_float", df)
result = store.select("df_float", "index<10.0 and columns=['A']")
expected = df.reindex(index=list(df.index)[0:10], columns=["A"])
tm.assert_frame_equal(expected, result)
with ensure_clean_store(setup_path) as store:
# floats w/o NaN
df = DataFrame(dict(cols=range(11), values=range(11)), dtype="float64")
df["cols"] = (df["cols"] + 10).apply(str)
store.append("df1", df, data_columns=True)
result = store.select("df1", where="values>2.0")
expected = df[df["values"] > 2.0]
tm.assert_frame_equal(expected, result)
# floats with NaN
df.iloc[0] = np.nan
expected = df[df["values"] > 2.0]
store.append("df2", df, data_columns=True, index=False)
result = store.select("df2", where="values>2.0")
tm.assert_frame_equal(expected, result)
# https://github.com/PyTables/PyTables/issues/282
# bug in selection when 0th row has a np.nan and an index
# store.append('df3',df,data_columns=True)
# result = store.select(
# 'df3', where='values>2.0')
# tm.assert_frame_equal(expected, result)
# not in first position float with NaN ok too
df = DataFrame(dict(cols=range(11), values=range(11)), dtype="float64")
df["cols"] = (df["cols"] + 10).apply(str)
df.iloc[1] = np.nan
expected = df[df["values"] > 2.0]
store.append("df4", df, data_columns=True)
result = store.select("df4", where="values>2.0")
tm.assert_frame_equal(expected, result)
# test selection with comparison against numpy scalar
# GH 11283
with ensure_clean_store(setup_path) as store:
df = tm.makeDataFrame()
expected = df[df["A"] > 0]
store.append("df", df, data_columns=True)
np_zero = np.float64(0) # noqa
result = store.select("df", where=["A>np_zero"])
tm.assert_frame_equal(expected, result)
def test_select_with_many_inputs(self, setup_path):
with ensure_clean_store(setup_path) as store:
df = DataFrame(
dict(
ts=bdate_range("2012-01-01", periods=300),
A=np.random.randn(300),
B=range(300),
users=["a"] * 50
+ ["b"] * 50
+ ["c"] * 100
+ ["a{i:03d}".format(i=i) for i in range(100)],
)
)
_maybe_remove(store, "df")
store.append("df", df, data_columns=["ts", "A", "B", "users"])
# regular select
result = store.select("df", "ts>=Timestamp('2012-02-01')")
expected = df[df.ts >= Timestamp("2012-02-01")]
tm.assert_frame_equal(expected, result)
# small selector
result = store.select(
"df", "ts>=Timestamp('2012-02-01') & users=['a','b','c']"
)
expected = df[
(df.ts >= Timestamp("2012-02-01")) & df.users.isin(["a", "b", "c"])
]
tm.assert_frame_equal(expected, result)
# big selector along the columns
selector = ["a", "b", "c"] + ["a{i:03d}".format(i=i) for i in range(60)]
result = store.select(
"df", "ts>=Timestamp('2012-02-01') and users=selector"
)
expected = df[(df.ts >= Timestamp("2012-02-01")) & df.users.isin(selector)]
tm.assert_frame_equal(expected, result)
selector = range(100, 200)
result = store.select("df", "B=selector")
expected = df[df.B.isin(selector)]
tm.assert_frame_equal(expected, result)
assert len(result) == 100
# big selector along the index
selector = Index(df.ts[0:100].values)
result = store.select("df", "ts=selector")
expected = df[df.ts.isin(selector.values)]
tm.assert_frame_equal(expected, result)
assert len(result) == 100
def test_select_iterator(self, setup_path):
# single table
with ensure_clean_store(setup_path) as store:
df = tm.makeTimeDataFrame(500)
_maybe_remove(store, "df")
store.append("df", df)
expected = store.select("df")
results = list(store.select("df", iterator=True))
result = concat(results)
tm.assert_frame_equal(expected, result)
results = list(store.select("df", chunksize=100))
assert len(results) == 5
result = concat(results)
tm.assert_frame_equal(expected, result)
results = list(store.select("df", chunksize=150))
result = concat(results)
tm.assert_frame_equal(result, expected)
with ensure_clean_path(setup_path) as path:
df = tm.makeTimeDataFrame(500)
df.to_hdf(path, "df_non_table")
with pytest.raises(TypeError):
read_hdf(path, "df_non_table", chunksize=100)
with pytest.raises(TypeError):
read_hdf(path, "df_non_table", iterator=True)
with ensure_clean_path(setup_path) as path:
df = tm.makeTimeDataFrame(500)
df.to_hdf(path, "df", format="table")
results = list(read_hdf(path, "df", chunksize=100))
result = concat(results)
assert len(results) == 5
tm.assert_frame_equal(result, df)
tm.assert_frame_equal(result, read_hdf(path, "df"))
# multiple
with ensure_clean_store(setup_path) as store:
df1 = tm.makeTimeDataFrame(500)
store.append("df1", df1, data_columns=True)
df2 = tm.makeTimeDataFrame(500).rename(columns="{}_2".format)
df2["foo"] = "bar"
store.append("df2", df2)
df = concat([df1, df2], axis=1)
# full selection
expected = store.select_as_multiple(["df1", "df2"], selector="df1")
results = list(
store.select_as_multiple(["df1", "df2"], selector="df1", chunksize=150)
)
result = concat(results)
tm.assert_frame_equal(expected, result)
def test_select_iterator_complete_8014(self, setup_path):
# GH 8014
# using iterator and where clause
chunksize = 1e4
# no iterator
with ensure_clean_store(setup_path) as store:
expected = tm.makeTimeDataFrame(100064, "S")
_maybe_remove(store, "df")
store.append("df", expected)
beg_dt = expected.index[0]
end_dt = expected.index[-1]
# select w/o iteration and no where clause works
result = store.select("df")
tm.assert_frame_equal(expected, result)
# select w/o iterator and where clause, single term, begin
# of range, works
where = "index >= '{beg_dt}'".format(beg_dt=beg_dt)
result = store.select("df", where=where)
tm.assert_frame_equal(expected, result)
# select w/o iterator and where clause, single term, end
# of range, works
where = "index <= '{end_dt}'".format(end_dt=end_dt)
result = store.select("df", where=where)
tm.assert_frame_equal(expected, result)
# select w/o iterator and where clause, inclusive range,
# works
where = "index >= '{beg_dt}' & index <= '{end_dt}'".format(
beg_dt=beg_dt, end_dt=end_dt
)
result = store.select("df", where=where)
tm.assert_frame_equal(expected, result)
# with iterator, full range
with ensure_clean_store(setup_path) as store:
expected = tm.makeTimeDataFrame(100064, "S")
_maybe_remove(store, "df")
store.append("df", expected)
beg_dt = expected.index[0]
end_dt = expected.index[-1]
# select w/iterator and no where clause works
results = list(store.select("df", chunksize=chunksize))
result = concat(results)
tm.assert_frame_equal(expected, result)
# select w/iterator and where clause, single term, begin of range
where = "index >= '{beg_dt}'".format(beg_dt=beg_dt)
results = list(store.select("df", where=where, chunksize=chunksize))
result = concat(results)
tm.assert_frame_equal(expected, result)
# select w/iterator and where clause, single term, end of range
where = "index <= '{end_dt}'".format(end_dt=end_dt)
results = list(store.select("df", where=where, chunksize=chunksize))
result = concat(results)
tm.assert_frame_equal(expected, result)
# select w/iterator and where clause, inclusive range
where = "index >= '{beg_dt}' & index <= '{end_dt}'".format(
beg_dt=beg_dt, end_dt=end_dt
)
results = list(store.select("df", where=where, chunksize=chunksize))
result = concat(results)
tm.assert_frame_equal(expected, result)
def test_select_iterator_non_complete_8014(self, setup_path):
# GH 8014
# using iterator and where clause
chunksize = 1e4
# with iterator, non complete range
with ensure_clean_store(setup_path) as store:
expected = tm.makeTimeDataFrame(100064, "S")
_maybe_remove(store, "df")
store.append("df", expected)
beg_dt = expected.index[1]
end_dt = expected.index[-2]
# select w/iterator and where clause, single term, begin of range
where = "index >= '{beg_dt}'".format(beg_dt=beg_dt)
results = list(store.select("df", where=where, chunksize=chunksize))
result = concat(results)
rexpected = expected[expected.index >= beg_dt]
tm.assert_frame_equal(rexpected, result)
# select w/iterator and where clause, single term, end of range
where = "index <= '{end_dt}'".format(end_dt=end_dt)
results = list(store.select("df", where=where, chunksize=chunksize))
result = concat(results)
rexpected = expected[expected.index <= end_dt]
tm.assert_frame_equal(rexpected, result)
# select w/iterator and where clause, inclusive range
where = "index >= '{beg_dt}' & index <= '{end_dt}'".format(
beg_dt=beg_dt, end_dt=end_dt
)
results = list(store.select("df", where=where, chunksize=chunksize))
result = concat(results)
rexpected = expected[
(expected.index >= beg_dt) & (expected.index <= end_dt)
]
tm.assert_frame_equal(rexpected, result)
# with iterator, empty where
with ensure_clean_store(setup_path) as store:
expected = tm.makeTimeDataFrame(100064, "S")
_maybe_remove(store, "df")
store.append("df", expected)
end_dt = expected.index[-1]
# select w/iterator and where clause, single term, begin of range
where = "index > '{end_dt}'".format(end_dt=end_dt)
results = list(store.select("df", where=where, chunksize=chunksize))
assert 0 == len(results)
def test_select_iterator_many_empty_frames(self, setup_path):
# GH 8014
# using iterator and where clause can return many empty
# frames.
chunksize = int(1e4)
# with iterator, range limited to the first chunk
with ensure_clean_store(setup_path) as store:
expected = tm.makeTimeDataFrame(100000, "S")
_maybe_remove(store, "df")
store.append("df", expected)
beg_dt = expected.index[0]
end_dt = expected.index[chunksize - 1]
# select w/iterator and where clause, single term, begin of range
where = "index >= '{beg_dt}'".format(beg_dt=beg_dt)
results = list(store.select("df", where=where, chunksize=chunksize))
result = concat(results)
rexpected = expected[expected.index >= beg_dt]
tm.assert_frame_equal(rexpected, result)
# select w/iterator and where clause, single term, end of range
where = "index <= '{end_dt}'".format(end_dt=end_dt)
results = list(store.select("df", where=where, chunksize=chunksize))
assert len(results) == 1
result = concat(results)
rexpected = expected[expected.index <= end_dt]
tm.assert_frame_equal(rexpected, result)
# select w/iterator and where clause, inclusive range
where = "index >= '{beg_dt}' & index <= '{end_dt}'".format(
beg_dt=beg_dt, end_dt=end_dt
)
results = list(store.select("df", where=where, chunksize=chunksize))
# should be 1, is 10
assert len(results) == 1
result = concat(results)
rexpected = expected[
(expected.index >= beg_dt) & (expected.index <= end_dt)
]
tm.assert_frame_equal(rexpected, result)
# select w/iterator and where clause which selects
# *nothing*.
#
# To be consistent with Python idiom I suggest this should
# return [] e.g. `for e in []: print True` never prints
# True.
where = "index <= '{beg_dt}' & index >= '{end_dt}'".format(
beg_dt=beg_dt, end_dt=end_dt
)
results = list(store.select("df", where=where, chunksize=chunksize))
# should be []
assert len(results) == 0
@pytest.mark.filterwarnings(
"ignore:\\nthe :pandas.io.pytables.AttributeConflictWarning"
)
def test_retain_index_attributes(self, setup_path):
# GH 3499, losing frequency info on index recreation
df = DataFrame(
dict(A=Series(range(3), index=date_range("2000-1-1", periods=3, freq="H")))
)
with ensure_clean_store(setup_path) as store:
_maybe_remove(store, "data")
store.put("data", df, format="table")
result = store.get("data")
tm.assert_frame_equal(df, result)
for attr in ["freq", "tz", "name"]:
for idx in ["index", "columns"]:
assert getattr(getattr(df, idx), attr, None) == getattr(
getattr(result, idx), attr, None
)
# try to append a table with a different frequency
with catch_warnings(record=True):
df2 = DataFrame(
dict(
A=Series(
range(3), index=date_range("2002-1-1", periods=3, freq="D")
)
)
)
store.append("data", df2)
assert store.get_storer("data").info["index"]["freq"] is None
# this is ok
_maybe_remove(store, "df2")
df2 = DataFrame(
dict(
A=Series(
range(3),
index=[
Timestamp("20010101"),
Timestamp("20010102"),
Timestamp("20020101"),
],
)
)
)
store.append("df2", df2)
df3 = DataFrame(
dict(
A=Series(
range(3), index=date_range("2002-1-1", periods=3, freq="D")
)
)
)
store.append("df2", df3)
@pytest.mark.filterwarnings(
"ignore:\\nthe :pandas.io.pytables.AttributeConflictWarning"
)
def test_retain_index_attributes2(self, setup_path):
with ensure_clean_path(setup_path) as path:
with catch_warnings(record=True):
df = DataFrame(
dict(
A=Series(
range(3), index=date_range("2000-1-1", periods=3, freq="H")
)
)
)
df.to_hdf(path, "data", mode="w", append=True)
df2 = DataFrame(
dict(
A=Series(
range(3), index=date_range("2002-1-1", periods=3, freq="D")
)
)
)
df2.to_hdf(path, "data", append=True)
idx = date_range("2000-1-1", periods=3, freq="H")
idx.name = "foo"
df = DataFrame(dict(A=Series(range(3), index=idx)))
df.to_hdf(path, "data", mode="w", append=True)
assert read_hdf(path, "data").index.name == "foo"
with catch_warnings(record=True):
idx2 = date_range("2001-1-1", periods=3, freq="H")
idx2.name = "bar"
df2 = DataFrame(dict(A=Series(range(3), index=idx2)))
df2.to_hdf(path, "data", append=True)
assert | read_hdf(path, "data") | pandas.io.pytables.read_hdf |
import sys
import time
import numpy as np
import pandas as pd
from scipy.special import softmax
# from sklearn.feature_selection import SelectKBest
# from sklearn.feature_selection import VarianceThreshold
np.seterr(divide='ignore', invalid='ignore')
st = time.time()
mode = sys.argv[1]
train_path = sys.argv[2]
test_path = sys.argv[3]
def f(pred,Y_train):
pred = np.clip(pred,a_min=10**(-15),a_max=10**15)
v = np.log(np.sum(Y_train*pred,axis=1))
#v = np.clip(v,a_min=10**(-50),a_max=10**(50))
return abs(np.sum(v)/Y_train.shape[0])
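# Hedged usage sketch of f(): it computes the mean log-loss (cross-entropy) between one-hot
# labels and predicted class probabilities; the clipping above guards against log(0).
# Shapes are inferred from the code, not stated in the original script:
#   Y_train = np.array([[1, 0], [0, 1]])                        # one-hot labels, (n_samples, n_classes)
#   pred = softmax(np.array([[2.0, 0.0], [0.0, 3.0]]), axis=1)  # probabilities, same shape
#   loss = f(pred, Y_train)                                     # scalar loss value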
def read_and_encode(train_path,test_path):
train = pd.read_csv(train_path, index_col = 0)
test = pd.read_csv(test_path, index_col = 0)
Y_df = train['Length of Stay']
train = train.drop(columns = ['Length of Stay'])
#Ensuring consistency of One-Hot Encoding
data = pd.concat([train, test], ignore_index = True)
cols = train.columns
cols = cols[:-1]
data = pd.get_dummies(data, columns=cols, drop_first=True)
data = data.to_numpy()
X_train = data[:train.shape[0], :]
X_test = data[train.shape[0]:, :]
Y_train = pd.get_dummies(Y_df).to_numpy()
return X_train,Y_train,X_test
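# Hedged usage sketch (file names are illustrative; assumes the layout this script expects,
# i.e. an index column plus a 'Length of Stay' target in the training file):
#   X_train, Y_train, X_test = read_and_encode('train.csv', 'test.csv')
# Concatenating train and test before pd.get_dummies keeps the one-hot columns aligned even
# when a category value appears in only one of the two files.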
def read_and_encode_d(train_path,test_path):
#t = time.time()
train = pd.read_csv(train_path, index_col = 0)
test = pd.read_csv(test_path, index_col = 0)
Y_df = train['Length of Stay']
train.drop('Birth Weight',axis=1,inplace=True)
test.drop('Birth Weight',axis=1,inplace=True)
train['n1'] = train['Facility Name'] + 1000*train['APR DRG Code']
test['n1'] = test['Facility Name'] + 1000*test['APR DRG Code']
train['n2'] = train['Facility Name'] + 1000*train['CCS Procedure Code']
test['n2'] = test['Facility Name'] + 1000*test['CCS Procedure Code']
# train['n3'] = train['Facility Name'] + 1000*train['CCS Diagnosis Code']
# test['n3'] = test['Facility Name'] + 1000*test['CCS Diagnosis Code']
# train['n4'] = train['Zip Code - 3 digits'] + 1000*train['APR DRG Code']
# test['n4'] = test['Zip Code - 3 digits'] + 1000*test['APR DRG Code']
#ss = ['CCS Procedure Code','APR DRG Code','CCS Diagnosis Code','APR MDC Code','APR Severity of Illness Code','APR Risk of Mortality','APR Medical Surgical Description']
ss = ['n1','n2']
for s in ss:
for ll in pd.unique(train[s]):
#print(ll)
m = train.loc[train[s]==ll]['Length of Stay'].median()
train[s] = train[s].mask(train[s]==ll,m)
test[s] = test[s].mask(test[s]==ll,m)
k = train[s]
l = test[s]
train.drop(s,axis=1,inplace=True)
test.drop(s,axis=1,inplace=True)
train.insert(0,s,k)
test.insert(0,s,l)
train = train.drop(columns = ['Length of Stay'])
#print(train)
#Ensuring consistency of One-Hot Encoding
data = pd.concat([train, test], ignore_index = True)
cols = train.columns
cols = cols[:-1]
data = | pd.get_dummies(data, columns=cols, drop_first=True) | pandas.get_dummies |
# -*- coding: utf-8 -*-
'''
clf.py
'''
import os
import logging
import pandas as pd
import numpy as np
from PIL import Image
from chainer.datasets import ImageDataset, LabeledImageDataset, split_dataset
def _read_image_as_array(path, dtype, img_size, img_type):
image = Image.open(path)
width, height = image.size
if img_type == 'warp':
image = image.resize((img_size, img_size))
elif img_type == 'crop':
if width > height:
diff = (width - height) / 2
box = (diff, 0, height + diff, height)
image = image.crop(box)  # crop() returns a new image, so re-assign
elif height > width:
diff = (height - width) / 2
box = (0, diff, width, width + diff)
image = image.crop(box)
image = image.resize((img_size, img_size))
elif img_type == 'natural':
w = int(width / min(width, height) * img_size)
h = int(height / min(width, height) * img_size)
image = image.resize((w, h))
# return np.asarray(image, dtype=dtype) / 255.
return np.asarray(image, dtype=dtype)
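# Hedged summary of the resize modes above (inferred from the code) plus an illustrative call:
#   'warp'    stretches the image to img_size x img_size, ignoring the aspect ratio
#   'crop'    centre-crops the longer side to a square, then resizes to img_size x img_size
#   'natural' scales the shorter side to img_size while keeping the aspect ratio
#   arr = _read_image_as_array('images/example.jpg', np.float32, 224, 'crop')  # path is hypothetical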
class WarpedImageDataset(ImageDataset):
def __init__(
self, paths, root='.', dtype=np.float32,
use_memory=True, img_size=224, img_type='warp', img_average=None):
super(WarpedImageDataset, self).__init__(
paths, root=root, dtype=dtype)
self.img_size = img_size
self.img_type = img_type
self.img_ave = None
self.use_memory = False
if img_average is not None:
self.img_ave = _read_image_as_array(
img_average, self._dtype, self.img_size, self.img_type)
if use_memory:
self.use_memory = True
self.data = []
for path in self._paths:
path = os.path.join(self._root, path)
image = _read_image_as_array(
path, self._dtype, self.img_size, self.img_type)
self.data.append(image)
def get_example(self, i):
if self.use_memory:
image = self.data[i]
else:
path = os.path.join(self._root, self._paths[i])
image = _read_image_as_array(
path, self._dtype, self.img_size, self.img_type)
if self.img_ave is not None:
image -= self.img_ave
if image.ndim == 2:
# image is greyscale
image = image[:, :, np.newaxis]
return image.transpose(2, 0, 1)
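# Hedged usage sketch (paths are illustrative): load, resize and optionally mean-subtract
# images lazily instead of caching them all in memory.
#   dataset = WarpedImageDataset('paths.txt', root='images', use_memory=False,
#                                img_size=224, img_type='warp')
#   chw_image = dataset.get_example(0)  # float32 array in (channel, height, width) order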
class WarpedLabeledImageDataset(LabeledImageDataset):
def __init__(
self, pairs, root='.', dtype=np.float32, label_dtype=np.int32,
use_memory=True, img_size=224, img_type='warp', img_average=None):
super(WarpedLabeledImageDataset, self).__init__(
pairs, root=root, dtype=dtype, label_dtype=label_dtype)
self.img_size = img_size
self.img_type = img_type
self.img_ave = None
self.use_memory = False
if img_average is not None:
self.img_ave = _read_image_as_array(
img_average, self._dtype, self.img_size, self.img_type)
if use_memory:
self.use_memory = True
self.data = []
for path, _ in self._pairs:
path = os.path.join(self._root, path)
image = _read_image_as_array(
path, self._dtype, self.img_size, self.img_type)
self.data.append(image)
def get_example(self, i):
path, int_label = self._pairs[i]
if self.use_memory:
image = self.data[i]
else:
full_path = os.path.join(self._root, path)
image = _read_image_as_array(
full_path, self._dtype, self.img_size, self.img_type)
if self.img_ave is not None:
image -= self.img_ave
if image.ndim == 2:
# image is greyscale
image = image[:, :, np.newaxis]
label = np.array(int_label, dtype=self._label_dtype)
return image.transpose(2, 0, 1), label
def get_clf_data(use_memory=True, img_size=224, img_type='warp', split_val=0.9):
def __get_train_list():
train_list_path = 'data/clf/train_master.tsv'
dataframe = | pd.read_csv(train_list_path, sep='\t', usecols=['file_name', 'category_id']) | pandas.read_csv |
# # **********************************************************************************************************
# # Important (task is often ignored when doing data science)
# # !!! Clean up project by removing any assets that are no longer needed !!!
# # Remove zip file which has downloaded and the directory to which the files were unzipped
# # System utilities
# import os
# from pathlib import Path
# import shutil
# import wget
# from zipfile import ZipFile
# # Remove the zip file downloaded
# os.remove('names.zip')
# # Remove the directory data-us
# shutil.rmtree('data-us')
# # **********************************************************************************************************
# **********************************************************************************************************
# Download Data
# Start by downloading the data and saving it in an easy-to-read format.
# The raw data of babynames is available to download at https://www.ssa.gov/oact/babynames/names.zip
# as a zip file consisting of a set of comma separated text files for each year.
# Let us download the zip file and extract the files into a directory so we can inspect the files.
# Import modules and functions
import os
import numpy as np
import pandas as pd
from wquantiles import quantile
# Plotting libraries
import matplotlib.pyplot as plt
plt.rcParams["figure.figsize"] = (14, 6)
plt.style.use('seaborn-darkgrid')
import plotly.express as px
# Import modules and functions
import numpy as np
import pandas as pd
from wquantiles import quantile
from pathlib import Path
import shutil
import wget
from zipfile import ZipFile
cwd = os.getcwd()
# **********************************************************************************************************
# !!! Important !!!
# !!! Clean up project by removing any assets that are no longer needed !!!
# ? in sub directory /babynames/
# ? sub directory > data-us
# ? zip file > names.zip
# ? file > names.csv.gz
# ? file > lifetables.csv
# **********************************************************************************************************
pathSubDirDataUs = cwd + '\\babynames\\data-us'
if os.path.exists(pathSubDirDataUs):
shutil.rmtree(pathSubDirDataUs, ignore_errors=True)
pathFileNamesCsvGz = cwd + '\\babynames\\names.csv.gz'
if os.path.exists(pathFileNamesCsvGz):
os.remove(pathFileNamesCsvGz)
# Download the zip file from "https://www.ssa.gov/oact/babynames/names.zip"
wget.download("https://www.ssa.gov/oact/babynames/names.zip")
# Unzip data files to a directory named 'data-us'
zipName = 'names.zip'
zip_names = ZipFile(zipName)
zip_names.extractall(cwd + '\\babynames\\' + 'data-us')
zip_names.close()
# Remove zip file after it has been downloaded and the content has been unzipped
pathFileNamesZip = cwd + '\\' + zipName
if os.path.exists(pathFileNamesZip):
os.remove(pathFileNamesZip)
# Read the data for each year and combine them into a single data frame.
babynames = []
for file in Path(cwd + '\\babynames\\' + 'data-us').iterdir():
if file.name.endswith('txt'):
df = | pd.read_csv(file, names=['name', 'sex', 'births']) | pandas.read_csv |
'''
@Description: code
@Author: MiCi
@Date: 2020-03-12 08:55:59
@LastEditTime: 2020-03-12 23:20:24
@LastEditors: MiCi
'''
import pandas as pd
import numpy as np
class Basic1(object):
def __init__(self):
return
def basic_use(self):
# Data import
filename = query = connection_object = json_string = url = table_name = ''
# Import data from a CSV file
pd.read_csv(filename)
# Import data from a delimited text file (e.g. TSV)
pd.read_table(filename)
# Import data from an Excel file
pd.read_excel(filename)
# Import data from a SQL table/database
pd.read_sql(query, connection_object)
# Import data from a JSON-formatted string, URL or file
pd.read_json(json_string)
# Import DataFrame data parsed from the tables of an HTML page at the given URL
pd.read_html(url)
# Import data from the system clipboard
pd.read_clipboard()
# Import data from a Python dict, where keys become the column headers and values the column contents
pd.DataFrame(dict)
# Data export
df = | pd.DataFrame() | pandas.DataFrame |
import sys
import pandas as pd
import matplotlib
import numpy as np
import scipy as sp
import IPython
import sklearn
import mglearn
# !! This script is not optimized.
print(f"Python version {sys.version}")
print(f"pandes version {pd.__version__}")
print(f"matplotlib version {matplotlib.__version__}")
print(f"numpy version {np.__version__}")
print(f"scipy version {sp.__version__}")
print(f"IPython version {IPython.__version__}")
print(f"scikit-learn version {sklearn.__version__}")
print(f"mglearn version {mglearn.__version__}")
from sklearn.datasets import load_iris
iris_dataset = load_iris()
print(f"iris_dataset keys : {iris_dataset.keys()}")
print(f"iris_dataset description : {iris_dataset['DESCR'][:225]}")
print(f"iris_dataset sample : \n{iris_dataset['data'][:5]}")
# train_test_split extracts 75% of the dataset to train our model and keeps the remaining 25% for testing.
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(
iris_dataset["data"], iris_dataset["target"], random_state=0
)
print(f"X_train shape : {X_train.shape}")
print(f"y_train shape : {y_train.shape}")
print(f"X_test shape : {X_test.shape}")
print(f"y_test shape : {y_test.shape}")
import matplotlib.pyplot as plt
iris_dataframe = | pd.DataFrame(X_train, columns=iris_dataset.feature_names) | pandas.DataFrame |
import datareader
import dataextractor
import bandreader
import numpy as np
from _bisect import bisect
import matplotlib.pyplot as plt
import matplotlib.ticker as plticker
import pandas as pd
from scipy import stats
from sklearn import metrics
def full_signal_extract(path, ident):
"""Extract breathing and heartbeat features from one user and save features to file.
:param path: (str) main path to data, where user data is located in specific folders
:param ident: (str) user identifier
:return: Nothing. It saves features (dataframe) to a .csv file
"""
dataread = datareader.DataReader(path, ident) # initialize path to data
data = dataread.read_grc_data() # read from files
data = dataread.unwrap_grc_data() # unwrap phase. returns time and y values
samp_rate = round(len(data[1]) / max(data[0]))
dataextract = dataextractor.DataExtractor(data[0], data[1], samp_rate)
cog_res = dataread.read_cognitive_load_study(ident + '-primary-extract.txt')
end_epoch_time = dataread.get_end_time_cognitive_load_study() # end t
extracted_br_features = dataextract.raw_windowing_breathing(30, 1)
extracted_br_features['br_rate'] = np.array(extracted_br_features['br_rate'].rolling(6).mean())
extracted_br_features_roll_avg = extracted_br_features.loc[:, extracted_br_features.columns != 'times'].rolling(
6).mean()
extracted_br_features_roll_avg['times'] = extracted_br_features['times']
extracted_br_features_roll_avg['br_ok'] = extracted_br_features['br_ok']
extracted_hr_features = dataextract.raw_windowing_heartrate(10, 1)
extracted_hr_features = extracted_hr_features.drop(['hr_HRV_lf', 'hr_HRV_hf', 'hr_HRV_lf_hf'], axis=1)
extracted_hr_features_roll_avg = extracted_hr_features.loc[:, extracted_hr_features.columns != 'times'].rolling(
10).mean()
extracted_hr_features_roll_avg['times'] = extracted_hr_features['times']
extracted_hr_features_roll_avg['hr_ok'] = extracted_hr_features['hr_ok']
extracted_hr_features2 = dataextract.raw_windowing_heartrate(100, 1) # longer time to extract HRV frequency feat.
extracted_hr_features2 = extracted_hr_features2[['hr_HRV_lf', 'hr_HRV_hf', 'hr_HRV_lf_hf', 'times']]
extracted_hr_features2_roll_avg = extracted_hr_features2.loc[:, extracted_hr_features2.columns != 'times'].rolling(
10).mean()
extracted_hr_features2_roll_avg['times'] = extracted_hr_features2['times']
all_features = extracted_br_features_roll_avg
all_features = pd.merge(all_features, extracted_hr_features_roll_avg, on='times')
all_features = pd.merge(all_features, extracted_hr_features2_roll_avg, on='times')
task_timestamps = dataread.get_data_task_timestamps()
relax_timestamps = dataread.get_relax_timestamps()
bandread = bandreader.HeartRateBand(path + '_Hrates/', ident)
band_data = bandread.load()
band_data_time_start = bisect(band_data[0][:], end_epoch_time - data[0][-1] * 1000)
band_data_time_stop = bisect(band_data[0][:], end_epoch_time)
band_data = [band_data[0][band_data_time_start:band_data_time_stop],
band_data[1][band_data_time_start:band_data_time_stop]]
band_data_new__data = [(band_data[0] - band_data[0][0]) / 1000, band_data[1]]
hr_data = extracted_hr_features_roll_avg[['times', 'hr_rate']]
hr_data['times'] = hr_data['times'].astype(int)
band_data = pd.DataFrame()
band_data['times'] = band_data_new__data[0]
band_data['times'] = band_data['times'].astype(int)
band_data['band_rate'] = band_data_new__data[1]
band_data = band_data.drop_duplicates(subset=['times'])
together_data = pd.merge(hr_data, band_data, on='times')
together_data = together_data.dropna()
for i in range(len(all_features['times'])):
find_in_hr_data = bisect(together_data['times'], all_features['times'][i])
all_features.ix[i, 'band_rate'] = together_data['band_rate'][find_in_hr_data]
for i in range(len(cog_res)):
all_feat_ind_task_start = bisect(all_features['times'], task_timestamps[i][0])
all_feat_ind_task_end = bisect(all_features['times'], task_timestamps[i][1])
for j in cog_res.columns:
all_features.ix[all_feat_ind_task_start:all_feat_ind_task_end, j] = cog_res.iloc[i][j]
if cog_res.iloc[i][j] == 'GC' or cog_res.iloc[i][j] == 'PT':
all_features.ix[all_feat_ind_task_start:all_feat_ind_task_end, 'keyboard_task'] = True
elif cog_res.iloc[i][j] == 'HP' or cog_res.iloc[i][j] == 'FA' or cog_res.iloc[i][j] == 'NC' or \
cog_res.iloc[i][j] == 'SX':
all_features.ix[all_feat_ind_task_start:all_feat_ind_task_end, 'keyboard_task'] = False
for k in range(all_feat_ind_task_end - all_feat_ind_task_start + 1):
all_features.ix[k + all_feat_ind_task_start, 'on_task_or_break_index'] = k
for k in range(all_feat_ind_task_end - all_feat_ind_task_start, -1, -1):
all_features.ix[all_feat_ind_task_end - k, 'on_task_or_break_index_down'] = k
all_features.ix[all_feat_ind_task_start:all_feat_ind_task_end, 'on_task'] = True
for i in range(len(relax_timestamps)):
all_feat_ind_task_start = bisect(all_features['times'], relax_timestamps[i][0])
all_feat_ind_task_end = bisect(all_features['times'], relax_timestamps[i][1])
new_end = all_feat_ind_task_end + 30
# if i==0:
# continue
for k in range(all_feat_ind_task_end - all_feat_ind_task_start + 1):
all_features.ix[k + all_feat_ind_task_start, 'on_task_or_break_index'] = k
all_features.ix[k + all_feat_ind_task_start, 'consecutive_break'] = i
for k in range(new_end - all_feat_ind_task_start + 1):
all_features.ix[k + all_feat_ind_task_start, 'on_break_and_after_index'] = k
if k <= 15:
all_features.ix[k + all_feat_ind_task_start, 'engagement_increase'] = False
elif k <= 30:
all_features.ix[k + all_feat_ind_task_start, 'engagement_increase'] = np.nan
else:
all_features.ix[k + all_feat_ind_task_start, 'engagement_increase'] = True
for k in range(all_feat_ind_task_end - all_feat_ind_task_start, -1, -1):
all_features.ix[all_feat_ind_task_end - k, 'on_task_or_break_index_down'] = k
all_features.ix[all_feat_ind_task_start:all_feat_ind_task_end, 'on_task'] = False
all_features['person_id'] = cog_res['person_id'][0]
all_features.to_csv(path_or_buf=path + ident + '/' + ident + '-data.csv', index=False)
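# Hedged usage sketch (path and identifier are illustrative, not from the original study):
#   full_signal_extract('data/', 'user01')
# writes 'data/user01/user01-data.csv' containing the rolling breathing/heart-rate features,
# the Microsoft Band rates and the per-task cognitive-load annotations merged on time.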
def extract_for_all_users_and_combine(path, idents, outfile):
for i in idents:
print(i)
full_signal_extract(path, i)
append_csv_files(path, idents, outfile)
def plot_all_full_signals(path, idents):
for i in idents:
print(i)
plot_whole_signal_and_tasks_times(path, i)
def compare_extracted_hr_and_band(path, ident):
"""Compater heart rates acquired wirelessly and with Microfost Band.
:param path: (str) main path to data, where user data is located in specific folders
:param ident: (str) user identifier
:return: MAE, MSE, CORRelation values of the aligned HR time series
"""
dataread = datareader.DataReader(path, ident) # initialize path to data
data = dataread.read_grc_data() # read from files
data = dataread.unwrap_grc_data() # unwrap phase. returns time and y values
samp_rate = round(len(data[1]) / max(data[0]))
dataextract = dataextractor.DataExtractor(data[0], data[1], samp_rate)
cog_res = dataread.read_cognitive_load_study(ident + '-primary-extract.txt')
end_epoch_time = dataread.get_end_time_cognitive_load_study() # end t
extracted_br_features = dataextract.raw_windowing_breathing(30, 1)
extracted_br_features['br_rate'] = np.array(extracted_br_features['br_rate'].rolling(6).mean())
extracted_br_features_roll_avg = extracted_br_features.loc[:, extracted_br_features.columns != 'times'].rolling(
6).mean()
extracted_br_features_roll_avg['times'] = extracted_br_features['times']
extracted_br_features_roll_avg['br_ok'] = extracted_br_features['br_ok']
extracted_hr_features = dataextract.raw_windowing_heartrate(10, 1)
extracted_hr_features = extracted_hr_features.drop(['hr_HRV_lf', 'hr_HRV_hf', 'hr_HRV_lf_hf'], axis=1)
extracted_hr_features_roll_avg = extracted_hr_features.loc[:, extracted_hr_features.columns != 'times'].rolling(
10).mean()
extracted_hr_features_roll_avg['times'] = extracted_hr_features['times']
extracted_hr_features_roll_avg['hr_ok1'] = extracted_hr_features['hr_ok']
bandread = bandreader.HeartRateBand(path + '_Hrates/', ident)
band_data = bandread.load()
band_data_time_start = bisect(band_data[0][:], end_epoch_time - data[0][-1] * 1000)
band_data_time_stop = bisect(band_data[0][:], end_epoch_time)
band_data = [band_data[0][band_data_time_start:band_data_time_stop],
band_data[1][band_data_time_start:band_data_time_stop]]
band_data_new_data = [(band_data[0] - band_data[0][0]) / 1000, band_data[1]]
plt.figure(1)
plt.clf()
plt.plot(extracted_hr_features_roll_avg['times'], extracted_hr_features_roll_avg['hr_rate'], color='orange',
label='Wi-Mind heart rate')
plt.plot(band_data_new_data[0], band_data_new_data[1], color='green', label='Microsoft Band heart rate')
plt.xlabel('time (s)')
plt.ylabel('heart rate')
plt.legend()
plt.show()
hr_data = extracted_hr_features_roll_avg[['times', 'hr_rate']]
hr_data['times'] = hr_data['times'].astype(int)
band_data = | pd.DataFrame() | pandas.DataFrame |
#!/usr/bin/env python
# coding: utf-8
# In[48]:
import pandas as pd
import urllib
import numpy as np
import json
from tqdm.autonotebook import tqdm
#%matplotlib inline
tqdm.pandas()
import dask.dataframe as dd
from dask.multiprocessing import get
from dask.diagnostics import ProgressBar
from datetime import datetime
import matplotlib.pyplot as plt
from IPython.display import display
# In[41]:
import urllib3
# In[42]:
http = urllib3.PoolManager()
# In[43]:
from config_batch import *
# # Functions
# In[44]:
ws_hostname = "127.0.1.1"
ws_hostname = "10.1.0.45"
# ws_hostname = "192.168.1.3"
# In[45]:
def call_ws(addr_data, check_result=True, structured_osm=False): #lg = "en,fr,nl"
t = datetime.now()
params = urllib.parse.urlencode({"street": addr_data[street_field],
"housenumber": addr_data[housenbr_field],
"city": addr_data[city_field],
"postcode": addr_data[postcode_field],
"country": addr_data[country_field],
"check_result" : "yes" if check_result else "no",
"struct_osm" : "yes" if structured_osm else "no"
})
url = f"http://{ws_hostname}:5000/search/?{params}"
print(url)
try:
with urllib.request.urlopen(url) as response:
res = response.read()
res = json.loads(res)
# print(res)
res["time"] = datetime.now() - t
return res
except Exception as e:
return str(e)
# In[16]:
def call_ws_batch(addr_data, mode="geo", with_reject=False, check_result=True, structured_osm=False): #lg = "en,fr,nl"
# print(addr_data)
# print(addr_data.shape)
# print()
file_data = addr_data.rename(columns = {
street_field : "street",
housenbr_field: "housenumber",
postcode_field: "postcode",
city_field: "city",
country_field: "country",
addr_key_field : "addr_key"
}).to_csv(index=False)
r = http.request(
'POST',
f'http://{ws_hostname}:5000/batch',
fields= {
'media': ('addresses.csv', file_data),
'mode': mode,
"with_rejected" : "yes" if with_reject else "no",
"check_result" : "yes" if check_result else "no",
"struct_osm" : "yes" if structured_osm else "no"
})
try:
res = pd.DataFrame(json.loads(r.data.decode('utf-8')))
except ValueError:
print("Cannot decode result:")
print(json.loads(r.data.decode('utf-8')))
return
# display(res)
return res
# In[46]:
def expand_json(addresses):
addresses["status"]= addresses.json.apply(lambda d: "error" if "error" in d else "match" if "match" in d else "rejected")
addresses["time"] = addresses.json.apply(lambda d: d["time"])
addresses["timing"] = addresses.json.apply(lambda d: d["timing"] if "timing" in d else {})
addresses["method"]= addresses.json.apply(lambda d: d["match"][0]["method"] if len(d)>0 and "match" in d else "none")
for field in ["street", "number", "postcode", "city"]:
addresses[field]= addresses.json.apply(lambda d: d["match"][0]["addr_out_"+field] if len(d)>0 and "match" in d else "")
return
# # Calls
# ## Single address calls
# In[49]:
call_ws({street_field: "Av. Fonsny",
housenbr_field: "20",
city_field: "Saint-Gilles",
postcode_field: "1060",
country_field: "Belgium"}, check_result=True, structured_osm=False)
# In[21]:
call_ws({street_field: "",
housenbr_field: "",
city_field: "Dinant",
postcode_field: "5500",
country_field: "Belgium"}, check_result=True, structured_osm=True)
# In[11]:
call_ws({street_field: "Fechtergasse",
housenbr_field: "16/13",
city_field: "Wenen",
postcode_field: "1090",
country_field: "Oostenrijk"}, check_result=False, structured_osm=False)
# In[12]:
call_ws({street_field: "Fechtergasse 16/13 1090 Wenen",
housenbr_field: "",
city_field: "",
postcode_field: "",
country_field: "Oostenrijk"}, check_result=False, structured_osm=False)
# ## Batch calls (row by row)
# In[38]:
addresses = get_addresses("address.csv.gz")
addresses = addresses.sample(100).copy()
# ### Simple way
# In[74]:
addresses["json"] = addresses.progress_apply(call_ws, check_result=True, structured_osm=False, axis=1)
# ### Using Dask
# In[17]:
dd_addresses = dd.from_pandas(addresses, npartitions=4)
dask_task = dd_addresses.apply(call_ws, meta=('x', 'str'), axis=1)
with ProgressBar():
addresses["json"] = dask_task.compute()
# In[26]:
expand_json(addresses)
# In[27]:
addresses
# ## Batch calls (batch WS)
# ### Single block
# In[39]:
# Only geocoding
# addresses["StreetFR"] = ""
call_ws_batch(addresses, mode="geo", check_result=True, structured_osm=True)
# In[62]:
# Geocode + address
call_ws_batch(addresses, mode="short")
# In[63]:
# Geocode + address, with rejected addresses
call_ws_batch(addresses, mode="long", with_reject=True)
# ### Batch blocs
# In[21]:
def call_ws_batch_chunks(addr_data, mode="geo", with_reject=False, check_result=True, structured_osm=False, chunk_size=100):
## TODO : find a better way with dask? It seems that map_partitions does not support function returning dataframes.
chunks = np.array_split(addr_data, addr_data.shape[0]//chunk_size)
res= [call_ws_batch(chunk, mode=mode,
check_result=check_result,
structured_osm=structured_osm) for chunk in tqdm(chunks)]
df_res = | pd.concat(res, sort=False) | pandas.concat |
# ENRICHMENT SCRIPT
import random
import numpy as np
import pandas as pd
import seaborn as sns
from scipy.special import comb
from collections import Counter
import matplotlib.pyplot as plt
from scipy.stats import hypergeom
from pyclustering.cluster.kmedoids import kmedoids
from statsmodels.stats.multitest import multipletests
from ast import literal_eval
# DESCRIPTION.
'''
This script contains all the functions that are necessary to perform an enrichment analysis.
'''
# Function to calculate the percentage of Clusters with at least one GO term enriched:
def Function_Calculate_Clust_Perc(Data_frame, total_K):
Result = pd.DataFrame(columns=["K_Number", "Enrichment_Proportions"])
for k in range(1,total_K,5):
container = Data_frame[Data_frame.Option == (k)]
result_it = container[container.Enriched_GO != 0]
result_it = len(result_it.index) * 100 / (k)
if result_it != 0:
Iterator_DB = pd.DataFrame({'K_Number' : [k],
'Enrichment_Proportions' : [result_it]})
Result = Result.append(Iterator_DB)
else:
Iterator_DB = pd.DataFrame({'K_Number' : [k],
'Enrichment_Proportions' : [0]})
Result = Result.append(Iterator_DB)
return(Result)
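# Hedged note on the expected input, inferred from the column accesses above (not documented
# in the original script): `Data_frame` needs an 'Option' column (the k used for clustering)
# and an 'Enriched_GO' column (number of enriched GO terms per cluster). For every sampled k
# the function returns the percentage of that run's clusters with at least one enriched term.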
# Function to Calculate Percentage GO terms enriched in each k option:
def Function_GO_enriched(Data_frame, total_GO_Terms, total_K):
Result = pd.DataFrame(columns=["K_Number", "GO_Proportions"])
for k in range(1,total_K,5):
container = Data_frame[Data_frame.Option == (k)]
if container.size != 0:
GO_terms = pd.DataFrame(container["Term"])
GO_terms_iter = []
for index, rows in GO_terms.iterrows():
GO_terms_iter.extend([rows.Term])
GO_terms_iter_No_Blank = [x for x in GO_terms_iter if x != []]
GO_terms_flat = [item for sublist in GO_terms_iter_No_Blank for item in sublist]
Total_GO = len(Counter(GO_terms_flat).keys())*100/total_GO_Terms
Iterator = pd.DataFrame({'K_Number' : [k],
'GO_Proportions' : [Total_GO]})
Result = Result.append(Iterator)
elif container.size == 0:
Iterator = pd.DataFrame({'K_Number' : [k],
'GO_Proportions' : [0]})
Result = Result.append(Iterator)
return(Result)
# Function to calculate the genes that are enriched per each cluster
def Function_Gene_enriched(Data_frame_Genes, Data_frame_Results, total_K, Gene_Names):
total_genes = Gene_Names.size
genes_enriched = pd.DataFrame(columns=["K_Option", "Total_enriched"])
for k in range(1, total_K, 5):
container_genes = Data_frame_Genes[Data_frame_Genes.Option == (k)]
container_terms_enriched = Data_frame_Results[Data_frame_Results.Option == (k)]
Count = 0
for cluster in range(1, k):
genes_cluster = container_genes[container_genes.Cluster == cluster]
GO_cluster = container_terms_enriched[container_terms_enriched.Cluster == cluster]
if GO_cluster["Term"].size == 0:
Count = Count + 0
elif GO_cluster["Term"].size != 0:
# Take the GO terms (to flat variable):
GO_cluster = pd.DataFrame(GO_cluster.Term)
GO_terms_iter = []
for index, rows in GO_cluster.iterrows():
GO_terms_iter.extend([rows.Term])
GO_terms_iter_No_Blank = [x for x in GO_terms_iter if x != []]
GO_terms_flat = [item for sublist in GO_terms_iter_No_Blank for item in sublist]
# Transform the data into a data frame:
GO_erniched_cluster_k = pd.DataFrame({"GO_Term" : GO_terms_flat})
# Merge to count the number:
genes = genes_cluster[genes_cluster.GO_Term.isin(GO_erniched_cluster_k["GO_Term"])]
# Count
Count = Count + genes["Gene"].nunique()
Total = ((Count * 100)/total_genes)
Result = pd.DataFrame({"K_Option": k, "Total_enriched": [Total]})
genes_enriched = genes_enriched.append(Result)
return(genes_enriched)
# Function to choose the cluster method:
def Cluster_Option(option, genes, Distance,
method_clust = "Kmedoids"):
if method_clust == "Kmedoids":
medoids_random= random.choices(list(range(len(genes))), k= option)
medoids_result = kmedoids(np.array(Distance), medoids_random , data_type = "distance_matrix")
medoids_result = medoids_result.process()
clusters_i = medoids_result.get_clusters()
return clusters_i
else:
print("No more avaliable distances yet")
# Function to perform the enrichment:
def Enrichment_Analyses_GO_terms(Name_Network,
save_directory,
Annotation_Directory,
Original_Network_Name,
enrichment = "NO",
MaxSize = 500,
MinSize = 5,
Repetitions = 10,
total_K = 100,
Rand_K_Compare = 66,
comparison = "Bin_VS_Prob"
):
## Preparation of the Network to Enrich ##
# All the information of the data base:
GO_Complete_Experimental = pd.read_csv(Annotation_Directory, sep = "\t",
header = 0)
GO_Complete_Experimental.drop("Level", inplace = True, axis = 1)
GO_Complete_Experimental.columns = ["Gene", "GO_Term"]
# The genes of our Network:
genes_Network = pd.read_csv(save_directory + "_Gene_Names_" + str(Original_Network_Name) ,
sep = ",", header = None, skiprows=[0])
genes_Network = pd.DataFrame({"Gene" : genes_Network[1]})
# Annotation of the genes of our Network:
GO_Join_Network = GO_Complete_Experimental[GO_Complete_Experimental.Gene.isin(genes_Network.Gene)]
# Filters:
Cut_By = pd.DataFrame(GO_Join_Network.groupby('GO_Term')['Gene'].nunique(dropna=True))
Cut_By = Cut_By[Cut_By.Gene > MinSize]
Cut_By = Cut_By[Cut_By.Gene < MaxSize]
GO_Join_Network = GO_Join_Network[GO_Join_Network.GO_Term.isin(Cut_By.index)]
## Distances Preparation for clustering ##
# Loading:
Distance = pd.read_csv(save_directory + "_Result_Tijana_Final_" + Name_Network,
sep=" ", header = 0, )
Distance.set_index('1', inplace=True)
## Variables to store the results ##
# Result DataFrames:
Results_GO_Enrichment_Final = pd.DataFrame(columns=["K_Option", "Terms_Enriched"])
Results_Cluster_Enrichment_Final = pd.DataFrame(columns=["K_Option", "Cluster_Enriched"])
Results_Genes_Percent_Final = | pd.DataFrame(columns=["K_Option", "Total_enriched"]) | pandas.DataFrame |
import pandas as pd
import numpy as np
import holidays
from config import log
from datetime import date
from pandas.tseries.offsets import BDay
from collections import defaultdict
from xbbg import blp
logger = log.get_logger()
class clean_trade_file():
def __init__(self, trade_file, reuse_ticker_dict):
self.trade_file = trade_file
self.reuse_ticker_dict = reuse_ticker_dict
def run(self):
def adjust_reuse_ticker(trade_file, reuse_ticker_dict):
for key, value in reuse_ticker_dict.items():
this_reuse_index = trade_file[(trade_file["bbg_ticker"] == key) &
(trade_file["effective_date"] <= pd.Timestamp(value[-1]))].index
trade_file.loc[this_reuse_index, "bbg_ticker"] = value[0]
return trade_file
def adjust_holiday(trade_file):
country_list = list(trade_file["listing_place"].drop_duplicates())
start_date_dict = defaultdict(list)
end_date_dict = defaultdict(list)
for country in country_list:
this_country_df = trade_file[trade_file["listing_place"] == country].copy()
this_holiday = holidays.CountryHoliday(country)
holiday_start_date = [start_date for start_date in list(this_country_df["trade_start_date"].drop_duplicates())
if start_date in this_holiday]
holiday_end_date = [end_date for end_date in list(this_country_df["trade_end_date"].drop_duplicates())
if end_date in this_holiday]
if holiday_start_date != []:
for date in holiday_start_date:
adjust_date = date + BDay(1)
while adjust_date in this_holiday:
adjust_date += BDay(1)
start_date_dict[date] = [adjust_date]
if holiday_end_date != []:
for date in holiday_end_date:
adjust_date = date - BDay(1)
while adjust_date in this_holiday:
adjust_date -= BDay(1)
end_date_dict[date] = [adjust_date]
adjust_num = 0
for adjust_dict in [start_date_dict, end_date_dict]:
date_column = "trade_start_date" if adjust_num == 0 else "trade_end_date"
for key, value in adjust_dict.items():
adjust_index = trade_file[trade_file[date_column] == key].index
trade_file.loc[adjust_index, date_column] = value[0]
adjust_num += 1
return trade_file
# get trade id
trade_file = self.trade_file.reset_index().rename(columns={"index": "trade_id"})
logger.info("Created Trade Id")
# adjust end date beyond today
trade_file["trade_end_date"] = [
pd.Timestamp(date.today() - BDay(1)) if end_date >= pd.Timestamp(date.today()) else end_date
for end_date in trade_file["trade_end_date"]
]
logger.info("Adjusted End Date beyond Today")
# update reuse ticker
trade_file = adjust_reuse_ticker(trade_file=trade_file, reuse_ticker_dict=self.reuse_ticker_dict)
logger.info("Updated re-used BBG ticker")
# adjust holiday
trade_file = adjust_holiday(trade_file=trade_file)
logger.info("Adjusted Start Date and End Date based on Holidays")
return trade_file
class get_backtest_files():
def __init__(self, trade_file, funding_source, output_hsci_trade_file_path, output_hsci_backtest_file_path):
self.trade_file = trade_file
self.funding_source = funding_source
self.output_hsci_trade_file_path = output_hsci_trade_file_path
self.output_hsci_backtest_file_path = output_hsci_backtest_file_path
def run(self):
def reconstruct_price_data(price_data):
price_data = price_data.unstack().reset_index()
price_data.columns = ["bbg_ticker", "item_name", "date", "item_value"]
price_data["item_value"] = price_data["item_value"].astype("float")
price_data["date"] = price_data["date"].astype("datetime64[ns]")
return price_data
def adjust_start_end_date_based_on_trade_data(this_trade_df, price_data):
# halt flag dataframe
active_df = price_data[price_data["item_name"] == "volume"].copy()
active_df = active_df.dropna(subset=["item_value"])
active_stock = list(active_df["bbg_ticker"].drop_duplicates())
halt_list = [stock for stock in this_stock_list if stock not in active_stock]
halt_df = pd.DataFrame(index=halt_list).reset_index().rename(columns={"index": "bbg_ticker"})
halt_df["halt_flag"] = True
logger.info("Got Halt Flag")
# ipo or delist dataframe
start_end_date_df = active_df.groupby(["bbg_ticker"])["date"].agg(["min", "max"])
ipo_df = start_end_date_df[start_end_date_df["min"] != start_date].reset_index().rename(
columns={"min": "ipo_date"}).drop(columns="max")
delist_df = start_end_date_df[start_end_date_df["max"] != end_date].reset_index().rename(
columns={"max": "delist_date"}).drop(columns="min")
logger.info("Got IPO Date and Delist Date")
# ipo return
ipo_return_list = []
if not ipo_df.empty:
for ticker in list(ipo_df["bbg_ticker"].drop_duplicates()):
ipo_return = list(price_data[(price_data["item_name"] == "last_price") &
(price_data["bbg_ticker"] == ticker)].sort_values("date")[
"item_value"].dropna())[:2]
ipo_return = (ipo_return[-1] / ipo_return[0] - 1) * 100
ipo_return_list.append(ipo_return)
ipo_df["ipo_return"] = ipo_return_list
logger.info("Got IPO Return")
# get adjusted trade df
if not halt_df.empty:
this_trade_df = pd.merge(this_trade_df, halt_df, on=["bbg_ticker"], how="left")
this_trade_df["halt_flag"] = this_trade_df["halt_flag"].fillna(False)
else:
this_trade_df["halt_flag"] = False
if not ipo_df.empty:
this_trade_df = pd.merge(this_trade_df, ipo_df, on=["bbg_ticker"], how="left")
else:
this_trade_df["ipo_date"] = pd.NaT
this_trade_df["ipo_return"] = np.nan
if not delist_df.empty:
this_trade_df = pd.merge(this_trade_df, delist_df, on=["bbg_ticker"], how="left")
else:
this_trade_df["delist_date"] = pd.NaT
this_trade_df["trade_start_date"] = [trade_start_date if pd.isnull(ipo_date) else ipo_date
for trade_start_date, ipo_date
in np.array(this_trade_df[["trade_start_date", "ipo_date"]])]
this_trade_df["trade_end_date"] = [trade_end_date if pd.isnull(delist_date) else delist_date
for trade_end_date, delist_date
in np.array(this_trade_df[["trade_end_date", "delist_date"]])]
return this_trade_df
def get_beta(this_trade_df, price_data, funding_source):
stock_beta_df = price_data[(price_data["item_name"] == "beta_adj_overridable") &
(price_data["date"].isin(
list(this_trade_df["trade_start_date"].drop_duplicates())))].copy()
stock_beta_df = stock_beta_df[["bbg_ticker", "date", "item_value"]].rename(
columns={"item_value": "stock_beta",
"date": "trade_start_date"})
this_trade_df = pd.merge(this_trade_df, stock_beta_df, on=["bbg_ticker", "trade_start_date"], how="left")
fund_beta_df = stock_beta_df[stock_beta_df["bbg_ticker"] == funding_source].rename(
columns={"stock_beta": "fund_beta"})
this_trade_df = pd.merge(this_trade_df, fund_beta_df.drop(columns=["bbg_ticker"]), on=["trade_start_date"],
how="left")
return this_trade_df
def get_backtesting_returns(this_trade_df, price_data, funding_source, trade_start_date, trade_end_date):
this_return_df = this_trade_df[(this_trade_df["trade_start_date"] == trade_start_date) &
(this_trade_df["trade_end_date"] == trade_end_date) &
(this_trade_df["halt_flag"] == False)].copy()
if not this_return_df.empty:
this_ticker_list = list(this_return_df["bbg_ticker"].drop_duplicates())
this_price_data = price_data[(price_data["bbg_ticker"].isin(this_ticker_list + [funding_source])) &
(price_data["date"] >= trade_start_date) &
(price_data["date"] <= trade_end_date) &
(price_data["item_name"] == "last_price")].copy()
# calculate return [stock, funding, long_short]
this_pivot_return_df = pd.pivot_table(this_price_data, index="date", columns="bbg_ticker",
values="item_value")
this_pivot_return_df = this_pivot_return_df.pct_change()
this_pivot_return_df = this_pivot_return_df.fillna(0)
this_daily_stock_return_df = this_pivot_return_df.stack().reset_index().rename(columns={0: "daily_stock_return"})
this_daily_fund_return_df = this_daily_stock_return_df[this_daily_stock_return_df["bbg_ticker"] == funding_source].rename(
columns={"daily_stock_return": "daily_fund_return"})
this_pivot_return_df = (1 + this_pivot_return_df).cumprod() - 1
this_stock_return_df = this_pivot_return_df.stack().reset_index().rename(columns={0: "stock_return"})
this_fund_return_df = this_stock_return_df[this_stock_return_df["bbg_ticker"] == funding_source].rename(
columns={"stock_return": "fund_return"})
this_backtest_df = this_return_df[["trade_id", "bbg_ticker"]].copy()
this_backtest_df = pd.merge(this_backtest_df, this_daily_stock_return_df, on=["bbg_ticker"], how="left")
this_backtest_df = pd.merge(this_backtest_df, this_daily_fund_return_df.drop(columns=["bbg_ticker"]), on=["date"], how="left")
this_backtest_df = pd.merge(this_backtest_df, this_stock_return_df, on=["bbg_ticker", "date"], how="left")
this_backtest_df = pd.merge(this_backtest_df, this_fund_return_df.drop(columns=["bbg_ticker"]), on=["date"], how="left")
this_backtest_df[["stock_return", "fund_return", "daily_fund_return", "daily_stock_return"]] *= 100
this_backtest_df["long_short_return"] = this_backtest_df["stock_return"] - this_backtest_df["fund_return"]
# get date index
this_backtest_df = pd.merge(this_backtest_df, this_trade_df[["trade_id", "effective_date"]],
on=["trade_id"], how="left")
this_backtest_df["date_index"] = [np.busday_count( | pd.Timestamp(effect_date) | pandas.Timestamp |
import logging
from copy import deepcopy
import numpy as np
import pandas as pd
from reamber.osu.OsuMap import OsuMap
from reamber.osu.OsuSample import OsuSample
log = logging.getLogger(__name__)
def hitsound_copy(m_from: OsuMap, m_to: OsuMap, inplace: bool = False) -> OsuMap:
""" Copies the hitsound from mFrom to mTo
:param inplace: Whether to just modify this instance or return a modified copy
:param m_from: The map you want to copy from
:param m_to: The map you want to copy to, it doesn't mutate this.
:return: A copy of mTo with the copied hitsounds.
"""
df_from = | pd.concat([i.df for i in m_from.notes], sort=False) | pandas.concat |
import sys
import re
import os
import csv
import shutil
import math
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
import seaborn as sns
import scipy.stats as stats
from tabulate import tabulate
from NotSoFastQC.modules import module_dict as md
from NotSoFastQC.utils import TerminalLog as Log
ENTRY = ">>"
END_MODULE = ">>END_MODULE"
FILTER_TEXT = 0
HEADER = 1
ROWS = 2
# Class that represents an object processing the FastQC file input and generating all data needed.
# Graph generation functions can most probably be optimised as there is some repeated code.
# Each graph is slightly different however and would need a bit of re-shuffling to work. Something to work on maybe.
class FastQCManager:
"""FastQCManager takes the user input validated args and creates reports for all selected modules.
Table of Basic Statistics gets displayed in console
"""
def __init__(self, validated_args, overwrite):
self.file = validated_args[0]
self.directory = validated_args[1]
self.modules = validated_args[2]
self.encoding = None
Log.notify("STARTING MODULE REPORTS...")
# Creates directory and reports for each module in succession.
# Could be optimised so that file doesn't need to be opened and parsed for each module given
for module in self.modules:
self.module_name = md.get(module)
self.working_path = os.path.join(self.directory, self.module_name.replace(' ', '_'))
self.build_directory(overwrite)
self.data = self.pull_data(self.module_name)
self.write_reports()
# For some reason graph generation doesn't work in terminal but does in PyCharm... can't figure this out.
# I know it is exclusively to do with sns.lineplot() (based on console output), but works fine in my IDE?
# With longer deadline, I think this could be fixed by altering code, but will probably take time.
try:
self.switch_graph(module)
except IndexError:
Log.warning("Graph cannot be made, this problem is recognised.")
self.show_basic_statistics()
def mod_1_graph(self):
"""Creates graph for Per base sequence quality"""
data = []
bases = []
means = {}
# Puts data into format that can be used for creating graphs
for row in self.data[ROWS]:
bases.append(row[0])
means[int(row[0])-1] = float(row[1])
data.append([row[5], row[3], row[2], row[4], row[6]])
# Sets window size for graph
fig, ax = plt.subplots(figsize=(12, 10))
sns.boxplot(data=data, whis=[0, 100], color="yellow", zorder=1)
sns.lineplot(data=means, ax=ax, zorder=10)
# Axis configuration
ax.set(xticklabels=bases, title="Quality scores across all bases (" + self.encoding + " encoding)")
ax.set(ylim=0)
ax.xaxis.set_major_locator(ticker.MultipleLocator(base=2))
ax.yaxis.set_major_locator(ticker.MultipleLocator(base=2))
plt.xlabel('Position in read (bp)')
# Formats colour for background of graph
for line in ax.get_lines()[4::6]:
line.set_color('red')
for item in ([ax.title, ax.xaxis.label, ax.yaxis.label] +
ax.get_xticklabels() + ax.get_yticklabels()):
item.set_fontsize(10)
for i in range(0, len(bases)):
if i % 2 == 0:
plt.axvspan(i - 0.5, i + 0.5, facecolor='black', alpha=0.08, zorder=-100)
else:
plt.axvspan(i - 0.5, i + 0.5, facecolor='black', alpha=0, zorder=-100)
for i in range(0, int(ax.get_ylim()[1]) + 2):
if i <= 20:
plt.axhspan(i - 1, i, facecolor='red', alpha=0.3, zorder=-100)
elif 20 < i <= 28:
plt.axhspan(i - 1, i, facecolor='yellow', alpha=0.3, zorder=-100)
else:
plt.axhspan(i - 1, i, facecolor='green', alpha=0.3, zorder=-100)
plt.savefig(os.path.join(self.working_path, "graph.png"))
def mod_2_graph(self):
"""Creates graph for Per tile sequence quality"""
# Puts data in format for creating graph
df = | pd.DataFrame(self.data[ROWS], columns=["Tile", "Position in read (bp)", "value"]) | pandas.DataFrame |
import copy
import os
import numpy as np
import pandas as pd
import h5py
import pickle as pkl
# import glob
from pathlib import Path
# %% Hyperparameters
DATA_FILE_NAME = 'model_data'
RUN_NAME = 'run'
# data_filetype = 'pkl'
# %% Helper functions
def __unique_to_set(a, b):
"""
Return elements that are unique to container a and items that are unique to container b among the union of a and b.
Args:
a (container):
b (container):
Returns:
a_unique (list): elements that are contained in container a but not container b
b_unique (list): elements that are contained in container b but not container a
"""
def overlap(a, b):
return list(set(a) & set(b))
def difference(a, b):
return list(set(a) ^ set(b))
dif = difference(a, b)
a_unique = overlap(a, dif)
b_unique = overlap(b, dif)
return a_unique, b_unique
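# Hedged example (illustrative values): for a = [1, 2, 3] and b = [2, 3, 4],
# __unique_to_set(a, b) returns ([1], [4]); the ordering inside each list is not
# guaranteed because the helper goes through set operations.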
# %% Methods for outputting data
def update_output_table(table_params, table_path='output/param_table.csv', compare_exclude=[], column_labels=None,
overwrite_existing=True):
"""
Add row to run tracker table using entries of param_dict.
Args:
table_params (dict, OrderedDict): Parameters that will be put into the table
compare_exclude (list): Parameters that will be excluded when determining if two rows represent the same
run. For instance, if runs are identical except for the date when the run was done, then it might be
reasonable to consider the runs as being identical, reflected in the variable run_number. Hence,
one may want to put the date parameter key in compare_exclude.
table_path (string): The filepath for the table (including that table name, i.e. 'output/param_table.csv').
Windows style paths are okay.
column_labels (list): Contains the keys of params_table in the order in which they should be written in the
run tracker table.
overwrite_existing (bool): Whether or not to overwrite identical table entries or make a new row and
increment run_number.
Returns:
run_id (int): Unique identifier for the run.
"""
table_path = Path(table_path)
table_dir = table_path.parents[0]
Path.mkdir(table_dir, exist_ok=True)
for key in table_params:
table_params[key] = str(table_params[key])
run_id, run_number, param_df_updated, merge_indices = _get_updated_table(compare_exclude, table_params, table_path,
column_labels)
if run_number == 0 or not overwrite_existing:
param_df_updated.to_csv(table_path)
else:
run_id = np.max(merge_indices) # id for most recent run that matches table_params
return run_id
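# Illustrative usage sketch; the parameter names below are hypothetical, not part of this module:
# run_id = update_output_table({'learning_rate': 0.01, 'seed': 0, 'date': '2024-01-01'},
#                              table_path='output/param_table.csv',
#                              compare_exclude=['date'])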
def make_dir_for_run(table_params, table_path='output/param_table.csv', compare_exclude=[],
overwrite_existing=True):
"""
Creates a directory for the run as well as the corresponding row in the parameter table.
Args:
table_params (dict, OrderedDict): Parameters that will be put into the table
table_path (string): The filepath for the table.
compare_exclude (list): Parameters that will be excluded when determining if two rows represent the same
run. For instance, if runs are identical except for the date when the run was done, then it might be
reasonable to consider the runs as being identical, reflected in the variable run_number. Hence,
one may want to put the date parameter key in compare_exclude.
overwrite_existing (bool): Whether or not to overwrite identical table entries or make a new row and
increment run_number.
Returns:
run_id (int): The unique identifier for the run
run_dir (str): The path to the output directory for the run
"""
run_id = update_output_table(table_params, table_path, compare_exclude, [], overwrite_existing)
table_path = Path(table_path)
table_dir = table_path.parents[0]
run_dir = Path(table_dir/(RUN_NAME+'_'+str(run_id)+'/'))
    run_dir.mkdir(parents=True, exist_ok=True)
return run_id, run_dir
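# Illustrative usage sketch with hypothetical parameters:
# run_id, run_dir = make_dir_for_run({'model': 'rnn', 'seed': 1},
#                                    table_path='output/param_table.csv')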
def write_output(output, params, table_params, output_dir, overwrite=False, data_filetype='pickle'):
"""
Args:
output (dict): Dictionary that holds the output data
params (dict): Dictionary that holds the parameters
table_params (dict): Dictionary that holds the parameters in the output table
output_dir (string): Parent directory for output file. The output file name is DATA_FILE_NAME.pkl or
DATA_FILE_NAME.h5
Returns:
"""
    output_dir = Path(output_dir)
    suffix = '.h5' if data_filetype == 'hdf5' else '.pkl'
    output_file = (output_dir/DATA_FILE_NAME).with_suffix(suffix)
print()
print("Attempting to write data to "+str(Path.cwd() / output_file ))
print()
try:
output_dir.mkdir(parents=True)
except (OSError, FileExistsError):
if overwrite:
print("Warning: existing data directory overwritten.")
else:
print("Data directory already exists. Not writing output.")
return
if data_filetype == 'hdf5':
        with h5py.File(output_file, "w") as fid:
param_grp = fid.create_group("parameters")
param_table_grp = fid.create_group("table_parameters")
out_grp = fid.create_group("output")
for key in params:
if params[key] is not None:
param_grp.create_dataset(key, data=params[key])
for key in table_params:
if table_params[key] is not None:
param_table_grp.create_dataset(key, data=table_params[key])
for key in output:
if output[key] is not None:
out_grp.create_dataset(key, data=output[key])
elif data_filetype == 'pickle':
data = dict(parameters=params, table_parameters=table_params, output=output)
with open(output_file, "wb") as fid:
pkl.dump(data, fid, protocol=4)
print("Done. Data written.")
def save_model(table_params, table_path, model_output, params, compare_exclude=[], columns=None,
overwrite_existing=False, data_filetype='pickle'):
"""
Creates an entry in the output_table and saves the output in the corresponding directory. Basically just a
wrapper to call update_output_table and then write_output.
Args:
table_params (dict, OrderedDict): Parameters that will be put into the table
compare_exclude (list): Parameters that will be excluded when determining if two rows represent the same
run. For instance, if runs are identical except for the date when the run was done, then it might be
reasonable to consider the runs as being identical, reflected in the variable run_number. Hence,
one may want to put the date parameter key in compare_exclude.
table_path (string): The filepath for the table.
model_output (dict): Dictionary that holds the output data
params (dict): Dictionary that holds the parameters
columns (list): Contains the keys of params_table in the order in which they should be written in the
output table.
overwrite_existing (bool): Whether or not to overwrite identical table entries or make a new row and
increment run_number.
        data_filetype (str): Filetype for the data, either 'pickle' (default) or 'hdf5'.
    Returns:
        run_id (int): Unique identifier for the run.
        output_path (Path): Filepath for the output file.
    """
run_id = update_output_table(table_params, table_path, compare_exclude, columns, overwrite_existing)
table_path = Path(table_path)
table_dir = table_path.parents[0]
if data_filetype == 'hdf5':
file_name = DATA_FILE_NAME+'.h5'
elif data_filetype == 'pickle':
file_name = DATA_FILE_NAME+'.pkl'
else:
raise ValueError('data_filetype option not recognized.')
output_dir = table_dir / (RUN_NAME+'_'+str(run_id))
output_path = output_dir / file_name
params.update(dict(table_path=table_path, output_dir=output_dir, run_id=run_id))
    write_output(model_output, params, table_params, output_dir, overwrite_existing, data_filetype)
return run_id, output_path
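# Illustrative usage sketch with hypothetical parameters and outputs:
# run_id, output_path = save_model(table_params={'lr': 0.01, 'seed': 0},
#                                  table_path='output/param_table.csv',
#                                  model_output={'loss': [0.5, 0.3]},
#                                  params={'lr': 0.01, 'seed': 0, 'hidden_units': 64})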
# Todo: Build in support for nested dictionaries / groups
def hdf5group_to_dictionary(h5grp):
d = {}
for key in h5grp:
        d[key] = h5grp[key][()]  # .value was removed in h5py 3.x; [()] reads the full dataset
return d
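# Illustrative usage sketch; the file path below is hypothetical:
# with h5py.File('output/run_0/model_data.h5', 'r') as fid:
#     parameters = hdf5group_to_dictionary(fid['parameters'])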
# %% Methods for checking for run existence and getting location
def run_with_id_exists(run_id, table_dir='output', only_directory_ok=False):
"""
Given the name of the run, the ID of the run, and the directory of the output table, checks to see if the run
exists.
Args:
        run_id (int): Unique identifier for the run.
        table_dir (str): Directory that contains the run table and the run output directories.
only_directory_ok (bool): If True, this method returns True if the output directory exists, even if the output
files haven't been written to the output directory. If False, this method only returns True if the
corresponding output directory and output files exist.
Returns:
bool
"""
table_dir = Path(table_dir)
run_dir = Path(table_dir/(RUN_NAME+'_'+str(run_id)))
if only_directory_ok:
return Path.exists(run_dir)
else:
        filelist = list(run_dir.glob(DATA_FILE_NAME + '.*'))
return len(filelist) > 0
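# Illustrative usage sketch:
# if run_with_id_exists(3, table_dir='output'):
#     print('Run 3 already has output on disk.')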
def get_dirs_and_ids_for_run(run_params, table_path='output/param_table.csv', compare_exclude=[], maximize=None):
"""
Parameters
----------
run_params : dict
Dictionary holding the parameters specifying the run
table_path : str
Path to the run tracker table
compare_exclude : list
list holding the parameters that should be excluded in specifying the run
maximize : Optional[str]
Parameter that should be maximized. Out of the runs that match run_params, return the ones that maximize the
argument maximize.
Returns
-------
List[Path]
Directories that match run_params and compare_exclude.
List[int]
Run Ids that match run_params and compare_exclude.
List[bool]
List of bools that correspond with the other two returned lists, with an entry being True if the output data
file is in the directory and False otherwise.
"""
table_path = Path(table_path)
table_dir = table_path.parents[0]
    out = _get_updated_table(compare_exclude, run_params, table_path, None, maximize)
    merge_ids = out[-1]
    dirs = [table_dir / f"{RUN_NAME}_{x}" for x in merge_ids]
    ids = list(merge_ids)
    output_exists = [Path.exists((Path(d)/DATA_FILE_NAME).with_suffix('.pkl')) for d in dirs]
# output_exists = [Path.exists(Path(d)/'training_completed_token') for d in dirs]
return dirs, ids, output_exists
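# Illustrative usage sketch with hypothetical parameters:
# dirs, ids, output_exists = get_dirs_and_ids_for_run({'lr': 0.01, 'seed': 0},
#                                                     table_path='output/param_table.csv',
#                                                     compare_exclude=['date'])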
# def run_with_params_exists(table_params, table_path='output/param_table.csv', compare_exclude=[],
# check_output_exist=True):
# """
# Given a set of parameters, check if a run matching this set exists.
#
# Args:
# table_params (dict, OrderedDict): Parameters that will be put into the table
# table_path (string): The filepath for the table.
# compare_exclude (list): Parameters that will be excluded when determining if two rows represent the same
# run. For instance, if runs are identical except for the date when the run was done, then it might be
# reasonable to consider the runs as being identical, reflected in the variable run_number. Hence,
# one may want to put the date parameter key in compare_exclude.
#
# Returns:
#
# """
#
#
# table_path = Path(table_path)
# table_dir = table_path.parents[0]
# out = _get_updated_table(compare_exclude, table_params, table_path)
# merge_ids = out[-1]
#
# dirs = get_dirs_for_run(run_params, table_path='output/param_table.csv', compare_exclude=[])
#
# file_check = True
# if check_output_exist:
# for dir in dirs:
# if not Path.exists(table_dir)
# return len(merge_ids) > 1
# %% Methods for loading data
def _get_updated_table(compare_exclude, table_params, table_path, column_labels=None, maximize=None):
"""
Core method for updating a parameter table.
Parameters
----------
compare_exclude : list
Parameters that should be excluded for comparisons with the run table
table_params : Dict-like
Parameters for the run
table_path : str
Path to the run table
    column_labels : Optional[List[str]]
        Labels for the columns. Used to assert an order.
maximize : Optional[str]
Parameter that should be maximized. Out of the runs that match run_params, return the ones that maximize the
argument maximize.
Returns
-------
run_id : int
Unique identifier for the run
run_number : int
Indexes runs with the same parameters.
param_df_updated : DataFrame
The updated run table.
merge_ids : List[int]
List of unique identifiers of runs that corresponded with table_params (not including the new row)
"""
table_path = Path(table_path)
compare_exclude_copy = compare_exclude.copy()
# import ipdb; ipdb.set_trace()
if maximize is not None:
compare_exclude_copy.append(maximize)
if not table_path.exists(): # If the table hasn't been created yet.
run_id = 0
if not column_labels:
param_df_updated = pd.DataFrame(table_params, index=[run_id], dtype=object)
else:
param_df_updated = pd.DataFrame(table_params, index=[run_id], columns=column_labels, dtype=object)
param_df_updated['run_number'] = 0
param_df_updated = param_df_updated.fillna('na')
run_number = 0
merge_ids = []
return run_id, run_number, param_df_updated, merge_ids
if not column_labels:
column_labels = list(table_params.keys()).copy()
if 'run_number' not in column_labels:
column_labels.append('run_number') # To make sure run_number is the last column, unless otherwise specified
param_df = pd.read_csv(table_path, index_col=0, dtype=str)
new_cols = __unique_to_set(param_df.columns, column_labels)[1] # param_keys that don't yet belong to param_df
for key in new_cols:
param_df[key] = pd.Series('na', index=param_df.index)
unique_to_param_df = __unique_to_set(param_df.columns, column_labels)[0]
if not unique_to_param_df: # If column_labels is comprehensive
param_df = param_df[column_labels] # Reorder columns of param_df based on column_labels
run_id = np.max(np.array(param_df.index)) + 1
# import ipdb; ipdb.set_trace()
    new_row = pd.DataFrame(table_params, index=[run_id], dtype=str)