| prompt | completion | api |
| --- | --- | --- |
| stringlengths 19-1.03M | stringlengths 4-2.12k | stringlengths 8-90 |
import os
import time
import math
import json
import hashlib
import datetime
import pandas as pd
import numpy as np
from run_pyspark import PySparkMgr
graph_type = "loan_agent/"
def make_md5(x):
md5 = hashlib.md5()
md5.update(x.encode('utf-8'))
return md5.hexdigest()
def make_node_schema(entity_name, entity_df, comp_index_properties = None, mix_index_properties = None):
properties = {"propertyKeys": []}
for col in entity_df.columns:
if np.issubdtype(entity_df[col].dtype, np.floating):
prop = {"name": col, "dataType": "Float", "cardinality": "SINGLE"}
elif np.issubdtype(entity_df[col].dtype, np.integer):
prop = {"name": col, "dataType": "Integer", "cardinality": "SINGLE"}
else:
prop = {"name": col, "dataType": "String", "cardinality": "SINGLE"}
properties["propertyKeys"].append(prop)
vertexLabels = {"vertexLabels": []}
vertexLabels["vertexLabels"].append({"name": entity_name})
vertexIndexes = {"vertexIndexes": []}
if comp_index_properties is not None:
for prop in comp_index_properties:
vertexIndexes["vertexIndexes"].append({
"name" : entity_name + "_" + prop + "_comp",
"propertyKeys" : [ prop ],
"composite" : True,
"unique" : False
})
if mix_index_properties is not None:
for prop in mix_index_properties:
vertexIndexes["vertexIndexes"].append({
"name" : entity_name + "_" + prop + "_mixed",
"propertyKeys" : [ prop ],
"composite" : False,
"unique" : False,
"mixedIndex" : "search"
})
vertexIndexes["vertexIndexes"].append({
"name" : entity_name + "_graph_label_mixed",
"propertyKeys" : [ "graph_label" ],
"composite" : False,
"unique" : False,
"mixedIndex" : "search"
})
return {**properties, **vertexLabels, **vertexIndexes}
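# Illustrative sketch (not part of the original pipeline): call make_node_schema on a tiny
# made-up entity DataFrame; the entity name, columns and index properties below are
# assumptions for demonstration only.
_demo_entity_df = pd.DataFrame({"identity_no": ["X001"], "age": [30], "graph_label": ["demo_entity"]})
_demo_node_schema = make_node_schema("demo_entity", _demo_entity_df,
                                     comp_index_properties=["identity_no"],
                                     mix_index_properties=["age"])
# _demo_node_schema now holds the merged propertyKeys / vertexLabels / vertexIndexes sections.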
def make_node_mapper(entity_name, entity_df):
entity_file = "gra_" + entity_name + ".csv"
vertexMap = {"vertexMap": {entity_file: {}}}
vertexMap["vertexMap"][entity_file] = {
"[VertexLabel]" : entity_name
}
for col in entity_df.columns:
vertexMap["vertexMap"][entity_file][col] = col
return vertexMap
def make_vertex_centric_schema(edge_name, index_property, direction, order):
if direction not in ["BOTH", "IN", "OUT"]:
print("direction should be in {}".format(["BOTH", "IN", "OUT"]))
return None
if order not in ["incr", "decr"]:
print("order should be in {}".format(["incr", "decr"]))
return None
vertexCentricIndexes = {"vertexCentricIndexes": []}
vertexCentricIndexes["vertexCentricIndexes"].append({
"name" : edge_name + "_" + index_property,
"edge" : edge_name,
"propertyKeys" : [ index_property ],
"order": order,
"direction": direction
})
return vertexCentricIndexes
def make_edge_schema(relation_df = None, relation_comp_index_properties = None, relation_mix_index_properties = None):
properties = {"propertyKeys": []}
relation_columns = relation_df.columns.tolist()
if "Left" not in relation_columns or "Right" not in relation_columns:
print("relation df lacks Left and Right columns ")
for col in relation_df.columns:
if col in ["Left", "Right", "Type"]:
continue
if np.issubdtype(relation_df[col].dtype, np.floating):
prop = {"name": col, "dataType": "Float", "cardinality": "SINGLE"}
elif np.issubdtype(relation_df[col].dtype, np.integer):
prop = {"name": col, "dataType": "Integer", "cardinality": "SINGLE"}
else:
prop = {"name": col, "dataType": "String", "cardinality": "SINGLE"}
properties["propertyKeys"].append(prop)
relation_names = relation_df["Type"].value_counts().index.tolist()
edgeLabels = {"edgeLabels": []}
for relation in relation_names:
edgeLabels["edgeLabels"].append({
"name": relation,
"multiplicity": "MULTI",
"unidirected": False
})
edgeIndexes = {"edgeIndexes": []}
for relation_name in relation_names:
if relation_comp_index_properties is not None:
for prop in relation_comp_index_properties:
edgeIndexes["edgeIndexes"].append({
"name": relation_name + "_" + prop + "_comp",
"propertyKeys": [ prop ],
"composite": True,
"unique": False,
"indexOnly": relation_name
})
if relation_mix_index_properties is not None:
for prop in relation_mix_index_properties:
edgeIndexes["edgeIndexes"].append({
"name" : relation_name + "_" + prop + "_mixed",
"propertyKeys": [ prop ],
"composite": False,
"unique": False,
"mixedIndex": "search",
"indexOnly": relation_name
})
return {**properties, **edgeLabels, **edgeIndexes}
def make_edge_mapper(entity_relations, relation_df=None, specific_relation=None):
edgeMap = {"edgeMap": {}}
for relation_name, entity_pairs in entity_relations.items():
if specific_relation is not None and relation_name != specific_relation:
continue
for pair in entity_pairs:
relation_file = "gra_" + relation_name + ".csv"
edge = {"[edge_left]": {"Left": pair[0]},
"[EdgeLabel]": relation_name,
"[edge_right]": {"Right": pair[1]}}
if relation_df is not None:
relation_columns = relation_df.columns.tolist()
if "Left" not in relation_columns or "Right" not in relation_columns:
print("relation df lacks Left and Right columns ")
for col in relation_df.columns:
if col in ["Left", "Right", "Type"]:
continue
edge[col] = col
edgeMap["edgeMap"][relation_file] = edge
return edgeMap
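# Illustrative sketch (assumed input shape, not from the original code): make_edge_mapper
# expects entity_relations to map each relation name to a list of (left entity, right entity)
# pairs; the "apply" relation below is a made-up example.
_demo_entity_relations = {"apply": [("borrower", "loan")]}
_demo_edge_map = make_edge_mapper(_demo_entity_relations)
# -> {"edgeMap": {"gra_apply.csv": {"[edge_left]": {"Left": "borrower"},
#                                   "[EdgeLabel]": "apply",
#                                   "[edge_right]": {"Right": "loan"}}}}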
def dump_schema(schema, datamapper, folder):
if not os.path.exists(graph_type + folder):
os.makedirs(graph_type + folder)
with open(graph_type + folder + "/schema.json", 'w') as f:
    f.write(json.dumps(schema))
with open(graph_type + folder + "/datamapper.json", 'w') as f:
    f.write(json.dumps(datamapper))
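# Illustrative usage sketch (assumed; the dump call is commented out to avoid writing files):
# _demo_node_mapper = make_node_mapper("demo_entity", _demo_entity_df)
# dump_schema(_demo_node_schema, _demo_node_mapper, "demo_entity")
# -> would create loan_agent/demo_entity/schema.json and loan_agent/demo_entity/datamapper.json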
spark_args = {}
pysparkmgr = PySparkMgr(spark_args)
_, spark, sc = pysparkmgr.start('xubin.xu')
# Credit quota application table
apply_loan_df = spark.sql("select * from adm.adm_credit_apply_quota_doc").toPandas()
# Loan drawdown (utilization) table
zhiyong_loan_df = spark.sql("select * from adm.adm_credit_loan_apply_doc").toPandas()
zhiyong_loan_df.quota_apply_id = zhiyong_loan_df.quota_apply_id.astype("int")
# Overdue table
overdue_sql = """select
*
from adm.adm_credit_apply_quota_doc t1
--overdue join: one customer can have multiple applications at different times, and each application can carry a different overdue status
--current overdue days and historical maximum overdue days
left join
(
select
quota_apply_id,
max(overdue_days_now) as overdue_days_now,
max(his_max_overdue_days) as his_max_overdue_days
from
(
select
c4.quota_apply_id,
c3.overdue_days_now,
c3.his_max_overdue_days
from
adm.adm_credit_loan_apply_doc c4
left join
(
select
c2.business_id,
max(overdue_days_now) as overdue_days_now,
max(overdue_day_calc) as his_max_overdue_days
from
(
select
c1.*,
(case when (overdue_day_calc>0 and latest_actual_repay_date is not null) then 0 else overdue_day_calc end) as overdue_days_now
FROM adm.adm_credit_rpt_risk_overdue_bill c1
) c2
group by c2.business_id
) c3
on c4.loan_no=c3.business_id
) c5
group by quota_apply_id
) t4
on t1.quota_apply_id=t4.quota_apply_id
--first-payment-overdue (FPD) days: current FPD and historical maximum FPD ----------------------------------------------------------
left join
(
select
quota_apply_id,
max(fpd) as fpd,
max(fpd_ever) as fpd_ever
from
(
select
a1.*,a2.*
from
adm.adm_credit_loan_apply_doc a1
left join
(
select
c1.business_id,
(case when (overdue_day_calc>0 and latest_actual_repay_date is null) then overdue_day_calc else 0 end) as fpd,--current first-payment-overdue days
c1.overdue_day_calc as fpd_ever--historical first-payment-overdue days
from
adm.adm_credit_rpt_risk_overdue_bill c1
where periods=1
) a2
on a1.loan_no=a2.business_id
) a3
group by quota_apply_id
) t5
on t1.quota_apply_id=t5.quota_apply_id"""
overday_df = spark.sql(overdue_sql).toPandas()
# Build the borrower entity
def make_borrower_entity():
shouxin_zhiyong_df = pd.merge(apply_loan_df, zhiyong_loan_df[
["quota_apply_id", "apply_id", "apply_status_risk", "loan_status", "loan_amount", "repayment_principal"]],
how='left', on='quota_apply_id')
borrower_basic_df = shouxin_zhiyong_df[
["name", "uus_id", "employee_no", "identity_no", "sex", "age", "zociac", "educate_level", "marital_status",
"city", "access_role", "entry_date",
"resign_date", "on_job_status", "current_working_days", "uc_job_level_name", "store_city", "apply_id",
"team_code", "shop_code", "area_code", "marketing_code", "region_code"]]
borrower = shouxin_zhiyong_df.groupby("identity_no")
borrower_ext_df = pd.DataFrame([], columns=["identity_no", "累计贷款笔数", "未结清贷款笔数", "累计贷款金额", "当前贷款余额"])
idx = 0
for group, df in borrower:
loans_cnt = df[(~pd.isnull(df.apply_id)) & (df.apply_status_risk_y == "放款成功")].apply_id.count()
unclosed_loans_cnt = df[(~pd.isnull(df.apply_id)) & (df.apply_status_risk_y == "放款成功") & (
df.loan_status == "REPAYING")].apply_id.count()
loans_amt = df[(~pd.isnull(df.apply_id)) & (df.apply_status_risk_y == "放款成功")].loan_amount.sum()
import pytest
from SCNIC.general import simulate_correls
from SCNIC.correlation_analysis import df_to_correls, between_correls_from_tables, calculate_correlations, \
fastspar_correlation
import pandas as pd
from scipy.stats import pearsonr
from biom import load_table
from os import path
from numpy.testing import assert_allclose
import warnings
@pytest.fixture()
def biom_table1():
return simulate_correls()
@pytest.fixture()
def data_path():
return path.join(path.realpath(path.dirname(__file__)), 'data')
@pytest.fixture()
def fastspar_table(data_path):
return load_table(path.join(data_path, 'fake_data.biom'))
@pytest.fixture()
def correls_spar(data_path):
correls = pd.read_csv(path.join(data_path, 'fake_correls_spar.txt'), index_col=(0, 1), sep='\t',
dtype={'feature1': str, 'feature2': str})
new_index = pd.MultiIndex.from_tuples([(str(i), str(j)) for i, j in correls.index])
correls.index = new_index
return correls
# TODO: Induce between table correlations to try to detect
@pytest.fixture()
def biom_table2():
return simulate_correls()
@pytest.fixture()
def cor():
labels = ('otu_1', 'otu_2', 'otu_3')
data = [(1.0, .70, .01),
(.70, 1.0, .35),
(.01, .35, 1.0)]
return pd.DataFrame(data, index=labels, columns=labels)
@pytest.fixture()
def correls():
index = (('otu_1', 'otu_2'),
('otu_1', 'otu_3'),
('otu_2', 'otu_3'))
data = [.7, .01, .35]
return pd.DataFrame(data, index=pd.MultiIndex.from_tuples(index))
import configparser
import csv
import glob
import hashlib
import json
import logging
import os
import sys
from datetime import datetime, timedelta, timezone
import numpy as np
import pandas as pd
from flask import flash, url_for
from flask_login import current_user
from flask_mail import Message
from thewarden import db, mail
from thewarden import mhp as mrh
from thewarden.models import Trades
from thewarden.pricing_engine.pricing import (fx_price_ondate,
multiple_price_grab, price_data,
price_data_fx, price_data_rt,
price_data_rt_full)
from thewarden.users.decorators import MWT, memoized, timing
# ---------------------------------------------------------
# Helper Functions start here
# ---------------------------------------------------------
# --------------------------------------------
# Read Global Variables from config(s)
# Include global variables and error handling
# --------------------------------------------
config = configparser.ConfigParser()
config.read('config.ini')
try:
RENEW_NAV = config['MAIN']['RENEW_NAV']
except KeyError:
RENEW_NAV = 10
logging.error("Could not find RENEW_NAV at config.ini. Defaulting to 60.")
try:
PORTFOLIO_MIN_SIZE_NAV = config['MAIN']['PORTFOLIO_MIN_SIZE_NAV']
except KeyError:
PORTFOLIO_MIN_SIZE_NAV = 5
logging.error("Could not find PORTFOLIO_MIN_SIZE_NAV at config.ini." +
" Defaulting to 5.")
# The following function is used when deploying the application
# In this case, the path changes and the below makes sure the
# correct path is used.
# Returns the base path of the application that can be later used with:
# file_path = os.path.join(current_path, file_path)
@memoized
def current_path():
# determine if application is a script file or frozen exe
if getattr(sys, 'frozen', False):
application_path = sys._MEIPASS
elif __file__:
application_path = os.path.dirname(os.path.abspath(__file__))
application_path = os.path.dirname(application_path)
application_path = os.path.dirname(application_path)
# The application_path above would return the location of:
# /thewarden/thewarden/users
# which is where the utils.py file for this function is located
# Make sure we go 2 levels up for the base application folder
return(application_path)
@MWT(timeout=20)
@timing
def cost_calculation(ticker, html_table=None):
# This function calculates the cost basis assuming 3 different methods
# FIFO, LIFO and avg. cost
# If html_table is set to either FIFO or LIFO, it will return
# an html table for this ticker
# Gets all transactions in local currency terms
df = transactions_fx()
df = df[(df.trade_asset_ticker == ticker)]
# Find current open position on asset
summary_table = df.groupby(['trade_asset_ticker', 'trade_operation'])[
["cash_value", "cash_value_fx", "trade_fees", "trade_quantity"]].sum()
open_position = summary_table.sum()['trade_quantity']
# Drop Deposits and Withdraws - keep only Buy and Sells
if open_position > 0:
df = df[df.trade_operation.str.match('B')]
elif open_position < 0:
df = df[df.trade_operation.str.match('S')]
# Let's return a dictionary for this user with FIFO, LIFO and Avg. Cost
cost_matrix = {}
# ---------------------------------------------------
# FIFO
# ---------------------------------------------------
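# Worked example with illustrative numbers: suppose open_position = 5 and, after sorting
# newest-first, trade_quantity = [3, 4]. Then acum_Q = cumsum = [3, 7], clipped to [3, 5];
# diff gives Q = [3, 2], i.e. the whole newest lot plus only the 2 units of the older lot
# still needed to cover the open position.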
fifo_df = df.sort_index(ascending=False)
fifo_df['acum_Q'] = fifo_df['trade_quantity'].cumsum()
fifo_df['acum_Q'] = np.where(fifo_df['acum_Q'] < open_position,
fifo_df['acum_Q'], open_position)
# Keep only the number of rows needed for open position
fifo_df = fifo_df.drop_duplicates(subset="acum_Q", keep='first')
fifo_df['Q'] = fifo_df['acum_Q'].diff()
fifo_df['Q'] = fifo_df['Q'].fillna(fifo_df['acum_Q'])
# if fifo_df['acum_Q'].count() == 1:
# fifo_df['Q'] = fifo_df['acum_Q']
# Adjust Cash Value only to account for needed position
fifo_df['adjusted_cv'] = fifo_df['cash_value_fx'] * fifo_df['Q'] /\
fifo_df['trade_quantity']
cost_matrix['FIFO'] = {}
cost_matrix['FIFO']['FIFO_cash'] = fifo_df['adjusted_cv'].sum()
cost_matrix['FIFO']['FIFO_quantity'] = open_position
cost_matrix['FIFO']['FIFO_count'] = int(fifo_df['trade_operation'].count())
cost_matrix['FIFO']['FIFO_average_cost'] = fifo_df['adjusted_cv'].sum()\
/ open_position
# ---------------------------------------------------
# LIFO
# ---------------------------------------------------
lifo_df = df.sort_index(ascending=True)
lifo_df['acum_Q'] = lifo_df['trade_quantity'].cumsum()
lifo_df['acum_Q'] = np.where(lifo_df['acum_Q'] < open_position,
lifo_df['acum_Q'], open_position)
# Keep only the number of rows needed for open position
lifo_df = lifo_df.drop_duplicates(subset="acum_Q", keep='first')
lifo_df['Q'] = lifo_df['acum_Q'].diff()
lifo_df['Q'] = lifo_df['Q'].fillna(lifo_df['acum_Q'])
# if lifo_df['acum_Q'].count() == 1:
# lifo_df['Q'] = lifo_df['acum_Q']
# Adjust Cash Value only to account for needed position
lifo_df['adjusted_cv'] = lifo_df['cash_value_fx'] * lifo_df['Q'] /\
lifo_df['trade_quantity']
cost_matrix['LIFO'] = {}
cost_matrix['LIFO']['LIFO_cash'] = lifo_df['adjusted_cv'].sum()
cost_matrix['LIFO']['LIFO_quantity'] = open_position
cost_matrix['LIFO']['LIFO_count'] = int(lifo_df['trade_operation'].count())
cost_matrix['LIFO']['LIFO_average_cost'] = lifo_df['adjusted_cv'].sum() / open_position
if html_table == "FIFO":
# Format the df into an HTML table to be served at main page
html = fifo_df[[
'trade_operation', 'Q', 'acum_Q',
'trade_price_fx', 'trade_fees_fx', 'cash_value_fx', 'adjusted_cv',
'trade_reference_id']]
if html_table == "LIFO":
html = lifo_df[[
'trade_operation', 'Q', 'acum_Q',
'trade_price_fx', 'trade_fees_fx', 'cash_value_fx', 'adjusted_cv',
'trade_reference_id']]
# Now format the HTML properly
if html_table:
fx = current_user.fx_rate_data()['symbol']
# Include a link to edit this transaction
html["trade_reference_id"] = "<a href='/edittransaction?reference_id=" +\
html['trade_reference_id'] +\
"'><i class='fas fa-edit'></i></a>"
html.index = pd.to_datetime(html.index).strftime('%Y-%m-%d')
# Include TOTAL row
html.loc['TOTAL'] = 0
# Need to add only some fields - strings can't be added for example
columns_sum = ['Q', 'trade_fees_fx', 'cash_value_fx',
'adjusted_cv']
for field in columns_sum:
html.loc['TOTAL', field] = html[field].sum()
# format numbers
html['acum_Q'] = abs(html['acum_Q'])
html['Q'] = abs(html['Q'])
html['acum_Q'] = html['acum_Q'].map('{:,.4f}'.format)
html['Q'] = html['Q'].map('{:,.4f}'.format)
html['trade_price_fx'] = html['trade_price_fx'].map('{:,.2f}'.format)
html['trade_fees_fx'] = html['trade_fees_fx'].map('{:,.2f}'.format)
html['cash_value_fx'] = html['cash_value_fx'].map('{:,.2f}'.format)
html['adjusted_cv'] = html['adjusted_cv'].map('{:,.2f}'.format)
html.loc['TOTAL', 'trade_operation'] = ''
html.loc['TOTAL', 'acum_Q'] = ''
html.loc['TOTAL', 'trade_price_fx'] = ''
html.loc['TOTAL', 'trade_reference_id'] = ''
html = html.rename(
columns={
'trade_operation': 'B/S',
'acum_Q': 'Q (acum)',
'trade_price_fx': 'Price (' + fx + ')',
'trade_fees_fx': 'Fees (' + fx + ')',
'cash_value_fx': 'Cash Flow (' + fx + ')',
'adjusted_cv': 'Adj CF (' + fx + ')',
'trade_reference_id': ' '
})
cost_matrix = html.to_html(
classes='table table-condensed table-striped small-text text-right',
escape=False, index_names=False, justify='right')
return (cost_matrix)
def to_epoch(in_date):
return str(int((in_date - datetime(1970, 1, 1)).total_seconds()))
def find_fx(row, fx=None):
# row.name is the date being passed
# row['trade_currency'] is the base fx (the one where the trade was included)
# Create an instance of PriceData:
price = fx_price_ondate(current_user.fx(), row['trade_currency'], row.name)
return price
@timing
def transactions_fx():
# Gets the transaction table and fills with fx information
# Note that it uses the currency exchange for the date of transaction
# Get all transactions from db and format
df = pd.read_sql_table('trades', db.engine)
df = df[(df.user_id == current_user.username)]
# df = df[(df.trade_operation == "B") | (df.trade_operation == "S")]
df['trade_date'] = pd.to_datetime(df['trade_date'])
df = df.set_index('trade_date')
# Ignore times in df to merge - keep only dates
df.index = df.index.floor('d')
df.index.rename('date', inplace=True)
# The current fx needs no conversion, set to 1
df[current_user.fx()] = 1
# Need to get currencies into the df in order to normalize
# let's load a list of currencies needed and merge
list_of_fx = df.trade_currency.unique().tolist()
# loop through currency list
for currency in list_of_fx:
if currency == current_user.fx():
continue
# Make a price request
df[currency] = df.apply(find_fx, axis=1)
# Now create a cash value in the preferred currency terms
df['fx'] = df.apply(lambda x: x[x['trade_currency']], axis=1)
df['cash_value_fx'] = df['cash_value'].astype(float) / df['fx'].astype(float)
df['trade_fees_fx'] = df['trade_fees'].astype(float) / df['fx'].astype(float)
df['trade_price_fx'] = df['trade_price'].astype(float) / df['fx'].astype(float)
return (df)
@memoized
def is_currency(id):
# Return true if id is in list of currencies
found = ([item for item in fx_list() if item[0] == id])
if found != []:
return True
return False
@MWT(timeout=2)
def list_tickers():
df = pd.read_sql_table('trades', db.engine)
df = df[(df.user_id == current_user.username)]
# remove the currencies from tickers
df['is_currency'] = df['trade_asset_ticker'].apply(is_currency)
df = df[df['is_currency'] == False]
return (df.trade_asset_ticker.unique().tolist())
# ---------------- PANDAS HELPER FUNCTION --------------------------
# Applies a function that returns multiple values to one column and concatenates
# the results to the dataframe as new columns.
def apply_and_concat(dataframe, field, func, column_names):
return pd.concat((
dataframe,
dataframe[field].apply(
lambda cell: pd.Series(func(cell), index=column_names))), axis=1)
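# Illustrative usage with hypothetical data (not from the application):
# df = pd.DataFrame({'price': [10.5, 3.25]})
# apply_and_concat(df, 'price', lambda p: (int(p), p - int(p)), ['whole', 'frac'])
# -> keeps 'price' and appends the new 'whole' and 'frac' columns.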
# ---------------- PANDAS HELPER FUNCTION --------------------------
# Pandas helper function to unpack a dictionary stored within a
# single pandas column's cells.
def df_unpack(df, column, fillna=None):
ret = None
if fillna is None:
ret = pd.concat([df, pd.DataFrame(
(d for idx, d in df[column].iteritems()))], axis=1)
del ret[column]
else:
ret = pd.concat([df, pd.DataFrame(
(d for idx, d in df[column].iteritems())).fillna(fillna)], axis=1)
del ret[column]
return ret
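# Illustrative usage with hypothetical data: expand a dict-valued column into real columns.
# df = pd.DataFrame({'ticker': ['BTC'], 'detail': [{'price': 100, 'fx': 'USD'}]})
# df_unpack(df, 'detail')
# -> columns: ticker, price, fx (the original 'detail' column is dropped).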
@MWT(timeout=2)
def positions():
# Method to create a user's position table
# Returns a df with the following information
# Ticker, name, quantity, small_pos
# THIS SHOULD CONTAIN THE STATIC FIELDS ONLY - no web requests
# It should be a light method to load quickly on the main page.
# Anything with web requests should be done on a separate function
# Get all transactions & group by ticker name and operation
df = transactions_fx()
summary_table = df.groupby(['trade_asset_ticker', 'trade_operation'])[
["trade_quantity",
"cash_value_fx",
"trade_fees_fx"]].sum()
# Now let's create our main dataframe with information for each ticker
list_of_tickers = list_tickers()
main_df = pd.DataFrame({'trade_asset_ticker': list_of_tickers})
# -*- coding: utf-8 -*-
import numpy as np
import pandas as pd
from sklearn.utils import shuffle as sk_shuffle
from sklearn.preprocessing import LabelEncoder, OneHotEncoder
from sklearn.preprocessing import StandardScaler
from .rfpimp import oob_importances
class Data(object):
def __init__(self, shuffle=True, standardize=True, random_state=None):
self.shuffle = shuffle
self.standardize = standardize
self.random_state = random_state
self.X = None
self.y = None
self.columns = None
def fit_transform(self, X, y=None):
if isinstance(X, np.ndarray):
self.X = pd.DataFrame(X)
if y is not None:
self.y = pd.Series(y)
from datetime import datetime
import numpy as np
import pandas as pd
import pytest
from numba import njit
import vectorbt as vbt
from tests.utils import record_arrays_close
from vectorbt.generic.enums import range_dt, drawdown_dt
from vectorbt.portfolio.enums import order_dt, trade_dt, log_dt
day_dt = np.timedelta64(86400000000000)
example_dt = np.dtype([
('id', np.int64),
('col', np.int64),
('idx', np.int64),
('some_field1', np.float64),
('some_field2', np.float64)
], align=True)
records_arr = np.asarray([
(0, 0, 0, 10, 21),
(1, 0, 1, 11, 20),
(2, 0, 2, 12, 19),
(3, 1, 0, 13, 18),
(4, 1, 1, 14, 17),
(5, 1, 2, 13, 18),
(6, 2, 0, 12, 19),
(7, 2, 1, 11, 20),
(8, 2, 2, 10, 21)
], dtype=example_dt)
records_nosort_arr = np.concatenate((
records_arr[0::3],
records_arr[1::3],
records_arr[2::3]
))
group_by = pd.Index(['g1', 'g1', 'g2', 'g2'])
wrapper = vbt.ArrayWrapper(
index=['x', 'y', 'z'],
columns=['a', 'b', 'c', 'd'],
ndim=2,
freq='1 days'
)
wrapper_grouped = wrapper.replace(group_by=group_by)
records = vbt.records.Records(wrapper, records_arr)
records_grouped = vbt.records.Records(wrapper_grouped, records_arr)
records_nosort = records.replace(records_arr=records_nosort_arr)
records_nosort_grouped = vbt.records.Records(wrapper_grouped, records_nosort_arr)
# ############# Global ############# #
def setup_module():
vbt.settings.numba['check_func_suffix'] = True
vbt.settings.caching.enabled = False
vbt.settings.caching.whitelist = []
vbt.settings.caching.blacklist = []
def teardown_module():
vbt.settings.reset()
# ############# col_mapper.py ############# #
class TestColumnMapper:
def test_col_arr(self):
np.testing.assert_array_equal(
records['a'].col_mapper.col_arr,
np.array([0, 0, 0])
)
np.testing.assert_array_equal(
records.col_mapper.col_arr,
np.array([0, 0, 0, 1, 1, 1, 2, 2, 2])
)
def test_get_col_arr(self):
np.testing.assert_array_equal(
records.col_mapper.get_col_arr(),
records.col_mapper.col_arr
)
np.testing.assert_array_equal(
records_grouped['g1'].col_mapper.get_col_arr(),
np.array([0, 0, 0, 0, 0, 0])
)
np.testing.assert_array_equal(
records_grouped.col_mapper.get_col_arr(),
np.array([0, 0, 0, 0, 0, 0, 1, 1, 1])
)
def test_col_range(self):
np.testing.assert_array_equal(
records['a'].col_mapper.col_range,
np.array([
[0, 3]
])
)
np.testing.assert_array_equal(
records.col_mapper.col_range,
np.array([
[0, 3],
[3, 6],
[6, 9],
[-1, -1]
])
)
def test_get_col_range(self):
np.testing.assert_array_equal(
records.col_mapper.get_col_range(),
np.array([
[0, 3],
[3, 6],
[6, 9],
[-1, -1]
])
)
np.testing.assert_array_equal(
records_grouped['g1'].col_mapper.get_col_range(),
np.array([[0, 6]])
)
np.testing.assert_array_equal(
records_grouped.col_mapper.get_col_range(),
np.array([[0, 6], [6, 9]])
)
def test_col_map(self):
np.testing.assert_array_equal(
records['a'].col_mapper.col_map[0],
np.array([0, 1, 2])
)
np.testing.assert_array_equal(
records['a'].col_mapper.col_map[1],
np.array([3])
)
np.testing.assert_array_equal(
records.col_mapper.col_map[0],
np.array([0, 1, 2, 3, 4, 5, 6, 7, 8])
)
np.testing.assert_array_equal(
records.col_mapper.col_map[1],
np.array([3, 3, 3, 0])
)
def test_get_col_map(self):
np.testing.assert_array_equal(
records.col_mapper.get_col_map()[0],
records.col_mapper.col_map[0]
)
np.testing.assert_array_equal(
records.col_mapper.get_col_map()[1],
records.col_mapper.col_map[1]
)
np.testing.assert_array_equal(
records_grouped['g1'].col_mapper.get_col_map()[0],
np.array([0, 1, 2, 3, 4, 5])
)
np.testing.assert_array_equal(
records_grouped['g1'].col_mapper.get_col_map()[1],
np.array([6])
)
np.testing.assert_array_equal(
records_grouped.col_mapper.get_col_map()[0],
np.array([0, 1, 2, 3, 4, 5, 6, 7, 8])
)
np.testing.assert_array_equal(
records_grouped.col_mapper.get_col_map()[1],
np.array([6, 3])
)
def test_is_sorted(self):
assert records.col_mapper.is_sorted()
assert not records_nosort.col_mapper.is_sorted()
# ############# mapped_array.py ############# #
mapped_array = records.map_field('some_field1')
mapped_array_grouped = records_grouped.map_field('some_field1')
mapped_array_nosort = records_nosort.map_field('some_field1')
mapped_array_nosort_grouped = records_nosort_grouped.map_field('some_field1')
mapping = {x: 'test_' + str(x) for x in pd.unique(mapped_array.values)}
mp_mapped_array = mapped_array.replace(mapping=mapping)
mp_mapped_array_grouped = mapped_array_grouped.replace(mapping=mapping)
class TestMappedArray:
def test_config(self, tmp_path):
assert vbt.MappedArray.loads(mapped_array.dumps()) == mapped_array
mapped_array.save(tmp_path / 'mapped_array')
assert vbt.MappedArray.load(tmp_path / 'mapped_array') == mapped_array
def test_mapped_arr(self):
np.testing.assert_array_equal(
mapped_array['a'].values,
np.array([10., 11., 12.])
)
np.testing.assert_array_equal(
mapped_array.values,
np.array([10., 11., 12., 13., 14., 13., 12., 11., 10.])
)
def test_id_arr(self):
np.testing.assert_array_equal(
mapped_array['a'].id_arr,
np.array([0, 1, 2])
)
np.testing.assert_array_equal(
mapped_array.id_arr,
np.array([0, 1, 2, 3, 4, 5, 6, 7, 8])
)
def test_col_arr(self):
np.testing.assert_array_equal(
mapped_array['a'].col_arr,
np.array([0, 0, 0])
)
np.testing.assert_array_equal(
mapped_array.col_arr,
np.array([0, 0, 0, 1, 1, 1, 2, 2, 2])
)
def test_idx_arr(self):
np.testing.assert_array_equal(
mapped_array['a'].idx_arr,
np.array([0, 1, 2])
)
np.testing.assert_array_equal(
mapped_array.idx_arr,
np.array([0, 1, 2, 0, 1, 2, 0, 1, 2])
)
def test_is_sorted(self):
assert mapped_array.is_sorted()
assert mapped_array.is_sorted(incl_id=True)
assert not mapped_array_nosort.is_sorted()
assert not mapped_array_nosort.is_sorted(incl_id=True)
def test_sort(self):
assert mapped_array.sort().is_sorted()
assert mapped_array.sort().is_sorted(incl_id=True)
assert mapped_array.sort(incl_id=True).is_sorted(incl_id=True)
assert mapped_array_nosort.sort().is_sorted()
assert mapped_array_nosort.sort().is_sorted(incl_id=True)
assert mapped_array_nosort.sort(incl_id=True).is_sorted(incl_id=True)
def test_apply_mask(self):
mask_a = mapped_array['a'].values >= mapped_array['a'].values.mean()
np.testing.assert_array_equal(
mapped_array['a'].apply_mask(mask_a).id_arr,
np.array([1, 2])
)
mask = mapped_array.values >= mapped_array.values.mean()
filtered = mapped_array.apply_mask(mask)
np.testing.assert_array_equal(
filtered.id_arr,
np.array([2, 3, 4, 5, 6])
)
np.testing.assert_array_equal(filtered.col_arr, mapped_array.col_arr[mask])
np.testing.assert_array_equal(filtered.idx_arr, mapped_array.idx_arr[mask])
assert mapped_array_grouped.apply_mask(mask).wrapper == mapped_array_grouped.wrapper
assert mapped_array_grouped.apply_mask(mask, group_by=False).wrapper.grouper.group_by is None
def test_map_to_mask(self):
@njit
def every_2_nb(inout, idxs, col, mapped_arr):
inout[idxs[::2]] = True
np.testing.assert_array_equal(
mapped_array.map_to_mask(every_2_nb),
np.array([True, False, True, True, False, True, True, False, True])
)
def test_top_n_mask(self):
np.testing.assert_array_equal(
mapped_array.top_n_mask(1),
np.array([False, False, True, False, True, False, True, False, False])
)
def test_bottom_n_mask(self):
np.testing.assert_array_equal(
mapped_array.bottom_n_mask(1),
np.array([True, False, False, True, False, False, False, False, True])
)
def test_top_n(self):
np.testing.assert_array_equal(
mapped_array.top_n(1).id_arr,
np.array([2, 4, 6])
)
def test_bottom_n(self):
np.testing.assert_array_equal(
mapped_array.bottom_n(1).id_arr,
np.array([0, 3, 8])
)
def test_to_pd(self):
target = pd.DataFrame(
np.array([
[10., 13., 12., np.nan],
[11., 14., 11., np.nan],
[12., 13., 10., np.nan]
]),
index=wrapper.index,
columns=wrapper.columns
)
pd.testing.assert_series_equal(
mapped_array['a'].to_pd(),
target['a']
)
pd.testing.assert_frame_equal(
mapped_array.to_pd(),
target
)
pd.testing.assert_frame_equal(
mapped_array.to_pd(fill_value=0.),
target.fillna(0.)
)
mapped_array2 = vbt.MappedArray(
wrapper,
records_arr['some_field1'].tolist() + [1],
records_arr['col'].tolist() + [2],
idx_arr=records_arr['idx'].tolist() + [2]
)
with pytest.raises(Exception):
_ = mapped_array2.to_pd()
pd.testing.assert_series_equal(
mapped_array['a'].to_pd(ignore_index=True),
pd.Series(np.array([10., 11., 12.]), name='a')
)
pd.testing.assert_frame_equal(
mapped_array.to_pd(ignore_index=True),
pd.DataFrame(
np.array([
[10., 13., 12., np.nan],
[11., 14., 11., np.nan],
[12., 13., 10., np.nan]
]),
columns=wrapper.columns
)
)
pd.testing.assert_frame_equal(
mapped_array.to_pd(fill_value=0, ignore_index=True),
pd.DataFrame(
np.array([
[10., 13., 12., 0.],
[11., 14., 11., 0.],
[12., 13., 10., 0.]
]),
columns=wrapper.columns
)
)
pd.testing.assert_frame_equal(
mapped_array_grouped.to_pd(ignore_index=True),
pd.DataFrame(
np.array([
[10., 12.],
[11., 11.],
[12., 10.],
[13., np.nan],
[14., np.nan],
[13., np.nan],
]),
columns=pd.Index(['g1', 'g2'], dtype='object')
)
)
def test_apply(self):
@njit
def cumsum_apply_nb(idxs, col, a):
return np.cumsum(a)
np.testing.assert_array_equal(
mapped_array['a'].apply(cumsum_apply_nb).values,
np.array([10., 21., 33.])
)
np.testing.assert_array_equal(
mapped_array.apply(cumsum_apply_nb).values,
np.array([10., 21., 33., 13., 27., 40., 12., 23., 33.])
)
np.testing.assert_array_equal(
mapped_array_grouped.apply(cumsum_apply_nb, apply_per_group=False).values,
np.array([10., 21., 33., 13., 27., 40., 12., 23., 33.])
)
np.testing.assert_array_equal(
mapped_array_grouped.apply(cumsum_apply_nb, apply_per_group=True).values,
np.array([10., 21., 33., 46., 60., 73., 12., 23., 33.])
)
assert mapped_array_grouped.apply(cumsum_apply_nb).wrapper == \
mapped_array.apply(cumsum_apply_nb, group_by=group_by).wrapper
assert mapped_array.apply(cumsum_apply_nb, group_by=False).wrapper.grouper.group_by is None
def test_reduce(self):
@njit
def mean_reduce_nb(col, a):
return np.mean(a)
assert mapped_array['a'].reduce(mean_reduce_nb) == 11.
pd.testing.assert_series_equal(
mapped_array.reduce(mean_reduce_nb),
pd.Series(np.array([11., 13.333333333333334, 11., np.nan]), index=wrapper.columns).rename('reduce')
)
pd.testing.assert_series_equal(
mapped_array.reduce(mean_reduce_nb, fill_value=0.),
pd.Series(np.array([11., 13.333333333333334, 11., 0.]), index=wrapper.columns).rename('reduce')
)
pd.testing.assert_series_equal(
mapped_array.reduce(mean_reduce_nb, fill_value=0., wrap_kwargs=dict(dtype=np.int_)),
pd.Series(np.array([11., 13.333333333333334, 11., 0.]), index=wrapper.columns).rename('reduce')
)
pd.testing.assert_series_equal(
mapped_array.reduce(mean_reduce_nb, wrap_kwargs=dict(to_timedelta=True)),
pd.Series(np.array([11., 13.333333333333334, 11., np.nan]), index=wrapper.columns).rename('reduce') * day_dt
)
pd.testing.assert_series_equal(
mapped_array_grouped.reduce(mean_reduce_nb),
pd.Series([12.166666666666666, 11.0], index=pd.Index(['g1', 'g2'], dtype='object')).rename('reduce')
)
assert mapped_array_grouped['g1'].reduce(mean_reduce_nb) == 12.166666666666666
pd.testing.assert_series_equal(
mapped_array_grouped[['g1']].reduce(mean_reduce_nb),
pd.Series([12.166666666666666], index=pd.Index(['g1'], dtype='object')).rename('reduce')
)
pd.testing.assert_series_equal(
mapped_array.reduce(mean_reduce_nb),
mapped_array_grouped.reduce(mean_reduce_nb, group_by=False)
)
pd.testing.assert_series_equal(
mapped_array.reduce(mean_reduce_nb, group_by=group_by),
mapped_array_grouped.reduce(mean_reduce_nb)
)
def test_reduce_to_idx(self):
@njit
def argmin_reduce_nb(col, a):
return np.argmin(a)
assert mapped_array['a'].reduce(argmin_reduce_nb, returns_idx=True) == 'x'
pd.testing.assert_series_equal(
mapped_array.reduce(argmin_reduce_nb, returns_idx=True),
pd.Series(np.array(['x', 'x', 'z', np.nan], dtype=object), index=wrapper.columns).rename('reduce')
)
pd.testing.assert_series_equal(
mapped_array.reduce(argmin_reduce_nb, returns_idx=True, to_index=False),
pd.Series(np.array([0, 0, 2, -1], dtype=int), index=wrapper.columns).rename('reduce')
)
pd.testing.assert_series_equal(
mapped_array_grouped.reduce(argmin_reduce_nb, returns_idx=True, to_index=False),
pd.Series(np.array([0, 2], dtype=int), index=pd.Index(['g1', 'g2'], dtype='object')).rename('reduce')
)
def test_reduce_to_array(self):
@njit
def min_max_reduce_nb(col, a):
return np.array([np.min(a), np.max(a)])
pd.testing.assert_series_equal(
mapped_array['a'].reduce(min_max_reduce_nb, returns_array=True,
wrap_kwargs=dict(name_or_index=['min', 'max'])),
pd.Series([10., 12.], index=pd.Index(['min', 'max'], dtype='object'), name='a')
)
pd.testing.assert_frame_equal(
mapped_array.reduce(min_max_reduce_nb, returns_array=True, wrap_kwargs=dict(name_or_index=['min', 'max'])),
pd.DataFrame(
np.array([
[10., 13., 10., np.nan],
[12., 14., 12., np.nan]
]),
index=pd.Index(['min', 'max'], dtype='object'),
columns=wrapper.columns
)
)
pd.testing.assert_frame_equal(
mapped_array.reduce(min_max_reduce_nb, returns_array=True, fill_value=0.),
pd.DataFrame(
np.array([
[10., 13., 10., 0.],
[12., 14., 12., 0.]
]),
columns=wrapper.columns
)
)
pd.testing.assert_frame_equal(
mapped_array.reduce(min_max_reduce_nb, returns_array=True, wrap_kwargs=dict(to_timedelta=True)),
pd.DataFrame(
np.array([
[10., 13., 10., np.nan],
[12., 14., 12., np.nan]
]),
columns=wrapper.columns
) * day_dt
)
pd.testing.assert_frame_equal(
mapped_array_grouped.reduce(min_max_reduce_nb, returns_array=True),
pd.DataFrame(
np.array([
[10., 10.],
[14., 12.]
]),
columns=pd.Index(['g1', 'g2'], dtype='object')
)
)
pd.testing.assert_frame_equal(
mapped_array.reduce(min_max_reduce_nb, returns_array=True),
mapped_array_grouped.reduce(min_max_reduce_nb, returns_array=True, group_by=False)
)
pd.testing.assert_frame_equal(
mapped_array.reduce(min_max_reduce_nb, returns_array=True, group_by=group_by),
mapped_array_grouped.reduce(min_max_reduce_nb, returns_array=True)
)
pd.testing.assert_series_equal(
mapped_array_grouped['g1'].reduce(min_max_reduce_nb, returns_array=True),
pd.Series([10., 14.], name='g1')
)
pd.testing.assert_frame_equal(
mapped_array_grouped[['g1']].reduce(min_max_reduce_nb, returns_array=True),
pd.DataFrame([[10.], [14.]], columns=pd.Index(['g1'], dtype='object'))
)
def test_reduce_to_idx_array(self):
@njit
def idxmin_idxmax_reduce_nb(col, a):
return np.array([np.argmin(a), np.argmax(a)])
pd.testing.assert_series_equal(
mapped_array['a'].reduce(
idxmin_idxmax_reduce_nb,
returns_array=True,
returns_idx=True,
wrap_kwargs=dict(name_or_index=['min', 'max'])
),
pd.Series(
np.array(['x', 'z'], dtype=object),
index=pd.Index(['min', 'max'], dtype='object'),
name='a'
)
)
pd.testing.assert_frame_equal(
mapped_array.reduce(
idxmin_idxmax_reduce_nb,
returns_array=True,
returns_idx=True,
wrap_kwargs=dict(name_or_index=['min', 'max'])
),
pd.DataFrame(
{
'a': ['x', 'z'],
'b': ['x', 'y'],
'c': ['z', 'x'],
'd': [np.nan, np.nan]
},
index=pd.Index(['min', 'max'], dtype='object')
)
)
pd.testing.assert_frame_equal(
mapped_array.reduce(
idxmin_idxmax_reduce_nb,
returns_array=True,
returns_idx=True,
to_index=False
),
pd.DataFrame(
np.array([
[0, 0, 2, -1],
[2, 1, 0, -1]
]),
columns=wrapper.columns
)
)
pd.testing.assert_frame_equal(
mapped_array_grouped.reduce(
idxmin_idxmax_reduce_nb,
returns_array=True,
returns_idx=True,
to_index=False
),
pd.DataFrame(
np.array([
[0, 2],
[1, 0]
]),
columns=pd.Index(['g1', 'g2'], dtype='object')
)
)
def test_nth(self):
assert mapped_array['a'].nth(0) == 10.
pd.testing.assert_series_equal(
mapped_array.nth(0),
pd.Series(np.array([10., 13., 12., np.nan]), index=wrapper.columns).rename('nth')
)
assert mapped_array['a'].nth(-1) == 12.
pd.testing.assert_series_equal(
mapped_array.nth(-1),
pd.Series(np.array([12., 13., 10., np.nan]), index=wrapper.columns).rename('nth')
)
with pytest.raises(Exception):
_ = mapped_array.nth(10)
pd.testing.assert_series_equal(
mapped_array_grouped.nth(0),
pd.Series(np.array([10., 12.]), index=pd.Index(['g1', 'g2'], dtype='object')).rename('nth')
)
def test_nth_index(self):
assert mapped_array['a'].nth(0) == 10.
pd.testing.assert_series_equal(
mapped_array.nth_index(0),
pd.Series(
np.array(['x', 'x', 'x', np.nan], dtype='object'),
index=wrapper.columns
).rename('nth_index')
)
assert mapped_array['a'].nth(-1) == 12.
pd.testing.assert_series_equal(
mapped_array.nth_index(-1),
pd.Series(
np.array(['z', 'z', 'z', np.nan], dtype='object'),
index=wrapper.columns
).rename('nth_index')
)
with pytest.raises(Exception):
_ = mapped_array.nth_index(10)
pd.testing.assert_series_equal(
mapped_array_grouped.nth_index(0),
pd.Series(
np.array(['x', 'x'], dtype='object'),
index=pd.Index(['g1', 'g2'], dtype='object')
).rename('nth_index')
)
def test_min(self):
assert mapped_array['a'].min() == mapped_array['a'].to_pd().min()
pd.testing.assert_series_equal(
mapped_array.min(),
mapped_array.to_pd().min().rename('min')
)
pd.testing.assert_series_equal(
mapped_array_grouped.min(),
pd.Series([10., 10.], index=pd.Index(['g1', 'g2'], dtype='object')).rename('min')
)
def test_max(self):
assert mapped_array['a'].max() == mapped_array['a'].to_pd().max()
pd.testing.assert_series_equal(
mapped_array.max(),
mapped_array.to_pd().max().rename('max')
)
pd.testing.assert_series_equal(
mapped_array_grouped.max(),
pd.Series([14., 12.], index=pd.Index(['g1', 'g2'], dtype='object')).rename('max')
)
def test_mean(self):
assert mapped_array['a'].mean() == mapped_array['a'].to_pd().mean()
pd.testing.assert_series_equal(
mapped_array.mean(),
mapped_array.to_pd().mean().rename('mean')
)
pd.testing.assert_series_equal(
mapped_array_grouped.mean(),
pd.Series([12.166667, 11.], index=pd.Index(['g1', 'g2'], dtype='object')).rename('mean')
)
def test_median(self):
assert mapped_array['a'].median() == mapped_array['a'].to_pd().median()
pd.testing.assert_series_equal(
mapped_array.median(),
mapped_array.to_pd().median().rename('median')
)
pd.testing.assert_series_equal(
mapped_array_grouped.median(),
pd.Series([12.5, 11.], index=pd.Index(['g1', 'g2'], dtype='object')).rename('median')
)
def test_std(self):
assert mapped_array['a'].std() == mapped_array['a'].to_pd().std()
pd.testing.assert_series_equal(
mapped_array.std(),
mapped_array.to_pd().std().rename('std')
)
pd.testing.assert_series_equal(
mapped_array.std(ddof=0),
mapped_array.to_pd().std(ddof=0).rename('std')
)
pd.testing.assert_series_equal(
mapped_array_grouped.std(),
pd.Series([1.4719601443879746, 1.0], index=pd.Index(['g1', 'g2'], dtype='object')).rename('std')
)
def test_sum(self):
assert mapped_array['a'].sum() == mapped_array['a'].to_pd().sum()
pd.testing.assert_series_equal(
mapped_array.sum(),
mapped_array.to_pd().sum().rename('sum')
)
pd.testing.assert_series_equal(
mapped_array_grouped.sum(),
pd.Series([73.0, 33.0], index=pd.Index(['g1', 'g2'], dtype='object')).rename('sum')
)
def test_count(self):
assert mapped_array['a'].count() == mapped_array['a'].to_pd().count()
pd.testing.assert_series_equal(
mapped_array.count(),
mapped_array.to_pd().count().rename('count')
)
pd.testing.assert_series_equal(
mapped_array_grouped.count(),
pd.Series([6, 3], index=pd.Index(['g1', 'g2'], dtype='object')).rename('count')
)
def test_idxmin(self):
assert mapped_array['a'].idxmin() == mapped_array['a'].to_pd().idxmin()
pd.testing.assert_series_equal(
mapped_array.idxmin(),
mapped_array.to_pd().idxmin().rename('idxmin')
)
pd.testing.assert_series_equal(
mapped_array_grouped.idxmin(),
pd.Series(
np.array(['x', 'z'], dtype=object),
index=pd.Index(['g1', 'g2'], dtype='object')
).rename('idxmin')
)
def test_idxmax(self):
assert mapped_array['a'].idxmax() == mapped_array['a'].to_pd().idxmax()
pd.testing.assert_series_equal(
mapped_array.idxmax(),
mapped_array.to_pd().idxmax().rename('idxmax')
)
pd.testing.assert_series_equal(
mapped_array_grouped.idxmax(),
pd.Series(
np.array(['y', 'x'], dtype=object),
index=pd.Index(['g1', 'g2'], dtype='object')
).rename('idxmax')
)
def test_describe(self):
pd.testing.assert_series_equal(
mapped_array['a'].describe(),
mapped_array['a'].to_pd().describe()
)
pd.testing.assert_frame_equal(
mapped_array.describe(percentiles=None),
mapped_array.to_pd().describe(percentiles=None)
)
pd.testing.assert_frame_equal(
mapped_array.describe(percentiles=[]),
mapped_array.to_pd().describe(percentiles=[])
)
pd.testing.assert_frame_equal(
mapped_array.describe(percentiles=np.arange(0, 1, 0.1)),
mapped_array.to_pd().describe(percentiles=np.arange(0, 1, 0.1))
)
pd.testing.assert_frame_equal(
mapped_array_grouped.describe(),
pd.DataFrame(
np.array([
[6., 3.],
[12.16666667, 11.],
[1.47196014, 1.],
[10., 10.],
[11.25, 10.5],
[12.5, 11.],
[13., 11.5],
[14., 12.]
]),
columns=pd.Index(['g1', 'g2'], dtype='object'),
index=mapped_array.describe().index
)
)
def test_value_counts(self):
pd.testing.assert_series_equal(
mapped_array['a'].value_counts(),
pd.Series(
np.array([1, 1, 1]),
index=pd.Float64Index([10.0, 11.0, 12.0], dtype='float64'),
name='a'
)
)
pd.testing.assert_series_equal(
mapped_array['a'].value_counts(mapping=mapping),
pd.Series(
np.array([1, 1, 1]),
index=pd.Index(['test_10.0', 'test_11.0', 'test_12.0'], dtype='object'),
name='a'
)
)
pd.testing.assert_frame_equal(
mapped_array.value_counts(),
pd.DataFrame(
np.array([
[1, 0, 1, 0],
[1, 0, 1, 0],
[1, 0, 1, 0],
[0, 2, 0, 0],
[0, 1, 0, 0]
]),
index=pd.Float64Index([10.0, 11.0, 12.0, 13.0, 14.0], dtype='float64'),
columns=wrapper.columns
)
)
pd.testing.assert_frame_equal(
mapped_array_grouped.value_counts(),
pd.DataFrame(
np.array([
[1, 1],
[1, 1],
[1, 1],
[2, 0],
[1, 0]
]),
index=pd.Float64Index([10.0, 11.0, 12.0, 13.0, 14.0], dtype='float64'),
columns=pd.Index(['g1', 'g2'], dtype='object')
)
)
mapped_array2 = mapped_array.replace(mapped_arr=[4, 4, 3, 2, np.nan, 4, 3, 2, 1])
pd.testing.assert_frame_equal(
mapped_array2.value_counts(sort_uniques=False),
pd.DataFrame(
np.array([
[2, 1, 0, 0],
[1, 0, 1, 0],
[0, 1, 1, 0],
[0, 0, 1, 0],
[0, 1, 0, 0]
]),
index=pd.Float64Index([4.0, 3.0, 2.0, 1.0, None], dtype='float64'),
columns=wrapper.columns
)
)
pd.testing.assert_frame_equal(
mapped_array2.value_counts(sort_uniques=True),
pd.DataFrame(
np.array([
[0, 0, 1, 0],
[0, 1, 1, 0],
[1, 0, 1, 0],
[2, 1, 0, 0],
[0, 1, 0, 0]
]),
index=pd.Float64Index([1.0, 2.0, 3.0, 4.0, None], dtype='float64'),
columns=wrapper.columns
)
)
pd.testing.assert_frame_equal(
mapped_array2.value_counts(sort=True),
pd.DataFrame(
np.array([
[2, 1, 0, 0],
[0, 1, 1, 0],
[1, 0, 1, 0],
[0, 0, 1, 0],
[0, 1, 0, 0]
]),
index=pd.Float64Index([4.0, 2.0, 3.0, 1.0, np.nan], dtype='float64'),
columns=wrapper.columns
)
)
pd.testing.assert_frame_equal(
mapped_array2.value_counts(sort=True, ascending=True),
pd.DataFrame(
np.array([
[0, 0, 1, 0],
[0, 1, 0, 0],
[0, 1, 1, 0],
[1, 0, 1, 0],
[2, 1, 0, 0]
]),
index=pd.Float64Index([1.0, np.nan, 2.0, 3.0, 4.0], dtype='float64'),
columns=wrapper.columns
)
)
pd.testing.assert_frame_equal(
mapped_array2.value_counts(sort=True, normalize=True),
pd.DataFrame(
np.array([
[0.2222222222222222, 0.1111111111111111, 0.0, 0.0],
[0.0, 0.1111111111111111, 0.1111111111111111, 0.0],
[0.1111111111111111, 0.0, 0.1111111111111111, 0.0],
[0.0, 0.0, 0.1111111111111111, 0.0],
[0.0, 0.1111111111111111, 0.0, 0.0]
]),
index=pd.Float64Index([4.0, 2.0, 3.0, 1.0, np.nan], dtype='float64'),
columns=wrapper.columns
)
)
pd.testing.assert_frame_equal(
mapped_array2.value_counts(sort=True, normalize=True, dropna=True),
pd.DataFrame(
np.array([
[0.25, 0.125, 0.0, 0.0],
[0.0, 0.125, 0.125, 0.0],
[0.125, 0.0, 0.125, 0.0],
[0.0, 0.0, 0.125, 0.0]
]),
index=pd.Float64Index([4.0, 2.0, 3.0, 1.0], dtype='float64'),
columns=wrapper.columns
)
)
@pytest.mark.parametrize(
"test_nosort",
[False, True],
)
def test_indexing(self, test_nosort):
if test_nosort:
ma = mapped_array_nosort
ma_grouped = mapped_array_nosort_grouped
else:
ma = mapped_array
ma_grouped = mapped_array_grouped
np.testing.assert_array_equal(
ma['a'].id_arr,
np.array([0, 1, 2])
)
np.testing.assert_array_equal(
ma['a'].col_arr,
np.array([0, 0, 0])
)
pd.testing.assert_index_equal(
ma['a'].wrapper.columns,
pd.Index(['a'], dtype='object')
)
np.testing.assert_array_equal(
ma['b'].id_arr,
np.array([3, 4, 5])
)
np.testing.assert_array_equal(
ma['b'].col_arr,
np.array([0, 0, 0])
)
pd.testing.assert_index_equal(
ma['b'].wrapper.columns,
pd.Index(['b'], dtype='object')
)
np.testing.assert_array_equal(
ma[['a', 'a']].id_arr,
np.array([0, 1, 2, 0, 1, 2])
)
np.testing.assert_array_equal(
ma[['a', 'a']].col_arr,
np.array([0, 0, 0, 1, 1, 1])
)
pd.testing.assert_index_equal(
ma[['a', 'a']].wrapper.columns,
pd.Index(['a', 'a'], dtype='object')
)
np.testing.assert_array_equal(
ma[['a', 'b']].id_arr,
np.array([0, 1, 2, 3, 4, 5])
)
np.testing.assert_array_equal(
ma[['a', 'b']].col_arr,
np.array([0, 0, 0, 1, 1, 1])
)
pd.testing.assert_index_equal(
ma[['a', 'b']].wrapper.columns,
pd.Index(['a', 'b'], dtype='object')
)
with pytest.raises(Exception):
_ = ma.iloc[::2, :] # changing time not supported
pd.testing.assert_index_equal(
ma_grouped['g1'].wrapper.columns,
pd.Index(['a', 'b'], dtype='object')
)
assert ma_grouped['g1'].wrapper.ndim == 2
assert ma_grouped['g1'].wrapper.grouped_ndim == 1
pd.testing.assert_index_equal(
ma_grouped['g1'].wrapper.grouper.group_by,
pd.Index(['g1', 'g1'], dtype='object')
)
pd.testing.assert_index_equal(
ma_grouped['g2'].wrapper.columns,
pd.Index(['c', 'd'], dtype='object')
)
assert ma_grouped['g2'].wrapper.ndim == 2
assert ma_grouped['g2'].wrapper.grouped_ndim == 1
pd.testing.assert_index_equal(
ma_grouped['g2'].wrapper.grouper.group_by,
pd.Index(['g2', 'g2'], dtype='object')
)
pd.testing.assert_index_equal(
ma_grouped[['g1']].wrapper.columns,
pd.Index(['a', 'b'], dtype='object')
)
assert ma_grouped[['g1']].wrapper.ndim == 2
assert ma_grouped[['g1']].wrapper.grouped_ndim == 2
pd.testing.assert_index_equal(
ma_grouped[['g1']].wrapper.grouper.group_by,
pd.Index(['g1', 'g1'], dtype='object')
)
pd.testing.assert_index_equal(
ma_grouped[['g1', 'g2']].wrapper.columns,
pd.Index(['a', 'b', 'c', 'd'], dtype='object')
)
assert ma_grouped[['g1', 'g2']].wrapper.ndim == 2
assert ma_grouped[['g1', 'g2']].wrapper.grouped_ndim == 2
pd.testing.assert_index_equal(
ma_grouped[['g1', 'g2']].wrapper.grouper.group_by,
pd.Index(['g1', 'g1', 'g2', 'g2'], dtype='object')
)
def test_magic(self):
a = vbt.MappedArray(
wrapper,
records_arr['some_field1'],
records_arr['col'],
id_arr=records_arr['id'],
idx_arr=records_arr['idx']
)
a_inv = vbt.MappedArray(
wrapper,
records_arr['some_field1'][::-1],
records_arr['col'][::-1],
id_arr=records_arr['id'][::-1],
idx_arr=records_arr['idx'][::-1]
)
b = records_arr['some_field2']
a_bool = vbt.MappedArray(
wrapper,
records_arr['some_field1'] > np.mean(records_arr['some_field1']),
records_arr['col'],
id_arr=records_arr['id'],
idx_arr=records_arr['idx']
)
b_bool = records_arr['some_field2'] > np.mean(records_arr['some_field2'])
assert a ** a == a ** 2
with pytest.raises(Exception):
_ = a * a_inv
# binary ops
# comparison ops
np.testing.assert_array_equal((a == b).values, a.values == b)
np.testing.assert_array_equal((a != b).values, a.values != b)
np.testing.assert_array_equal((a < b).values, a.values < b)
np.testing.assert_array_equal((a > b).values, a.values > b)
np.testing.assert_array_equal((a <= b).values, a.values <= b)
np.testing.assert_array_equal((a >= b).values, a.values >= b)
# arithmetic ops
np.testing.assert_array_equal((a + b).values, a.values + b)
np.testing.assert_array_equal((a - b).values, a.values - b)
np.testing.assert_array_equal((a * b).values, a.values * b)
np.testing.assert_array_equal((a ** b).values, a.values ** b)
np.testing.assert_array_equal((a % b).values, a.values % b)
np.testing.assert_array_equal((a // b).values, a.values // b)
np.testing.assert_array_equal((a / b).values, a.values / b)
# __r*__ is only called if the left object does not have an __*__ method
np.testing.assert_array_equal((10 + a).values, 10 + a.values)
np.testing.assert_array_equal((10 - a).values, 10 - a.values)
np.testing.assert_array_equal((10 * a).values, 10 * a.values)
np.testing.assert_array_equal((10 ** a).values, 10 ** a.values)
np.testing.assert_array_equal((10 % a).values, 10 % a.values)
np.testing.assert_array_equal((10 // a).values, 10 // a.values)
np.testing.assert_array_equal((10 / a).values, 10 / a.values)
# mask ops
np.testing.assert_array_equal((a_bool & b_bool).values, a_bool.values & b_bool)
np.testing.assert_array_equal((a_bool | b_bool).values, a_bool.values | b_bool)
np.testing.assert_array_equal((a_bool ^ b_bool).values, a_bool.values ^ b_bool)
np.testing.assert_array_equal((True & a_bool).values, True & a_bool.values)
np.testing.assert_array_equal((True | a_bool).values, True | a_bool.values)
np.testing.assert_array_equal((True ^ a_bool).values, True ^ a_bool.values)
# unary ops
np.testing.assert_array_equal((-a).values, -a.values)
np.testing.assert_array_equal((+a).values, +a.values)
np.testing.assert_array_equal((abs(-a)).values, abs((-a.values)))
def test_stats(self):
stats_index = pd.Index([
'Start', 'End', 'Period', 'Count', 'Mean', 'Std', 'Min', 'Median', 'Max', 'Min Index', 'Max Index'
], dtype='object')
pd.testing.assert_series_equal(
mapped_array.stats(),
pd.Series([
'x', 'z', | pd.Timedelta('3 days 00:00:00') | pandas.Timedelta |
import json
import numpy as np
import pandas as pd
from numba import jit, njit, prange
from sklearn.preprocessing import scale
def load_clades(clades_path="data/clades.json", size=30):
with open(clades_path, "r") as f:
clades = json.load(f)
clade_dict = {}
clade_sizes = {}
for k, v in clades.items():
if v["size"] > size:
clade_dict[k] = v["inds"]
clade_sizes[k] = v["size"]
clade_sizes = pd.Series(clade_sizes).sort_values(ascending=False)
return ((clade_dict, clade_sizes))
def load_NPP_mat(path="data/NPP.tsv", scale_mat=False):
'''
Loads the NPP and turns it into a matrix
:param path: path to load from
:return: a numpy 19891x881 matrix - rows are genes, columns are species and the list of genes
'''
f = open(path, 'r')
NPP = | pd.read_table(f, delimiter="\t", index_col=0) | pandas.read_table |
import streamlit as st
import cv2
import numpy as np
from lxml import etree
import pytesseract
from pytesseract import Output
import pandas as pd
from mmdetection.mmdet.apis import inference_detector, show_result, init_detector
# import mmcv
# import os
# import numpy as np
# from PIL import Image
# from mmdet.apis import init_detector, inference_detector, show_result_pyplot
# from pathlib import Path
#### line_detection.py
# Input : Image
# Output : hor,ver
def line_detection(image):
print("Detecting lines")
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
bw = cv2.adaptiveThreshold(gray, 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY, 15, 1)
bw = cv2.bitwise_not(bw)
## To visualize image after thresholding ##
# print("Thresholding result")
# cv2_imshow(bw)
# cv2.waitKey(0)
###########################################
horizontal = bw.copy()
vertical = bw.copy()
img = image.copy()
# [horizontal lines]
# Create structure element for extracting horizontal lines through morphology operations
horizontalStructure = cv2.getStructuringElement(cv2.MORPH_RECT, (15, 1))
# Apply morphology operations
horizontal = cv2.erode(horizontal, horizontalStructure)
horizontal = cv2.dilate(horizontal, horizontalStructure)
horizontal = cv2.dilate(horizontal, (1,1), iterations=5)
horizontal = cv2.erode(horizontal, (1,1), iterations=5)
## Uncomment to visualize highlighted Horizontal lines
# print("Highligted horizontal lines")
# cv2_imshow(horizontal)
# cv2.waitKey(0)
# HoughlinesP function to detect horizontal lines
hor_lines = cv2.HoughLinesP(horizontal,rho=1,theta=np.pi/180,threshold=100,minLineLength=30,maxLineGap=3)
if hor_lines is None:
return None,None
temp_line = []
for line in hor_lines:
for x1,y1,x2,y2 in line:
temp_line.append([x1,y1-5,x2,y2-5])
# Sorting the list of detected lines by Y1
hor_lines = sorted(temp_line,key=lambda x: x[1])
## Uncomment this part to visualize the lines detected on the image ##
# print(len(hor_lines))
# for x1, y1, x2, y2 in hor_lines:
# cv2.line(image, (x1,y1), (x2,y2), (0, 255, 0), 1)
# print(image.shape)
# print("Visualize lines")
# cv2_imshow(image)
# cv2.waitKey(0)
####################################################################
## Selection of best lines from all the horizontal lines detected ##
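# Example of the grouping below (illustrative): y1 values [100, 103, 108, 150] collapse into
# two horizontal lines, because segments whose y1 lies within 10 px of the first segment of
# the current group are merged into one line spanning their combined x-range.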
lasty1 = -111111
lines_x1 = []
lines_x2 = []
hor = []
i=0
for x1,y1,x2,y2 in hor_lines:
if y1 >= lasty1 and y1 <= lasty1 + 10:
lines_x1.append(x1)
lines_x2.append(x2)
else:
if (i != 0 and len(lines_x1) != 0):
hor.append([min(lines_x1),lasty1,max(lines_x2),lasty1])
lasty1 = y1
lines_x1 = []
lines_x2 = []
lines_x1.append(x1)
lines_x2.append(x2)
i+=1
hor.append([min(lines_x1),lasty1,max(lines_x2),lasty1])
#####################################################################
# [vertical lines]
# Create structure element for extracting vertical lines through morphology operations
verticalStructure = cv2.getStructuringElement(cv2.MORPH_RECT, (1, 15))
# Apply morphology operations
vertical = cv2.erode(vertical, verticalStructure)
vertical = cv2.dilate(vertical, verticalStructure)
vertical = cv2.dilate(vertical, (1,1), iterations=8)
vertical = cv2.erode(vertical, (1,1), iterations=7)
######## Preprocessing Vertical Lines ###############
# print("vertical")
# cv2_imshow(vertical)
# cv2.waitKey(0)
#####################################################
# HoughlinesP function to detect vertical lines
# ver_lines = cv2.HoughLinesP(vertical,rho=1,theta=np.pi/180,threshold=20,minLineLength=20,maxLineGap=2)
ver_lines = cv2.HoughLinesP(vertical, 1, np.pi/180, 20, np.array([]), 20, 2)
if ver_lines is None:
return None,None
temp_line = []
for line in ver_lines:
for x1,y1,x2,y2 in line:
temp_line.append([x1,y1,x2,y2])
# Sorting the list of detected lines by X1
ver_lines = sorted(temp_line,key=lambda x: x[0])
## Uncomment this part to visualize the lines detected on the image ##
# print(len(ver_lines))
# for x1, y1, x2, y2 in ver_lines:
# cv2.line(image, (x1,y1-5), (x2,y2-5), (0, 255, 0), 1)
# print(image.shape)
# cv2.imshow("image",image)
# cv2.waitKey(0)
####################################################################
## Selection of best lines from all the vertical lines detected ##
lastx1 = -111111
lines_y1 = []
lines_y2 = []
ver = []
count = 0
lasty1 = -11111
lasty2 = -11111
for x1,y1,x2,y2 in ver_lines:
if x1 >= lastx1 and x1 <= lastx1 + 15 and not (((min(y1,y2)<min(lasty1,lasty2)-20 or min(y1,y2)<min(lasty1,lasty2)+20)) and ((max(y1,y2)<max(lasty1,lasty2)-20 or max(y1,y2)<max(lasty1,lasty2)+20))):
lines_y1.append(y1)
lines_y2.append(y2)
# lasty1 = y1
# lasty2 = y2
else:
if (count != 0 and len(lines_y1) != 0):
ver.append([lastx1,min(lines_y2)-5,lastx1,max(lines_y1)-5])
lastx1 = x1
lines_y1 = []
lines_y2 = []
lines_y1.append(y1)
lines_y2.append(y2)
count += 1
lasty1 = -11111
lasty2 = -11111
ver.append([lastx1,min(lines_y2)-5,lastx1,max(lines_y1)-5])
#################################################################
############ Visualization of Lines After Post Processing ############
for x1, y1, x2, y2 in ver:
cv2.line(img, (x1,y1), (x2,y2), (0, 255, 0), 1)
for x1, y1, x2, y2 in hor:
cv2.line(img, (x1,y1), (x2,y2), (0, 255, 0), 1)
# print("Lines after post processing")
# cv2.imshow("image",img)
# cv2.waitKey(0)
#######################################################################
return hor,ver
# line_detection(cv2.imread('path to image'))
#### borderFunc.py
################## Functions required for Border table Recognition #################
## Return the intersection of lines only if intersection is present ##
# Input : x1, y1, x2, y2, x3, y3, x4, y4 (1: vertical, 2: horizontal)
# Output : (x,y) Intersection point
def line_intersection(x1, y1, x2, y2, x3, y3, x4, y4):
# print(x1, y1, x2, y2)
# print(x3, y3, x4, y4)
if((x1>= x3-5 or x1>= x3+5) and (x1 <= x4+5 or x1 <= x4-5) and (y3+8>=min(y1,y2) or y3-5>=min(y1,y2)) and y3<=max(y1,y2)+5):
return x1,y3
## main extraction function ##
# Input : Image, Decision parameter (1/0), lines for borderless (only if decision parameter is 0)
# Output : Array of cells
def extract_table(table_body,__line__,lines=None):
# Deciding variable
if(__line__ == 1 ):
# Check if table image is bordered or borderless
temp_lines_hor, temp_lines_ver = line_detection(table_body)
else:
temp_lines_hor, temp_lines_ver = lines
if len(temp_lines_hor)==0 or len(temp_lines_ver)==0:
print("Either Horizontal Or Vertical Lines Not Detected")
return None
table = table_body.copy()
x = 0
y = 0
k = 0
points = []
print("[Table status] : Extracting table")
    # Compute intersection points between each vertical and horizontal line pair
for x1, y1, x2, y2 in temp_lines_ver:
point = []
for x3, y3, x4, y4 in temp_lines_hor:
try:
k += 1
x, y = line_intersection(x1, y1, x2, y2, x3, y3, x4, y4)
point.append([x, y])
except:
continue
points.append(point)
for point in points:
for x,y in point:
cv2.line(table,(x,y),(x,y),(0,0,255),8)
## intersection
# cv2_imshow(table)
# cv2.waitKey(0)
# boxno = -1
box = []
flag = 1
lastCache = []
## creating bounding boxes of cells from the points detected
## This is still under work and might fail on some images
for i, row in enumerate(points):
limitj = len(row)
currentVala = []
for j, col in enumerate(row):
if (j == limitj-1):
break
if (i == 0):
nextcol = row[j+1]
lastCache.append([col[0], col[1], nextcol[0], nextcol[1],9999,9999,9999,9999])
else:
nextcol = row[j+1]
currentVala.append([col[0], col[1], nextcol[0], nextcol[1], 9999, 9999, 9999, 9999])
# Matching
flag = 1
index = []
for k, last in enumerate(lastCache):
if (col[1] == last[1]) and lastCache[k][4] == 9999:
lastCache[k][4] = col[0]
lastCache[k][5] = col[1]
if lastCache[k][4] != 9999 and lastCache[k][6] != 9999:
box.append(lastCache[k])
index.append(k)
flag = 1
if (nextcol[1] == last[3]) and lastCache[k][6] == 9999:
lastCache[k][6] = nextcol[0]
lastCache[k][7] = nextcol[1]
if lastCache[k][4] != 9999 and lastCache[k][6] != 9999:
box.append(lastCache[k])
index.append(k)
flag = 1
if len(lastCache) !=0:
if lastCache[k][4] == 9999 or lastCache[k][6] == 9999:
flag = 0
                # print(index)
                # pop matched entries in reverse order so earlier indices stay valid
                for k in sorted(set(index), reverse=True):
                    lastCache.pop(k)
                # transferring
if flag == 0:
for last in lastCache:
if last[4] == 9999 or last[6] == 9999:
currentVala.append(last)
if(i!=0):
lastCache = currentVala
## Visualizing the cells ##
# count = 1
# for i in box:
# cv2.rectangle(table_body, (i[0], i[1]), (i[6], i[7]), (int(i[7]%255),0,int(i[0]%255)), 2)
# # count+=1
# cv2.imshow("cells",table_body)
# cv2.waitKey(0)
############################
return box
# extract_table(cv2.imread("E:\\KSK\\KSK ML\\KSK PAPERS\\TabXNet\\For Git\\images\\table.PNG"),1,lines=None)
def findX(X,x):
return X.index(x)
def findY(Y,y):
return Y.index(y)
def span(box,X,Y):
start_col = findX(X,box[0]) ## x1
end_col = findX(X,box[4])-1 ## x3
start_row = findY(Y,box[1]) ## y1
end_row = findY(Y,box[3])-1 ## y2
# print(end_col,end_row,start_col,start_row)
return end_col,end_row,start_col,start_row
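# [Added example] Hedged usage sketch, not part of the original file: X and Y are the
# sorted lists of cell-boundary x/y coordinates, and `box` is one cell entry whose
# corner coordinates all appear in X and Y. The coordinates below are hypothetical and
# only illustrate the index lookup; real boxes come from extract_table / borderless.
def _example_span():
    X = [10, 110, 210]
    Y = [20, 60, 100]
    box = [10, 20, 110, 60, 110, 100, 10, 100]
    return span(box, X, Y)   # -> (0, 0, 0, 0): a single cell in row 0, column 0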
def extractText(img):
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
_, thresh1 = cv2.threshold(gray, 0, 255, cv2.THRESH_OTSU | cv2.THRESH_BINARY_INV)
# cv2_imshow(thresh1)
rect_kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (1, 1))
dilation = cv2.dilate(thresh1, rect_kernel, iterations = 2)
contours, _ = cv2.findContours(dilation, cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_NONE)
im2 = img.copy()
mx,my,mw,mh = float('Inf'),float('Inf'),-1,-1
for cnt in contours:
x, y, w, h = cv2.boundingRect(cnt)
# print(im2.shape)
if x<2 or y<2 or (x+w>=im2.shape[1]-1 and y+h>=im2.shape[0]-1) or w>=im2.shape[1]-1 or h>=im2.shape[0]-1:
continue
if x<mx:
mx = x
if y<my:
my = y
if x+w>mw:
mw = x+w
if y+h>mh:
mh = y+h
# print(x, y, w, h)
if mx !=float('Inf') and my !=float('Inf'):
# Drawing a rectangle on copied image
# rect = cv2.rectangle(im2, (mx+1, my), (mw-2, mh-2), (0, 255, 0), 1)
# cv2_imshow(im2)
return mx,my,mw,mh
else :
return None
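# [Added example] Hedged usage sketch, not part of the original file: extractText takes
# the cropped image of a single cell and returns a tight box around all text contours,
# or None for an empty cell. Note that mw/mh are right/bottom coordinates, not
# width/height. The file path below is a placeholder.
def _example_extract_text():
    cell_roi = cv2.imread("cell_crop.png")   # hypothetical cell image
    bbox = extractText(cell_roi)
    if bbox is not None:
        mx, my, mw, mh = bbox
        return cell_roi[my:mh, mx:mw]        # crop containing only the text
    return None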
#### blessFunc.py
## Input : roi of one cell
## Output : bounding box for the text in that cell
def extractTextBless(img):
return_arr = []
h,w=img.shape[0:2]
base_size=h+14,w+14,3
img_np = np.zeros(base_size,dtype=np.uint8)
cv2.rectangle(img_np,(0,0),(w+14,h+14),(255,255,255),30)
img_np[7:h+7,7:w+7]=img
# cv2_imshow(img_np)
gray = cv2.cvtColor(img_np, cv2.COLOR_BGR2GRAY)
# blur = cv2.GaussianBlur(gray,(5,5),0)
ret, thresh1 = cv2.threshold(gray, 0, 255, cv2.THRESH_OTSU | cv2.THRESH_BINARY_INV)
rect_kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (3, 1))
dilation = cv2.dilate(thresh1, rect_kernel, iterations = 2)
# cv2_imshow(dilation)
contours, hierarchy = cv2.findContours(dilation, cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_NONE)
for cnt in (contours):
if cv2.contourArea(cnt) < 20:
continue
x, y, w, h = cv2.boundingRect(cnt)
if(h<6) or w<4 or h/img.shape[0]>0.95 or h>30:
continue
return_arr.append([x-7, y-7, w, h])
return return_arr
## Input : ROI of table, original image, detected cells
## Output : Returns an XML element which contains bounding boxes of text chunks
def borderless(table, image, res_cells):
print("[Table status] : Processing borderless table")
cells = []
x_lines = []
y_lines = []
table[0],table[1],table[2],table[3] = table[0]-15,table[1]-15,table[2]+15,table[3]+15
for cell in res_cells:
if cell[0]>table[0]-50 and cell[1]>table[1]-50 and cell[2]<table[2]+50 and cell[3]<table[3]+50:
cells.append(cell)
# print(cell)
cells = sorted(cells,key=lambda x: x[3])
row = []
last = -1111
row.append(table[1])
y_lines.append([table[0],table[1],table[2],table[1]])
temp = -1111
prev = None
im2 = image.copy()
for i, cell in enumerate(cells):
if i == 0:
last = cell[1]
temp = cell[3]
elif (cell[1]<last+15 and cell[1]>last-15) or (cell[3]<temp+15 and cell[3]>temp-15):
if cell[3]>temp:
temp = cell[3]
else:
last = cell[1]
if last > temp:
row.append((last+temp)//2)
if prev is not None:
if ((last+temp)//2) < prev + 10 or ((last+temp)//2) < prev - 10:
row.pop()
prev = (last+temp)//2
temp = cell[3]
row.append(table[3]+50)
i=1
rows = []
for r in range(len(row)):
rows.append([])
final_rows = rows
maxr = -111
# print(len(row))
for cell in cells:
if cell[3]<row[i]:
rows[i-1].append(cell)
else:
i+=1
rows[i-1].append(cell)
# print(row)
for n,r1 in enumerate(rows):
if n==len(rows):
r1 = r1[:-1]
# print(r1)
r1 = sorted(r1,key=lambda x:x[0])
prevr = None
for no,r in enumerate(r1):
if prevr is not None:
# print(r[0],prevr[0])
if (r[0]<=prevr[0]+5 and r[0]>=prevr[0]-5) or (r[2]<=prevr[2]+5 and r[2]>=prevr[2]-5):
if r[4]<prevr[4]:
r1.pop(no)
else:
r1.pop(no-1)
prevr = r
# print(len(r1))
final_rows[n] = r1
lasty = []
for x in range(len(final_rows)):
lasty.append([99999999,0])
prev = None
for n,r1 in enumerate(final_rows):
for r in r1:
if prev is None:
prev = r
else:
if r[1]<prev[3]:
continue
if r[1]<lasty[n][0]:
lasty[n][0] = r[1]
if r[3]>lasty[n][1]:
lasty[n][1] = r[3]
# print("last y:",lasty)
row = []
row.append(table[1])
prev = None
pr = None
for x in range(len(lasty)-1):
if x==0 and prev==None:
prev = lasty[x]
else:
if pr is not None:
if abs(((lasty[x][0]+prev[1])//2)-pr)<=10:
row.pop()
row.append((lasty[x][0]+prev[1])//2)
else:
row.append((lasty[x][0]+prev[1])//2)
else:
row.append((lasty[x][0]+prev[1])//2)
pr = (lasty[x][0]+prev[1])//2
prev = lasty[x]
row.append(table[3])
maxr = 0
for r2 in final_rows:
# print(r2)
if len(r2)>maxr:
maxr = len(r2)
lastx = []
for n in range(maxr):
lastx.append([999999999,0])
for r2 in final_rows:
if len(r2)==maxr:
for n,col in enumerate(r2):
# print(col)
if col[2]>lastx[n][1]:
lastx[n][1] = col[2]
if col[0]<lastx[n][0]:
lastx[n][0] = col[0]
print(lastx)
for r2 in final_rows:
if len(r2)!=0:
r=0
for n,col in enumerate(r2):
while r!=len(r2)-1 and (lastx[n][0]>r2[r][0]):
r +=1
if n != 0:
if r2[r-1][0] > lastx[n-1][1]:
if r2[r-1][0]<lastx[n][0]:
lastx[n][0] = r2[r-1][0]
for r2 in final_rows:
for n,col in enumerate(r2):
if n != len(r2)-1:
if col[2] < lastx[n+1][0]:
if col[2]>lastx[n][1]:
lastx[n][1] = col[2]
# print(lastx)
col = np.zeros(maxr+1)
col[0] = table[0]
prev = 0
i = 1
for x in range(len(lastx)):
if x==0:
prev = lastx[x]
else:
col[i] = (lastx[x][0]+prev[1])//2
i+=1
prev = lastx[x]
col = col.astype(int)
col[maxr] = table[2]
_row_ = sorted(row, key=lambda x:x)
_col_ = sorted(col, key=lambda x:x)
print("_row_ :", _row_)
print("_col_ :", _col_)
for no,c in enumerate(_col_):
x_lines.append([c,table[1],c,table[3]])
cv2.line(im2,(c,table[1]),(c,table[3]),(255,0,0),1)
for no,c in enumerate(_row_):
y_lines.append([table[0],c,table[2],c])
cv2.line(im2,(table[0],c),(table[2],c),(255,0,0),1)
cv2_imshow(im2)
print("table:",table)
# for r in row:
# cv2.line(im2,(r,table[1]),(r,table[3]),(0,255,0),1)
# for c in col:
# cv2.line(im2,(c,table[1]),(c,table[3]),(0,255,0),1)
# PERFORM OCR
d = pytesseract.image_to_data(img, output_type=Output.DICT)
ocr = | pd.DataFrame.from_dict(d) | pandas.DataFrame.from_dict |
import numpy as np
import sys, os, glob, pathlib, csv, importlib
import pandas as pd
#from Geomodel_parameters import egen_project
def egen_paths(geomodeller, model, data=None):
"""define paths for different parts of the process"""
# arg path inputs need to be raw string to avoid escape issue eg. "\U" in C:\Users etc
global path_geomodeller1
global path_geomodeller2
global path_model
global path_to_model
global path_output
global path_data
path_geomodeller1 = f'{geomodeller}/bin'
path_geomodeller1 = path_geomodeller1.replace("\\", "/")
path_geomodeller2 = f'{geomodeller}/bin/server'
path_geomodeller2 = path_geomodeller2.replace("\\", "/")
path_model = model
path_to_model = model
# path_model = "%r" % model
path_model = path_model.replace("\\", "/")
path_to_model = path_to_model.replace("\\", "/")
path_output = f'{model}/output' # task files will be stored in output directory. ?Separate one - unnecessary at this point
path_output = path_output.replace("\\", "/")
if data is not None:
path_data = data.replace("\\", "/")
else:
data = None
return #path_geomodeller1, path_geomodeller2, path_model, path_to_model, path_output
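# [Added example] Hedged usage sketch, not part of the original module: egen_paths only
# sets module-level globals, so call it before any of the task builders below. Raw
# strings avoid the "\U..." escape problem mentioned above; all paths here are placeholders.
def _example_egen_paths():
    egen_paths(r"C:\Program Files\GeoModeller", r"C:\projects\my_model",
               data=r"C:\projects\my_model\data")
    print(path_geomodeller1)   # -> C:/Program Files/GeoModeller/bin
    print(path_output)         # -> C:/projects/my_model/output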
def egen_xml_to_task(path_to_model, model_xml, model_task):
"""generate task file from xml"""
# emulates exporter.task
# path_model; path_outputs should be prior set with egen paths
# paths areas input, so can be explicit or relative.
import os
    global g_model_xml # model xml filename (kept with its .xml extension) for reuse by later tasks
    # make sure the filename carries the .xml extension
    if model_xml.find(".xml") == -1:
        model_xml = model_xml + ".xml"
    g_model_xml = model_xml
# if not os.path.exists(path_output):
# os.makedirs(path_output)
orig_task = open(f'{path_to_model}/xml_to_task.task', "w")
task = f'''GeomodellerTask {{
WriteBatchFile {{
filename: "{path_to_model}/{model_xml}"
Task_Name: "{path_to_model}/{model_task}"
convertSection_InterfacesTo3D: true
convertSection_FoliationTo3D: true
exportBoreholesToCSV: false
csv_path: "{path_to_model}/output/"
exportToGeomodellerTempDirectory: false
}}
}}'''
orig_task.write(task)
orig_task.close()
return
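# [Added example] Hedged usage sketch, not part of the original module: writes
# xml_to_task.task, which asks GeoModeller to convert an existing project xml into a
# batch .task script inside the model directory. File names below are placeholders.
def _example_egen_xml_to_task():
    egen_xml_to_task(path_to_model="C:/projects/my_model",
                     model_xml="my_model.xml",
                     model_task="my_model.task")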
def egen_calc_original(model_xml):
'''calculate original model'''
global g_model_xml
g_model_xml = model_xml
orig_task = open(f'{path_output}/calc_{model_xml}', "w")
task = f'''GeomodellerTask {{
OpenProjectNoGUI {{
filename: "{path_model}/{g_model_xml}"
}}
}}
GeomodellerTask {{
ComputeModel {{
SeriesList {{
node: "all"
}}
FaultList {{
node: "all"
}}
SectionList {{
node: "all"
}}
}}
}}
GeomodellerTask {{
SaveProjectAs {{
filename: "{path_model}/{g_model_xml}"
log: "projectsave.rpt"
}}
}}
GeomodellerTask {{
CloseProjectNoGUI {{
}}
}}
'''
orig_task.write(task)
orig_task.close()
return
def egen_orig_model_voxet(path_to_model, model_xml, nx, ny, nz, litho=True, scalar=False, scalar_grads=False):
"""task for exporting voxets from the original model - litho, scalar, gradients etc"""
"""assumes you want the same voxel parameters for all voxets"""
"""if different cell sizes are needed, repeat this function with the appropriate params and voxet"""
if not os.path.exists(f'''{path_to_model}/voxets'''):
os.makedirs(f'''{path_to_model}/voxets''')
open_task = f'''GeomodellerTask {{
OpenProjectNoGUI {{
filename: "{path_to_model}/{model_xml}"
}}
}}\n'''
if litho is True:
# save out lithology voxet
task1 = f'''GeomodellerTask {{
SaveLithologyVoxet {{
nx:{nx}
ny:{ny}
nz:{nz}
LithologyVoxetFileStub: "{path_to_model}/voxets/orig_gocad_litho"
}}
}}\n'''
else:
task1 = ""
if scalar is True:
task2 = f'''GeomodellerTask {{
SavePotentialGradientVoxet {{
nx:{nx}
ny:{ny}
nz:{nz}
Just_Gradients: false
VoxetFileStub: "{path_to_model}/voxets/orig_gocad_scalar"
}}
}}\n'''
else:
task2 = ""
if scalar_grads is True:
task3 = f'''GeomodellerTask {{
SavePotentialGradientVoxet {{
nx:{nx}
ny:{ny}
nz:{nz}
Just_Gradients: true
VoxetFileStub: "{path_to_model}/voxets/orig_gocad_scalar_grads"
}}
}}\n'''
else:
task3 = ""
close_task = '''GeomodellerTask {
CloseProjectNoGUI {
}
}'''
orig_model_voxet = open(f'{path_to_model}/orig_model_voxet.task', "w")
orig_model_voxet.write(open_task + task1 + task2 + task3 + close_task)
orig_model_voxet.close()
return
def egen_create_batch(*tasks): # need to fix how the tasks args can be added to the batch without explicit indexing
'''create batch file .bat for windows for correct sequence of task file execution'''
# create a switch for linux - .sh and path setting will be different
task_list = [None] * len(tasks)
batch = f"SET PATH=%PATH%;{path_geomodeller1}\n"
for i in range(0, len(tasks)):
batch = batch + "geomodellerbatch " + tasks[i] + "\n"
egen_batch = open(f'{path_model}/egen_batch.bat', "w")
egen_batch.write(batch)
egen_batch.close()
return
def egen_create_batch_auto(tasks, run): # need to fix how the tasks args can be added to the batch without explicit indexing
'''create batch file .bat for windows for correct sequence of task file execution
this version of the function accepts a list of task names of input'''
# create a switch for linux - .sh and path setting will be different
batch1 = f"SET PATH=%PATH%;{path_geomodeller1}\n"
egen_batch = open(f'{path_model}/egen_batch_{run}.bat', "w")
egen_batch.write(batch1)
for m in range(0, len(tasks)):
batch2 = "geomodellerbatch " + str(tasks[m]) + "\n"
egen_batch.write(batch2)
egen_batch.close()
return
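# [Added example] Hedged usage sketch, not part of the original module: pass the task
# file names (relative to the model directory) in execution order plus a run label;
# the function writes egen_batch_<run>.bat that runs each task under geomodellerbatch.
# egen_paths must have been called first (it sets path_model and path_geomodeller1);
# the task names below are placeholders.
def _example_egen_create_batch_auto():
    egen_create_batch_auto(["xml_to_task.task", "orig_model_voxet.task"], run=0)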
def egen_create_voxet_ensemble_batch(samples): # need to fix how the tasks args can be added to the batch without explicit indexing
'''create batch file .bat for windows for correct sequence of task file execution'''
# create a switch for linux - .sh and path setting will be different
task_list = [None] * samples * 2 # '2' because we are creating task entries for 1) model_n.task and 2) model_n_voxet.task
batch = f"SET PATH=%PATH%;{path_geomodeller1}\n"
for i in range(samples):
batch = batch + "geomodellerbatch model_" + str(i) + "_voxet.task\n"
egen_batch = open(f'{path_model}/egen_voxet_ensemble_batch.bat', "w")
egen_batch.write(batch)
egen_batch.close()
return
def egen_MC_cokrig_params(range = None, interface = None, orientation = None, drift = None):
'''set model interpolation parameters using cokriging'''
    '''Range [default = 10000.0]; Contacts_Nugget_Effect [default = 0.000001];
Gradients_Nugget_Effect [default = 0.01]; FaultDriftEquationDegree = 4 [default = 1]'''
if range is None:
range = 10000.0
if interface is None:
interface = 0.000001
if orientation is None:
orientation = 0.01
if drift is None:
drift = 1
task = f'''GeomodellerTask {{
OpenProjectNoGUI {{
filename: "{path_model}/{g_model_xml}"
}}
}}\n
GeomodellerTask {{
ComputeModel {{
SeriesList
{{
node: "all"
}}
SectionList {{
node: "all"
}}
ModelInterpolationParameters {{
Range: {range}
Contacts_Nugget_Effect: {interface}
Gradients_Nugget_Effect: {orientation}
FaultDriftEquationDegree: {drift}
}}
}}\n
GeomodellerTask {{
CloseProjectNoGUI {{
}}
}}'''
egen_interpolate = open(f'{path_output}/egen_MC_cokrig_params.task', "w")
egen_interpolate.write(task)
egen_interpolate.close()
return
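# [Added example] Hedged usage sketch, not part of the original module: with no arguments
# the defaults listed in the docstring are written into egen_MC_cokrig_params.task;
# egen_paths and egen_xml_to_task (which sets g_model_xml) must have run first.
# The parameter values below are illustrative only.
def _example_egen_MC_cokrig_params():
    egen_MC_cokrig_params(range=8000.0, interface=1e-6, orientation=0.02, drift=1)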
def calc_voxet_ensemble(path_model, nx, ny, nz, model_from = None, model_to = None, litho = None, scalar=None, scalar_grads=None):
"""task for calculating and an ensemble of models, then
exporting voxets from the original model - litho, scalar, gradients etc"""
"""assumes you want the same voxel parameters for all voxets"""
"""if different cell sizes are needed, repeat this function with the appropriate params and voxet"""
'''path = path to model tasks
nx, ny, nz = number of cells on the respective axes
    model_from, model_to = options here for splitting the ensemble into groups for calculation on multiple cores
    litho, scalar, scalar_grads = None by default. Set to True to export litho, scalar and
scalar gradient voxets (gocad binary format).'''
#task_path = path_model
#ensemble_path = path_model + "/ensemble"
ensemble_path = path_model
os.chdir(path_model)
if not os.path.exists("./voxets"):
os.makedirs("./voxets")
pattern = "*.xml"
xml_names = glob.glob(pattern)
voxet_path = "../voxets"
#os.chdir(ensemble_path)
# if 'model_from' != locals():
# model_from = 0
# if 'model_to' != locals():
# model_to = len(xml_names)
if model_from is None:
model_from = 0
if model_to is None:
model_to = len(xml_names)
model_voxet = open(f'{ensemble_path}/model_{model_from}_{model_to}_voxet.task', "w")
for m in range(model_from, model_to):
model_xml = xml_names[m]
open_task = '''GeomodellerTask {
OpenProjectNoGUI {
filename: "%s"
}
}\n''' % (model_xml)
if litho is not None:
# save out lithology voxet
task1 = '''GeomodellerTask {
SaveLithologyVoxet {
nx:%d
ny:%d
nz:%d
LithologyVoxetFileStub: "%s/model_%i_gocad_litho"
}
}\n''' % (nx, ny, nz, voxet_path, m)
else:
task1 = ""
if scalar is not None:
task2 = '''GeomodellerTask {
SavePotentialGradientVoxet {
nx: %d
ny: %d
nz: %d
Just_Gradients: false
VoxetFileStub: "%s/model_%i_gocad_scalar"
}
}\n''' % (nx, ny, nz, voxet_path, m)
else:
task2 = ""
if scalar_grads is not None:
task3 = '''GeomodellerTask {
SavePotentialGradientVoxet {
nx: %d
ny: %d
nz: %d
Just_Gradients: true
VoxetFileStub: "%s/model_%i_gocad_scalar_grads"
}
}\n''' % (nx, ny, nz, voxet_path, m)
else:
task3 = ""
close_task = '''GeomodellerTask {
CloseProjectNoGUI {
}
}'''
model_voxet.write(open_task + task1 + task2 + task3 + close_task)
model_voxet.close()
return
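# [Added example] Hedged usage sketch, not part of the original module: builds one task
# file that opens every *.xml model between model_from and model_to found in the given
# directory and exports a lithology voxet for each; pass scalar/scalar_grads to add the
# extra grids. The path and cell counts below are placeholders.
def _example_calc_voxet_ensemble():
    calc_voxet_ensemble("C:/projects/my_model/ensemble", nx=100, ny=100, nz=50,
                        model_from=0, model_to=10, litho=True)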
#%% task builder
def task_builder(path, filename, class_file):
#def task_builder(path, egen_runs, series_calc=None, krig_range=None, interface=None, orientation=None, drift=None, fault_calc=None, litho=True, scalar=False, scalar_grads=False):
'''speed increase with numpy... maybe? instead of pandas'''
func_params = importlib.import_module(class_file)
path = pathlib.PurePosixPath(path) / filename
#if not os.path.exists("./ensemble"):
# os.makedirs("./ensemble")
#par_file = path.parent / par_file
#exec(open(path.parent / par_file).read())
#print(egen_runs)
contents = pd.read_csv(path, sep='\t', header=None, quotechar='\0') # + '/' + filename, sep='\t', header=None)
fault_info = pd.read_csv(path.parent / "output/fault_info.csv") # contents = task_file.readlines()
strat_info = pd.read_csv(path.parent / "output/strat.csv")
#contents = (contents)
# get first file part - everything up to where the data points are added
end_line = contents[0]==' Add3DInterfacesToFormation {'
idx = [a for a, x in enumerate(end_line) if x] # make list of row indices where the string above is found
task_pt1 = contents[0:(idx[0]-1)]
voxet_path = path.parent / "voxets/"
if not os.path.exists("./voxets"):
os.makedirs("./voxets")
for i in range(func_params.egen_project.egen_runs):
new_contacts = pd.read_csv(f'{path.parent}/output/contacts_{i}.csv')
new_contacts = new_contacts.round(6)
new_contacts['formation'] = new_contacts['formation'].str.strip()
new_orientations = pd.read_csv(f'{path.parent}/output/contacts_orient_{i}.csv')
new_orientations = new_orientations.round(6)
new_orientations['formation'] = new_orientations['formation'].str.strip()
#p_idx = new_orientations['polarity'] == 0 # replace polarity 1 = 'Normal_Polarity'; 0 = 'Reverse_Polarity'
#new_orientations['polarity'][p_idx] = 'Reverse_Polarity' # this may cause trouble (chained indexing), I have changed the parser to not convert polarity flags
#new_orientations['polarity'][p_idx==False] = 'Normal_Polarity'
tmp_contact_formations = new_contacts.formation.unique()
tmp_orient_formations = new_orientations.formation.unique()
# build the 'chunk' of the task file with contact info
# build the 'chunk' of the task file with orientation info
tmp_cont_chunk = | pd.DataFrame([]) | pandas.DataFrame |
# pylint: disable-msg=E1101,E1103
from datetime import datetime
import operator
import numpy as np
from pandas.core.index import Index
import pandas.core.datetools as datetools
#-------------------------------------------------------------------------------
# XDateRange class
class XDateRange(object):
"""
XDateRange generates a sequence of dates corresponding to the
specified time offset
Notes
-----
If both start and end are specified, the returned dates will
satisfy:
start <= date <= end
    In other words, dates are constrained to lie in the specified range
as you would expect, though no dates which do NOT lie on the
offset will be returned.
XDateRange is a generator, use if you do not intend to reuse the
date range, or if you are doing lazy iteration, or if the number
of dates you are generating is very large. If you intend to reuse
the range, use DateRange, which will be the list of dates
generated by XDateRange.
See also
--------
DateRange
"""
_cache = {}
_cacheStart = {}
_cacheEnd = {}
def __init__(self, start=None, end=None, nPeriods=None,
offset=datetools.BDay(), timeRule=None):
if timeRule is not None:
offset = | datetools.getOffset(timeRule) | pandas.core.datetools.getOffset |
import re
import pandas as pd
# import matplotlib
# matplotlib.use('Agg')
import matplotlib.pyplot as plt
from matplotlib.figure import Figure
import matplotlib.ticker as ticker
import matplotlib.dates as mdates
import numpy as np
import seaborn as sns; sns.set()
from scipy.spatial.distance import squareform
from scipy.spatial.distance import pdist, euclidean
from sklearn.preprocessing import MinMaxScaler
from datetime import datetime, timedelta
from io import StringIO, BytesIO
from app.models import Country, CountryStatus
import base64
import plotly.figure_factory as ff
data_dir = 'data/'
def get_all_places(level='countries'):
# df_places = pd.read_csv(data_dir + 'all_{}_compare.csv'.format(level))
df_places = Country.all_countries_names_as_df()
return list(df_places['Name'])
def get_all_countries_response():
df_places = pd.read_csv(data_dir + 'all_countries_response.csv')
return list(df_places['Country'])
def get_df_similar_places(place, level = 'countries'):
# if level == 'cities':
# df_sim = pd.read_csv(data_dir + 'all_{}_similarity.csv'.format(level))
# df_sim = df_sim[df_sim['CityBase'] == place]
# df_sim = df_sim[['Name', 'gap', 'dist', 'Similarity']].set_index('Name')
# return df_sim
# df_orig = pd.read_csv(data_dir + 'total_cases_{}_normalized.csv'.format(level))
df_orig = Country.all_countries_as_df()
df_orig_piv_day = df_orig.pivot(index='Name', columns='Day', values='TotalDeaths')
df_orig_piv_day = df_orig_piv_day.fillna(0)
sr_place = df_orig_piv_day.loc[place,]
place_start = (sr_place > 0).idxmax()
# place_start_cases = (df_orig.set_index('Name').loc[place,].set_index('Day')['Total'] > 0).idxmax()
days_ahead = 14 #if level == 'countries' else 5
df_places_ahead = df_orig_piv_day[df_orig_piv_day.loc[:, max(place_start - days_ahead,0)] > 0.0]
df_places_rate_norm = df_orig_piv_day.loc[df_places_ahead.index, :]
# df_places_rate_norm = df_orig_piv_day.loc[['France', 'Italy'], :]
df_places_rate_norm = df_places_rate_norm.append(df_orig_piv_day.loc[place,])
# reverse order to keep base place on top
df_places_rate_norm = df_places_rate_norm.iloc[::-1]
sr_place = df_orig_piv_day.loc[place,]
# place_start = (sr_place > 0).idxmax()
# sr_place_compare = sr_place.loc[place_start:].dropna()
sr_place = df_orig_piv_day.loc[place,]
place_start = (sr_place > 0).idxmax()
sr_place_compare = sr_place.loc[place_start:].dropna()
df_places_gap = pd.DataFrame({'Name': [], 'gap': [], 'dist': []})
df_places_gap = df_places_gap.append(pd.Series([place, 0.0, -1], index=df_places_gap.columns),
ignore_index=True)
for other_place in df_places_rate_norm.index[1:]:
sr_other_place = df_places_rate_norm.loc[other_place,].fillna(0)
min_dist = np.inf
min_pos = 0
for i in range(0, 1 + len(sr_other_place) - len(sr_place_compare)):
sr_other_place_compare = sr_other_place[i: i + len(sr_place_compare)]
dist = euclidean(sr_place_compare, sr_other_place_compare)
if (dist < min_dist):
min_dist = dist
min_pos = i
day_place2 = sr_other_place.index[min_pos]
gap = day_place2 - place_start
df_places_gap = df_places_gap.append(
pd.Series([other_place, gap, min_dist], index=df_places_gap.columns),
ignore_index=True)
df_places_gap = df_places_gap.set_index('Name')
similar_places = df_places_gap.sort_values('dist')
dist_max = euclidean(sr_place_compare, np.zeros(len(sr_place_compare)))
similar_places['Similarity'] = similar_places['dist'].apply(lambda x: (1.0 - x / dist_max) if x >= 0 else 1)
return similar_places
# get similar places based on alighment of death curve
def get_similar_places(place, level = 'countries'):
similar_places = get_df_similar_places(place, level = level)
# print(similar_places)
tuples = [tuple(x) for x in similar_places[1:8].reset_index().to_numpy()]
return tuples
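# [Added example] Hedged usage sketch, not part of the original module: each returned
# tuple is (Name, gap, dist, Similarity), sorted by curve distance, so the first entries
# are the countries whose death curves best match the query country once time-shifted.
# The country name below is illustrative.
def _example_get_similar_places():
    for name, gap, dist, similarity in get_similar_places('Brazil'):
        print(f"{name}: shifted {gap:+.0f} days, similarity {similarity:.2f}")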
#get similar places based on socioeconomic features
def get_similar_places_socio(place, level = 'countries'):
df_socio_stats_orig = pd.read_csv(data_dir + 'socio_stats_{}.csv'.format(level)).drop('score', axis=1)
if not len(df_socio_stats_orig.query('Name == "{}"'.format(place))): return []
df_socio_stats_orig_piv = df_socio_stats_orig.pivot(index='Name', columns='variable')
df_socio_stats_orig_piv = df_socio_stats_orig_piv.fillna(df_socio_stats_orig_piv.mean())
scaler = MinMaxScaler() # feature_range=(-1, 1)
df_socio_stats_orig_piv_norm = pd.DataFrame(scaler.fit_transform(df_socio_stats_orig_piv),
columns=df_socio_stats_orig_piv.columns,
index=df_socio_stats_orig_piv.index)
df_dist = pd.DataFrame(squareform(pdist(df_socio_stats_orig_piv_norm)), index=df_socio_stats_orig_piv_norm.index,
columns=df_socio_stats_orig_piv_norm.index)
df_sim = df_dist.loc[:, place].to_frame(name='dist')
df_sim['similarity'] = 1 - (df_sim['dist'] / df_sim['dist'].max())
df_sim = df_sim.sort_values('similarity', ascending=False).drop('dist', axis=1)
tuples = [tuple(x) for x in df_sim[1:11].reset_index().to_numpy()]
return tuples
def get_places_by_variable(type = 'socio', level = 'countries', variable = 'Population', ascending = False):
if type == 'socio':
df_orig = pd.read_csv(data_dir + 'socio_stats_{}.csv'.format(level)).drop('score', axis=1)
else:
df_orig = pd.read_csv(data_dir + 'live_stats_{}.csv'.format(level))
# df_orig = df_orig.groupby(['Name', 'Date']).tail(1)
df_orig = df_orig[df_orig['variable'] == variable].pivot(index='Name', columns='variable', values='value').reset_index()
df_orig = df_orig[['Name', variable]].sort_values(variable, ascending = ascending).head(10)
tuples = [tuple(x) for x in df_orig.reset_index(drop=True).to_numpy()]
return tuples
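# [Added example] Hedged usage sketch, not part of the original module: returns the
# top-10 (Name, value) tuples for one variable; the variable name must match a value
# in the CSV's 'variable' column (e.g. 'Population' in the socioeconomic file).
def _example_get_places_by_variable():
    return get_places_by_variable(type='socio', level='countries',
                                  variable='Population', ascending=False)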
def get_fig_compare_rates(place, place2, level = 'countries', scale='log', y='total', mode='static', priority = 'now'):
df_places_to_show = get_place_comparison_df(place, place2, level = level, priority = priority)
fig = make_chart_comparison(df_places_to_show, level = level, scale=scale, y=y, mode=mode)
return fig
def get_html_compare_response(place, place2, level = 'countries', scale='log', y='total', mode='static', priority = 'now'):
# df_places_to_show = get_place_comparison_df(place, place2, level = level, priority = priority, type = 'response')
data_dir = 'data/'
df_orig = pd.read_csv(data_dir + 'response/official_response_countries.csv', parse_dates=['Date'])
cols = list(df_orig.columns[df_orig.dtypes.eq('float64')][:15]) + ['ConfirmedDeaths']
df_orig[cols] = df_orig[cols].astype(pd.Int64Dtype())
countries = [place, place2]
df_orig = df_orig[df_orig['Name'].isin(countries)]
df_gantt = df_orig[['Name', 'Date', 'StringencyIndexForDisplay', 'ConfirmedDeaths']].rename(
columns={'Date': 'Start', 'Name': 'Task'})
df_gantt['StringencyIndexForDisplay'] = df_gantt['StringencyIndexForDisplay'].fillna(0)
df_gantt['Finish'] = df_gantt['Start'] + timedelta(days=1)
df_gantt['Description'] = df_orig.apply(lambda
x: "Stringency Index: {StringencyIndexForDisplay}<br>Confirmed Deaths: {ConfirmedDeaths}<br>School closing: {C1_School closing}<br>Workplace closing: {C2_Workplace closing}<br>Cancel public events: {C3_Cancel public events}<br>Restrictions on gatherings: {C4_Restrictions on gatherings}<br>Close public transport: {C5_Close public transport}<br>Stay at home requirements: {C6_Stay at home requirements}<br>Restrictions on internal movement: {C7_Restrictions on internal movement}<br>International travel controls: {C8_International travel controls}".format(
**x), axis=1)
df_gantt['ConfirmedDeaths'] = np.log(df_gantt['ConfirmedDeaths'])
df_gantt = df_gantt.replace([-np.inf], 0)
df_gantt['DeathsNorm'] = 0.7 * (df_gantt['ConfirmedDeaths'] - df_gantt['ConfirmedDeaths'].min()) / (
df_gantt['ConfirmedDeaths'].max() - df_gantt['ConfirmedDeaths'].min()) - 0.35
df_gantt_c1 = df_gantt[df_gantt['Task'] == place]
df_gantt_c1['DeathsNorm'] = df_gantt_c1['DeathsNorm'] + 1
df_gantt_c2 = df_gantt[df_gantt['Task'] == place2]
fig = make_chart_response_comparison(df_gantt_c1, df_gantt_c2, level = level, scale=scale, y=y, mode=mode)
return fig
def get_html_compare_response_econ(place, place2, level = 'countries', scale='log', y='total', mode='static', priority = 'now'):
# df_places_to_show = get_place_comparison_df(place, place2, level = level, priority = priority, type = 'response')
data_dir = 'data/'
df_orig = pd.read_csv(data_dir + 'response/official_response_economic_countries.csv', parse_dates=['Date'])
# cols = list(df_orig.columns[df_orig.dtypes.eq('float64')][:15]) + ['ConfirmedDeaths']
# df_orig[cols] = df_orig[cols].astype(pd.Int64Dtype())
countries = [place, place2]
df_orig = df_orig[df_orig['Name'].isin(countries)]
df_gantt = df_orig[['Name', 'Date', 'EconomicSupportIndexForDisplay', 'ConfirmedDeaths', 'Description']].rename(
columns={'Date': 'Start', 'Name': 'Task'})
df_gantt['EconomicSupportIndexForDisplay'] = df_gantt['EconomicSupportIndexForDisplay'].fillna(0)
df_gantt['Finish'] = df_gantt['Start'] + timedelta(days=1)
df_gantt['ConfirmedDeaths'] = np.log(df_gantt['ConfirmedDeaths'])
df_gantt = df_gantt.replace([-np.inf], 0)
df_gantt['DeathsNorm'] = 0.7 * (df_gantt['ConfirmedDeaths'] - df_gantt['ConfirmedDeaths'].min()) / (
df_gantt['ConfirmedDeaths'].max() - df_gantt['ConfirmedDeaths'].min()) - 0.35
df_gantt_c1 = df_gantt[df_gantt['Task'] == place]
df_gantt_c1['DeathsNorm'] = df_gantt_c1['DeathsNorm'] + 1
df_gantt_c2 = df_gantt[df_gantt['Task'] == place2]
fig = make_chart_response_comparison(df_gantt_c1, df_gantt_c2, level = level, scale=scale, y=y, mode=mode, var='EconomicSupportIndexForDisplay')
return fig
def get_fig_compare_doubling_rates(place, place2, level = 'countries'):
df_places_to_show = get_place_comparison_df(place, place2, level = level)
fig = make_chart_comparison_growth(df_places_to_show, level = level)
return fig
def get_fig_response(country):
df_orig_response = pd.read_csv(data_dir + 'pollution_countries_raw.csv', parse_dates=['Date'])
df_orig_cases = pd.read_csv(data_dir + 'total_cases_countries_normalized.csv', parse_dates=['Date']).rename(
columns={'Name': 'Country'})
df_orig = pd.merge(df_orig_response, df_orig_cases, how='left')
df_to_show = df_orig[df_orig['Country'] == country][['Country', 'City', 'Date', 'no2', 'TotalDeaths']].sort_values('Date')
deaths_start = 10
start_deaths = (df_to_show['TotalDeaths'] >= deaths_start).idxmax()
avg_before_deaths = df_to_show.loc[:start_deaths, 'no2'].mean()
start_display = max(start_deaths - 60, 0)
df_to_show = df_to_show.loc[start_display:, ]
df_to_show['no2'] = df_to_show[['no2']].rolling(5).mean()
fig = make_chart_response(country, deaths_start, avg_before_deaths, df_to_show)
return fig
def get_places_gap_df(df_orig, place, place2, priority = 'now'):
df_places_gap = pd.DataFrame({'Name': [], 'gap': [], 'dist': []})
df_places_gap = df_places_gap.append(pd.Series([place, 0.0, -1], index=df_places_gap.columns),
ignore_index=True)
df_orig = df_orig.set_index('Name')
if not ((df_orig.loc[place,'TotalDeaths'].max()>0) and (df_orig.loc[place2,'TotalDeaths'].max()>0)):
# one of the places has 0 deaths
min_dist = 0 # means nothing here
dist_max = 1 # means nothing here
gap = 0
elif priority != 'now':
# must align based on beginning of deaths
day_place = (df_orig.loc[place,:].set_index('Day')['TotalDeaths'] > 10).idxmax()
day_place2 = (df_orig.loc[place2,:].set_index('Day')['TotalDeaths'] > 10).idxmax()
min_dist = 0 # means nothing here
dist_max = 1 # means nothing here
gap = day_place2 - day_place
else:
# similarity alignment
df_orig_piv_day = df_orig.reset_index().pivot(index='Name', columns='Day', values='TotalDeaths')
sr_place = df_orig_piv_day.loc[place,]
place_start = (sr_place > 0).idxmax()
sr_place_compare = sr_place.loc[place_start:].dropna()
sr_other_place = df_orig_piv_day.loc[place2,].fillna(0)
min_dist = np.inf
min_pos = 0
for i in range(0, 1 + len(sr_other_place) - len(sr_place_compare)):
sr_other_place_compare = sr_other_place[i: i + len(sr_place_compare)]
dist = euclidean(sr_place_compare, sr_other_place_compare)
if (dist < min_dist):
min_dist = dist
min_pos = i
dist_max = euclidean(sr_place_compare, np.zeros(len(sr_place_compare)))
day_place2 = sr_other_place.index[min_pos]
# gap = min_pos - place_start
gap = day_place2 - place_start
df_places_gap = df_places_gap.append(
pd.Series([place2, gap, min_dist], index=df_places_gap.columns),
ignore_index=True)
df_places_gap = df_places_gap.set_index('Name')#.sort_values('dist')
df_places_gap['Similarity'] = df_places_gap['dist'].apply(lambda x: (1.0 - x / dist_max) if x >= 0 else 1)
return df_places_gap
def get_total_cases_df_adjusted(df_orig, df_places_gap, place, place2):
df_total_cases = df_orig.set_index('Name')
df_total_cases_top = df_total_cases.join(df_places_gap)
df_total_cases_top['DayAdj'] = ((df_total_cases_top['Day'] - df_total_cases_top['gap']) - 1).astype(int)
# df_total_cases_top.loc[place2, 'DayAdj'] = ((df_total_cases_top.loc[place2, 'Day'] - df_total_cases_top.loc[place2, 'gap']) - 1)
# df_total_cases_top['DayAdj'] = df_total_cases_top['DayAdj'].astype(int)
return df_total_cases_top
def get_place_comparison_df(place, place2, level = 'countries', priority = 'now'):
# df_orig = pd.read_csv(data_dir + 'total_cases_{}_normalized.csv'.format(level))
df_orig = Country.all_countries_as_df()
# to force place order
df_orig_c1 = df_orig[df_orig['Name'] == place]
df_orig_c2 = df_orig[df_orig['Name'] == place2]
len_c1 = len(df_orig_c1[df_orig_c1['TotalDeaths'] > 0])
len_c2 = len(df_orig_c2[df_orig_c2['TotalDeaths'] > 0])
# place has to be the one with smallest number of values for Deaths
if (len_c1 > len_c2):
place, place2 = place2, place
df_orig = pd.concat([df_orig_c2, df_orig_c1])
else:
df_orig = pd.concat([df_orig_c1, df_orig_c2])
df_countries_gap = get_places_gap_df(df_orig, place, place2, priority)
df_total_cases_top = get_total_cases_df_adjusted(df_orig, df_countries_gap, place, place2)
place_start_cases = (df_orig.set_index('Name').loc[place,].set_index('Day')['Total'] > 0).idxmax()
df_total_cases_top = df_total_cases_top[df_total_cases_top['DayAdj'] >= place_start_cases]
return df_total_cases_top.reset_index()
def make_chart_comparison(df_places_to_show, level='countries', scale='log', y='total', mode='static'):
    week = mdates.WeekdayLocator(interval=2) # every two weeks
months = mdates.MonthLocator() # every month
month_fmt = mdates.DateFormatter('%b-%d')
var_y_suffix = '' if y == 'total' else 'Per100k'
label_y_scale = ' (log)' if scale == 'log' else ''
label_y_y = '' if y == 'total' else ' per 100k'
# get last date from dataframe
date = df_places_to_show['Date'].max() # datetime.today().strftime('%Y-%m-%d')
gap = int(df_places_to_show['gap'].min())
y_lim = df_places_to_show['Total' + var_y_suffix].max() #* 1.2
# Generate the figure **without using pyplot**.
fig = Figure(figsize=(8, 5))
ax = fig.subplots()
places_to_show = df_places_to_show['Name'].unique()[:2]
place_name = 'Country' if level == 'countries' else 'City'
df_places_to_show = df_places_to_show.rename(columns={'Name': place_name})
ax.set_title('{} Comparison - COVID-19 Cases vs. Deaths - {}'.format(place_name, date), fontsize=14)
sns.scatterplot(x="DayAdj", y='Total' + var_y_suffix, hue=place_name, lw=6, alpha=0.8, data=df_places_to_show,
ax=ax)
ax.xaxis.set_major_locator(months)
ax.xaxis.set_major_formatter(month_fmt)
ax.legend(loc='upper left', title="Confirmed cases", frameon=True)
ax.set(ylabel='Total confirmed cases{}{}'.format(label_y_y, label_y_scale),
xlabel="Date for {} ({}'s data shifted {} days to align death curves)".format(places_to_show[0],
places_to_show[1], gap))
ax.set_ylim(0.5, y_lim) if scale == 'log' else ax.set_ylim(-5, y_lim)
ax2 = ax.twinx()
if scale == 'log':
ax.set_yscale('log')
ax2.set_yscale('log')
ax.yaxis.set_major_formatter(ticker.FuncFormatter(lambda y, _: '{:g}'.format(y)))
ax2.yaxis.set_major_formatter(ticker.FuncFormatter(lambda y, _: '{:g}'.format(y)))
ax2.grid(False)
sns.lineplot(x="DayAdj", y='TotalDeaths' + var_y_suffix, hue=place_name, alpha=0.7, lw=6, ax=ax2,
data=df_places_to_show)
ax2.legend(loc='lower right', title="Deaths", frameon=True)
ax2.set(ylabel='Total deaths{}{}'.format(label_y_y, label_y_scale))
ax2.set_ylim(0.5, y_lim) if scale == 'log' else ax2.set_ylim(-5, y_lim)
logo = plt.imread('./static/img/new_logo_site.png')
ax.figure.figimage(logo, 95, 70, alpha=.35, zorder=1)
fig.tight_layout()
# display(fig)
# Save it to a temporary buffer.
buf = BytesIO()
fig.savefig(buf, format="png")
buf.seek(0)
return buf
def make_chart_response_comparison(df_gantt_c1, df_gantt_c2, level='countries', scale='log', y='total', mode='static', var='StringencyIndexForDisplay'):
# to force place order
df_gantt = pd.concat([df_gantt_c1, df_gantt_c2])
fig = ff.create_gantt(df_gantt, colors=['#93e4c1', '#333F44'], index_col=var,
show_colorbar=False, bar_width=0.2, showgrid_x=True, showgrid_y=True, group_tasks=True,
title='Comparing response',
height=350
)
fig.add_scatter(x=df_gantt_c1['Start'], y=df_gantt_c1['DeathsNorm'], hoverinfo='skip',
line=dict(color='rgb(222, 132, 82)', width=4))
fig.add_scatter(x=df_gantt_c2['Start'], y=df_gantt_c2['DeathsNorm'], hoverinfo='skip',
line=dict(color='rgb(222, 132, 82)', width=4))
fig.update_layout(
xaxis=dict(
showline=True,
showgrid=False,
showticklabels=True,
linecolor='rgb(204, 204, 204)',
linewidth=2,
ticks='outside',
tickfont=dict(
family='Arial',
size=12,
color='rgb(82, 82, 82)',
),
type="date"
),
yaxis=dict(
showgrid=False,
zeroline=False,
showline=False,
showticklabels=True,
autorange=True,
),
autosize=False,
margin=dict(
autoexpand=False,
l=100,
r=20,
t=110,
),
showlegend=False,
plot_bgcolor='white'
)
annotations = []
annotations.append(dict(xref='paper', yref='paper', x=0.5, y=-0.13,
xanchor='center', yanchor='top',
text='Date',
font=dict(family='Arial',
size=12,
color='rgb(150,150,150)'),
showarrow=False))
fig.update_layout(annotations=annotations)
# fig.write_html("gantt.html")
# fig.show()
html = fig.to_html(full_html=False, include_plotlyjs=False, )
return html
def make_chart_comparison_growth(df_places_to_show, level='countries'):
# get last date from dataframe
date = df_places_to_show['Date'].max() # datetime.today().strftime('%Y-%m-%d')
gap = int(df_places_to_show['gap'].min())
# Generate the figure **without using pyplot**.
fig = Figure(figsize=(8, 6))
axs = fig.subplots(nrows=2)
place_name = 'Country' if level == 'countries' else 'City'
axs[0].set_title('{} Comparison - COVID-19 Weekly Growth (%) - {}'.format(place_name, date), fontsize=14)
places_to_show = df_places_to_show['Name'].unique()[:2]
df_places_to_show = df_places_to_show.rename(columns={'Name': place_name})
sns.lineplot(x="DayAdj", y='WeeklyGrowth', hue=place_name, lw = 6, alpha = 0.8, ax=axs[0], data=df_places_to_show)
axs[0].set(ylabel='Weekly growth of cases', xlabel='')
axs[0].set_ylim(0, 500)
sns.lineplot(x="DayAdj", y='WeeklyGrowthDeaths', hue=place_name, alpha = 0.7, lw = 6, ax=axs[1], data=df_places_to_show)
axs[1].set(ylabel='Weekly growth of deaths', xlabel="Day ({}'s data shifted {} days for the death curves to align)".format(places_to_show[1], gap))
axs[1].set_ylim(0, 500)
# Save it to a temporary buffer.
buf = BytesIO()
fig.savefig(buf, format="png")
buf.seek(0)
return buf
def make_chart_response(country, deaths_start, avg_before_deaths, df_to_show):
city = df_to_show['City'].iloc[0]
df_quar = pd.read_csv(data_dir + 'all_countries_response.csv', parse_dates = ['Quarantine'])
quarantine = df_quar[df_quar['Country'] == country]['Quarantine'].iloc[0]
    week = mdates.WeekdayLocator(interval=2) # every two weeks
months = mdates.MonthLocator() # every month
month_fmt = mdates.DateFormatter('%b-%d')
y_lim = df_to_show['TotalDeaths'].max() * 1.2
y2_lim = df_to_show['no2'].max() * 1.8
# Generate the figure **without using pyplot**.
fig = Figure(figsize=(10, 5))
ax = fig.subplots()
ax.set_title('Assessing quarantine implementation - ' + country, fontsize=16, loc='left')
if not pd.isnull(quarantine): ax.axvline(x=quarantine, color='k', linestyle='--', lw=3, label='Official quarantine')
ax.scatter(df_to_show['Date'], df_to_show['TotalDeaths'], color='black', alpha = 0.7, label = 'Confirmed deaths')
ax.xaxis.set_major_locator(week)
ax.xaxis.set_major_formatter(month_fmt)
ax.set_yscale('log')
ax.yaxis.set_major_formatter(ticker.FuncFormatter(lambda y, _: '{:g}'.format(y)))
ax.set_ylim(1, y_lim)
ax.set(ylabel='Confirmed deaths')
ax2 = ax.twinx()
sns.lineplot(x="Date", y='no2', alpha = 0.7, lw = 6, label = 'Daily $\mathrm{{NO}}_2$ pollution *', ax=ax2, data=df_to_show)
sns.lineplot(x="Date", y=avg_before_deaths, alpha = 0.7, lw = 6, label = 'Average pollution **', ax=ax2, data=df_to_show)
ax2.grid(False)
ax2.xaxis.set_major_locator(week)
ax2.xaxis.set_major_formatter(month_fmt)
ax2.set_ylim(1, y2_lim)
ax2.set(ylabel='$\mathrm{{NO}}_2$ pollution')
# ask matplotlib for the plotted objects and their labels
lines, labels = ax.get_legend_handles_labels()
lines2, labels2 = ax2.get_legend_handles_labels()
ax2.legend(lines + lines2, labels + labels2, loc='upper left')
    annotation = """* Median of $\mathrm{{NO}}_2$ measurements in the most affected city ({city}), 5-day rolling average over time series\n** Average daily $\mathrm{{NO}}_2$ measurements from the beginning of 2020 until the first day after {deaths_start} deaths""".format(city=city, deaths_start = deaths_start)
ax.annotate(annotation, (0,0), (0, -30), xycoords='axes fraction', textcoords='offset points', va='top')
logo = plt.imread('./static/img/new_logo_site.png')
ax.figure.figimage(logo, 100, 110, alpha=.35, zorder=1)
fig.tight_layout()
# Save it to a temporary buffer.
buf = BytesIO()
fig.savefig(buf, format="png")
buf.seek(0)
return buf
def get_timeline_list(place, place2, level = 'countries'):
# df_orig = pd.read_csv(data_dir + 'total_cases_{}_normalized.csv'.format(level))
df_orig = Country.all_countries_as_df()
# to force place order
df_orig_c1 = df_orig[df_orig['Name'] == place]
df_orig_c2 = df_orig[df_orig['Name'] == place2]
len_c1 = len(df_orig_c1[df_orig_c1['TotalDeaths'] > 0])
len_c2 = len(df_orig_c2[df_orig_c2['TotalDeaths'] > 0])
# place has to be the one with smallest number of values for Deaths
if (len_c1 > len_c2):
place, place2 = place2, place
df_orig = pd.concat([df_orig_c2, df_orig_c1])
else:
df_orig = pd.concat([df_orig_c1, df_orig_c2])
df_places_gap = get_places_gap_df(df_orig, place, place2)
df_total_cases_top = get_total_cases_df_adjusted(df_orig, df_places_gap, place, place2)
places = [place, place2]
df_places_to_show = df_total_cases_top.loc[places, :]
places_to_show = list(df_places_to_show.index.unique())
df_events_owd = pd.DataFrame({'Date': [], 'Name': [], 'Desc': [], 'FullText': [], 'Highlight': []})
today = df_places_to_show['Date'].max()
for c in places_to_show:
df_place = df_places_to_show.loc[c,]
# df_events_owd = df_events_owd.append(pd.DataFrame({'Date':['2019-12-31'], 'Name': [c], 'Desc':['Begining of epidemic'], 'FullText':['First day of data tracking.']}))
df_events_owd = df_events_owd.append(
pd.Series([(df_place.set_index('Date')['Total'] > 0).idxmax(), c, '1st Confirmed Case', '', 1],
index=df_events_owd.columns), ignore_index=True)
df_events_owd = df_events_owd.append(
pd.Series([(df_place.set_index('Date')['TotalDeaths'] > 0).idxmax(), c, '1st Death', '', 5],
index=df_events_owd.columns), ignore_index=True)
msg = """{} is approximately {} days behind {}'s epidemic progression.
This is an estimate based on matching their death growth curves.""".format(place, abs(
df_places_gap.loc[place2, 'gap']), place2)
df_events_owd = df_events_owd.append( | pd.Series([today, c, 'Today', msg, 1], index=df_events_owd.columns) | pandas.Series |
import pandas as pd
import numpy as np
def handle_missing_values(df, prop_required_row = 0.75, prop_required_col = 0.75):
    ''' function which takes in a dataframe and the required proportions of non-null values
    for rows and columns, then drops the columns and rows that fall below those thresholds'''
#drop columns with nulls
threshold = int(prop_required_col * len(df.index)) # Require that many non-NA values.
df = df.dropna(axis = 1, thresh = threshold)
#drop rows with nulls
threshold = int(prop_required_row * len(df.columns)) # Require that many non-NA values.
df = df.dropna(axis = 0, thresh = threshold)
return df
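# [Added example] Hedged usage sketch, not part of the original module: with the default
# thresholds a column needs at least 75% non-null values to survive, and rows are then
# filtered the same way against the remaining columns.
def _example_handle_missing_values():
    df_small = pd.DataFrame({'a': [1, 2, 3, 4],
                             'b': [1, None, 3, 4],
                             'c': [None, None, None, 1]})
    return handle_missing_values(df_small)   # column 'c' (75% null) is dropped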
def missing_zero_values_table(df):
    '''This function looks at any data set and reports back on zeros and nulls for every column, giving percentages of total values
    and the data types. The message prints out the shape of the data frame and also tells you how many columns have nulls '''
zero_val = (df == 0.00).astype(int).sum(axis=0)
null_count = df.isnull().sum()
mis_val_percent = 100 * df.isnull().sum() / len(df)
mz_table = | pd.concat([zero_val, null_count, mis_val_percent], axis=1) | pandas.concat |
from __future__ import print_function, division, absolute_import
import collections
import functools as ft
import json
import operator as op
import os.path
import re
import pandas as pd
from pandas.core.dtypes.api import is_scalar
def escape_parameters(params):
if isinstance(params, dict):
return {k: escape(v) for k, v in params.items()}
elif isinstance(params, tuple):
return tuple(escape(v) for v in params)
else:
raise NotImplementedError('cannot escape parameters of type %s' % type(params))
def escape(val):
if val is None:
return 'null'
elif isinstance(val, str):
return "'" + val.replace("'", "''") + "'"
elif isinstance(val, (int, bool, float)):
return json.dumps(val)
else:
raise NotImplementedError()
def like(s, pattern):
"""Execute a SQL ``like`` expression against a str-series."""
    pattern = re.escape(pattern)
    # re.escape stopped escaping '%' and '_' in Python 3.7, so handle both escaped and bare forms
    pattern = pattern.replace(r'\%', '.*').replace('%', '.*')
    pattern = pattern.replace(r'\_', '.').replace('_', '.')
pattern = '^' + pattern + '$'
    # sqlite is case insensitive, is this always the case?
if is_scalar(s):
return re.match(pattern, s) is not None
else:
return s.str.contains(pattern)
def not_like(s, pattern):
"""Execute a SQL ``not like`` expression against a str-series."""
res = like(s, pattern)
if is_scalar(s):
return not res
else:
# handle inversion with missing numbers
return (1 - res).astype(res.dtype)
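# [Added example] Hedged usage sketch, not part of the original module: '%' maps to '.*'
# and '_' to '.', anchored at both ends, so SQL-style patterns behave the same on
# scalars and on pandas Series.
def _example_like():
    assert like('hello.py', 'hello%')
    assert not_like('hello.py', '%.txt')
    mask = like(pd.Series(['a.py', 'b.txt']), '%.py')   # -> Series([True, False])
    return mask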
def trim(what, characters, s):
s = _str_funcs(s)
if what == 'leading':
return s.lstrip(characters)
elif what == 'trailing':
return s.rstrip(characters)
elif what == 'both':
return s.strip(characters)
raise ValueError('unknown trim mode %s' % what)
def position(needle, haystack):
return _str_funcs(haystack).find(needle) + 1
def upper(s):
return _str_funcs(s).upper()
def lower(s):
return _str_funcs(s).lower()
def concat(head, *tail):
strings = [head] + list(tail)
strings = [_fillna(s, '') for s in strings]
return ft.reduce(op.add, strings)
def _str_funcs(s):
return s if is_scalar(s) else pd.Series(s).str
def _fillna(obj, missing):
if | is_scalar(obj) | pandas.core.dtypes.api.is_scalar |
# Example of CBF for research-paper domain
# <NAME>
from nltk.stem.snowball import SnowballStemmer
import pandas as pd
from nltk.corpus import stopwords
# --------------------------------------------------------
user_input_data = "It is known that the performance of an optimal control strategy obtained from an off-line " \
"computation is degraded under the presence of model mismatch. In order to improve the control " \
"performance, a hybrid neural network and on-line optimal control strategy are proposed in this " \
"study and demonstrated for the control of a fed-batch bioreactor for ethanol fermentation. The " \
"information of the optimal feed profile of the fed-batch reactor. The simulation results show " \
"that the neural network provides a good estimate of unmeasured variables and the on-line optimal " \
"control with the neural network estimator gives a better control performance in terms of the " \
"amount of the desired ethanol product, compared with a conventional off-line optimal control " \
"method."
user_title = "user undefined title"
# --------------------------------------------------------
metadata = pd.read_json('sample-records', lines=True)
user_data = pd.DataFrame([[user_input_data, user_title]], columns=['paperAbstract', 'title'])
metadata = | pd.concat([metadata, user_data], sort=True) | pandas.concat |
import pandas, numpy
from pandas.util import hash_pandas_object
from .warnings import ignore_warnings
from sklearn.metrics import r2_score, make_scorer
from sklearn.exceptions import DataConversionWarning
from sklearn.model_selection import cross_val_score, cross_val_predict, cross_validate
from sklearn.model_selection import StratifiedKFold, KFold, RepeatedKFold, RepeatedStratifiedKFold
def multiscore(Y, Y_pred, sample_weight=None):
"""
Returns the coefficients of determination R^2 of the prediction.
The coefficient R^2 is defined as (1 - u/v), where u is the residual
sum of squares ((y_true - y_pred) ** 2).sum() and v is the total
sum of squares ((y_true - y_true.mean()) ** 2).sum().
The best possible score is 1.0 and it can be negative (because the
model can be arbitrarily worse). A constant model that always
predicts the expected value of y, disregarding the input features,
would get a R^2 score of 0.0.
Notes
-----
R^2 is calculated by weighting all the targets equally using
`multioutput='raw_values'`. See documentation for
sklearn.metrics.r2_score for more information.
Parameters
----------
    Y : array-like, shape = (n_samples, n_outputs)
        True target values.
    Y_pred : array-like, shape = (n_samples, n_outputs)
        Predicted values.
    sample_weight : array-like, shape = [n_samples], optional
        Sample weights.
    Returns
    -------
    score : ndarray
        R^2 of Y_pred wrt. Y, one value per output.
"""
return r2_score(Y, Y_pred, sample_weight=sample_weight,
multioutput='raw_values')
def single_multiscore(n=0):
return lambda *args, **kwargs: multiscore(*args, **kwargs)[n]
def check_cv(cv='warn', y=None, classifier=False, random_state=None, n_repeats=1, shuffle=False):
"""Input checker utility for building a cross-validator
Parameters
----------
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross-validation,
- integer, to specify the number of folds.
- :term:`CV splitter`,
- An iterable yielding (train, test) splits as arrays of indices.
For integer/None inputs, if classifier is True and ``y`` is either
binary or multiclass, :class:`StratifiedKFold` is used. In all other
cases, :class:`KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
.. versionchanged:: 0.20
``cv`` default value will change from 3-fold to 5-fold in v0.22.
y : array-like, optional
The target variable for supervised learning problems.
classifier : boolean, optional, default False
Whether the task is a classification task, in which case
stratified KFold will be used.
n_repeats : int, default 1
If greater than 1, a 'RepeatedKFold' or 'RepeatedStratifiedKFold'
will be used.
shuffle : bool, default False
Whether to shuffle the observation indexes before cross-validation.
This happens automatically when n_repeats is greater than 1.
random_state : int or RandomState
Used for shuffling.
Returns
-------
checked_cv : a cross-validator instance.
The return value is a cross-validator which generates the train/test
splits via the ``split`` method.
"""
import numbers
from sklearn.utils.multiclass import type_of_target
if n_repeats is None:
n_repeats = 1
if isinstance(cv, numbers.Integral):
        if (classifier and (y is not None) and (type_of_target(y) in ('binary', 'multiclass'))):
            if n_repeats>1:
                # Repeated splitters shuffle internally and accept no `shuffle` argument
                return RepeatedStratifiedKFold(n_splits=cv, n_repeats=n_repeats, random_state=random_state)
            else:
                return StratifiedKFold(cv, random_state=random_state, shuffle=shuffle)
        else:
            if n_repeats>1:
                # Repeated splitters shuffle internally and accept no `shuffle` argument
                return RepeatedKFold(n_splits=cv, n_repeats=n_repeats, random_state=random_state)
            else:
                return KFold(cv, random_state=random_state, shuffle=shuffle)
return cv # New style cv objects are passed without any modification
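# [Added example] Hedged usage sketch, not part of the original module: check_cv mirrors
# sklearn's helper but upgrades to the Repeated* splitters when n_repeats > 1; y is only
# inspected to decide whether stratification applies.
def _example_check_cv():
    y_class = numpy.array([0, 1, 0, 1, 0, 1, 0, 1])
    cv = check_cv(4, y=y_class, classifier=True, n_repeats=3, random_state=0)
    return cv   # RepeatedStratifiedKFold(n_splits=4, n_repeats=3, random_state=0)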
class CrossValMixin:
def _cross_validate(
self,
X,
Y,
cv=5,
S=None,
random_state=None,
cache_metadata=None,
use_cache=True,
n_repeats=1,
shuffle=False,
n_jobs=-1,
):
"""
Compute the cross validation scores for this model.
Unlike other scikit-learn scores, this method returns
a separate score value for each output when the estimator
is for a multi-output process.
If the estimator includes a `sample_stratification`
attribute, it is used along with the
ExogenouslyStratifiedKFold splitter.
Args:
X, Y : array-like
The independent and dependent data to use for
cross-validation.
cv : int, default 5
The number of folds to use in cross-validation.
S : array-like
The stratification data to use for stratified
cross-validation. This data must be categorical
(or convertible into such), and should be a
vector of length equal to the first dimension
(i.e. number of observations) in the `X` and `Y`
arrays.
Returns:
pandas.Series: The cross-validation scores, by output.
"""
if not hasattr(self, '_cross_validate_results'):
self._cross_validate_results = {}
try:
if random_state is None or not use_cache:
raise KeyError()
hashkey = hash((
hash_pandas_object(X).sum(),
hash_pandas_object(Y).sum(),
cv,
| hash_pandas_object(S) | pandas.util.hash_pandas_object |
# AUTOGENERATED! DO NOT EDIT! File to edit: 00_core.ipynb (unless otherwise specified).
__all__ = ['makeMixedDataFrame', 'getCrashes', 'is_numeric', 'drop_singletons', 'discretize']
# Cell
import pandas as pd
from pandas.api.types import is_numeric_dtype as isnum
#from matplotlib.pyplot import rcParams
# Cell
def makeMixedDataFrame():
'''Return a constant mixed-type dataframe [float, float, str, datetime]'''
return pd.DataFrame(
{'A': {0: 0.0, 1: 1.0, 2: 2.0, 3: 3.0, 4: 4.0},
'B': {0: 0.0, 1: 1.0, 2: 0.0, 3: 1.0, 4: 0.0},
'C': {0: 'foo1', 1: 'foo2', 2: 'foo3', 3: 'foo4', 4: 'foo5'},
'D': {0: pd.Timestamp('2009-01-01 00:00:00'),
1: | pd.Timestamp('2009-01-02 00:00:00') | pandas.Timestamp |
import openpyxl
import pandas as pd
REQUIRED_COLUMNS = ['<NAME>', 'Name', 'M/F', 'Field of Study', 'Nationality']
teaming_columns = ['1st', '2nd', 'Partner']
# Source: https://sashat.me/2017/01/11/list-of-20-simple-distinct-colors/
_colors = ['#e6194B', '#3cb44b', '#ffe119', '#4363d8', '#f58231', '#911eb4',
'#42d4f4', '#f032e6', '#bfef45', '#fabebe', '#469990', '#e6beff',
'#9A6324', '#fffac8', '#800000', '#000075']
_font_colors = ['white', 'white', 'black', 'white', 'white', 'white',
'black', 'white', 'black', 'black', 'white', 'black',
'white', 'black', 'white', 'white']
# Specified by Christian
colors = [
'#c27ba0', '#8e7ac5', '#d9d9d9', '#6b9cee', '#92c57a', '#ffda5c', '#f7b365',
'#ce4019', '#ead1dc', '#d9d0ea', '#f3f3f3', '#c7d8f9', '#d9e9d2', '#fff1ca',
'#fde5cb', '#f5ccca']
font_colors = [
'white', 'white', 'black', 'white', 'black', 'black', 'black',
'white', 'black', 'black', 'black', 'black', 'black', 'black',
'black', 'black']
disciplines = [
'Business', 'Creative Disciplines', 'Engineering', 'Humanities',
'Life Sciences', 'Media', 'Social Sciences' ]
discipline_colors = [
'#4783eb', '#ff9a00', '#68a94a', '#8e7ac5', '#d9d0ea', '#fde5cb', '#ffff00']
discipline_font_colors = [
'white', 'black', 'white', 'white', 'black', 'black', 'black']
def add_teaming_colors(teaming, workbook, worksheet):
first_column = 65 + len(teaming.columns) - 3 # A=65, equals list(teaming.columns).index('1st')
start = f'{chr(first_column)}2'
end = f'{chr(first_column+2)}{len(teaming.index)+1}'
for i, (color, font_color) in enumerate(zip(colors, font_colors)):
color_format = workbook.add_format({'bg_color': color, 'font_color': font_color})
worksheet.conditional_format(f'{start}:{end}', {
'type': 'cell',
'criteria': 'equal to',
'value': i + 1,
'format': color_format})
def add_discipline_colors(teaming, workbook, worksheet):
discipline_column = chr(65 + list(teaming.columns).index('Field of Study'))
start = f'{discipline_column}2'
end = f'{discipline_column}{len(teaming.index)+1}'
for discipline, color, font_color in zip(disciplines, discipline_colors, discipline_font_colors):
color_format = workbook.add_format({'bg_color': color, 'font_color': font_color})
worksheet.conditional_format(f'{start}:{end}', {
'type': 'cell',
'criteria': 'equal to',
# Excel requires strings to be double quoted
'value': f'"{discipline}"',
'format': color_format})
def add_centering_and_spacing(teaming, workbook, worksheet):
centered = workbook.add_format()
centered.set_align('center')
for idx, col_name in enumerate(teaming):
col_len = max((
teaming[col_name].astype(str).str.len().max(), # len of largest item
len(str(col_name)) # len of column name/header
)) + 1 # Adding a little extra space
worksheet.set_column(idx, idx, col_len, centered if col_len < 5 else None)
def add_collisions(collisions, writer, workbook):
    # the to_excel call below creates the 'Collisions' sheet on the writer
collisions = pd.DataFrame(collisions, columns=['Student', 'Student', 'Teams'])
collisions.to_excel(writer, sheet_name='Collisions', index=False)
    worksheet = writer.sheets['Collisions']
def export(teaming, filename, collisions=None):
# Create a Pandas Excel writer using XlsxWriter as the engine.
writer = | pd.ExcelWriter(f'{filename}.xlsx', engine='xlsxwriter') | pandas.ExcelWriter |
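# Illustrative sketch (not part of the fragment above): the xlsxwriter pattern the
# export helpers rely on -- write a frame with pd.ExcelWriter, then attach conditional
# formats through writer.book / writer.sheets. The file name and data are made up.
import pandas as pd

df = pd.DataFrame({'Name': ['A', 'B'], '1st': [1, 2]})
with pd.ExcelWriter('demo_teaming.xlsx', engine='xlsxwriter') as writer:
    df.to_excel(writer, sheet_name='Teamings', index=False)
    workbook = writer.book
    worksheet = writer.sheets['Teamings']
    fmt = workbook.add_format({'bg_color': '#e6194B', 'font_color': 'white'})
    worksheet.conditional_format('B2:B3', {
        'type': 'cell', 'criteria': 'equal to', 'value': 1, 'format': fmt})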
import ast
import time
import numpy as np
import pandas as pd
from copy import deepcopy
from typing import Any
from matplotlib import dates as mdates
from scipy import stats
from aistac.components.aistac_commons import DataAnalytics
from ds_discovery.components.transitioning import Transition
from ds_discovery.components.commons import Commons
from aistac.properties.abstract_properties import AbstractPropertyManager
from ds_discovery.components.discovery import DataDiscovery
from ds_discovery.intent.abstract_common_intent import AbstractCommonsIntentModel
__author__ = '<NAME>'
class AbstractBuilderIntentModel(AbstractCommonsIntentModel):
_INTENT_PARAMS = ['self', 'save_intent', 'column_name', 'intent_order',
'replace_intent', 'remove_duplicates', 'seed']
def __init__(self, property_manager: AbstractPropertyManager, default_save_intent: bool=None,
default_intent_level: [str, int, float]=None, default_intent_order: int=None,
default_replace_intent: bool=None):
"""initialisation of the Intent class.
:param property_manager: the property manager class that references the intent contract.
:param default_save_intent: (optional) The default action for saving intent in the property manager
:param default_intent_level: (optional) the default level intent should be saved at
:param default_intent_order: (optional) if the default behaviour for the order should be next available order
:param default_replace_intent: (optional) the default replace existing intent behaviour
"""
default_save_intent = default_save_intent if isinstance(default_save_intent, bool) else True
default_replace_intent = default_replace_intent if isinstance(default_replace_intent, bool) else True
default_intent_level = default_intent_level if isinstance(default_intent_level, (str, int, float)) else 'A'
default_intent_order = default_intent_order if isinstance(default_intent_order, int) else 0
intent_param_exclude = ['size']
intent_type_additions = [np.int8, np.int16, np.int32, np.int64, np.float32, np.float64, pd.Timestamp]
super().__init__(property_manager=property_manager, default_save_intent=default_save_intent,
intent_param_exclude=intent_param_exclude, default_intent_level=default_intent_level,
default_intent_order=default_intent_order, default_replace_intent=default_replace_intent,
intent_type_additions=intent_type_additions)
def run_intent_pipeline(self, canonical: Any=None, intent_levels: [str, int, list]=None, run_book: str=None,
seed: int=None, simulate: bool=None, **kwargs) -> pd.DataFrame:
"""Collectively runs all parameterised intent taken from the property manager against the code base as
defined by the intent_contract. The whole run can be seeded though any parameterised seeding in the intent
contracts will take precedence
:param canonical: a direct or generated pd.DataFrame. see context notes below
:param intent_levels: (optional) a single or list of intent_level to run in order given
:param run_book: (optional) a preset runbook of intent_level to run in order
:param seed: (optional) a seed value that will be applied across the run: default to None
:param simulate: (optional) returns a report of the order of run and return the indexed column order of run
:return: a pandas dataframe
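        Example (illustrative; `builder` stands in for a concrete intent model instance and is not defined here):
            df = builder.run_intent_pipeline(canonical=1000, seed=31)
            # -> a 1000 row pd.DataFrame built from the saved intent, run in frame_starter/get/model/correlate/frame order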
"""
simulate = simulate if isinstance(simulate, bool) else False
col_sim = {"column": [], "order": [], "method": []}
# legacy
if 'size' in kwargs.keys():
canonical = kwargs.pop('size')
canonical = self._get_canonical(canonical)
size = canonical.shape[0] if canonical.shape[0] > 0 else 1000
# test if there is any intent to run
if self._pm.has_intent():
# get the list of levels to run
if isinstance(intent_levels, (str, list)):
column_names = Commons.list_formatter(intent_levels)
elif isinstance(run_book, str) and self._pm.has_run_book(book_name=run_book):
column_names = self._pm.get_run_book(book_name=run_book)
else:
# put all the intent in order of model, get, correlate, associate
_model = []
_get = []
_correlate = []
_frame_start = []
_frame_end = []
for column in self._pm.get_intent().keys():
for order in self._pm.get(self._pm.join(self._pm.KEY.intent_key, column), {}):
for method in self._pm.get(self._pm.join(self._pm.KEY.intent_key, column, order), {}).keys():
if str(method).startswith('get_'):
if column in _correlate + _frame_start + _frame_end:
continue
_get.append(column)
elif str(method).startswith('model_'):
_model.append(column)
elif str(method).startswith('correlate_'):
if column in _get:
_get.remove(column)
_correlate.append(column)
elif str(method).startswith('frame_'):
if column in _get:
_get.remove(column)
if str(method).startswith('frame_starter'):
_frame_start.append(column)
else:
_frame_end.append(column)
column_names = Commons.list_unique(_frame_start + _get + _model + _correlate + _frame_end)
for column in column_names:
level_key = self._pm.join(self._pm.KEY.intent_key, column)
for order in sorted(self._pm.get(level_key, {})):
for method, params in self._pm.get(self._pm.join(level_key, order), {}).items():
try:
if method in self.__dir__():
if simulate:
col_sim['column'].append(column)
col_sim['order'].append(order)
col_sim['method'].append(method)
continue
result = []
params.update(params.pop('kwargs', {}))
if isinstance(seed, int):
params.update({'seed': seed})
_ = params.pop('intent_creator', 'Unknown')
if str(method).startswith('get_'):
result = eval(f"self.{method}(size=size, save_intent=False, **params)",
globals(), locals())
elif str(method).startswith('correlate_'):
result = eval(f"self.{method}(canonical=canonical, save_intent=False, **params)",
globals(), locals())
elif str(method).startswith('model_'):
canonical = eval(f"self.{method}(canonical=canonical, save_intent=False, **params)",
globals(), locals())
continue
elif str(method).startswith('frame_starter'):
canonical = self._get_canonical(params.pop('canonical', canonical), deep_copy=False)
size = canonical.shape[0]
canonical = eval(f"self.{method}(canonical=canonical, save_intent=False, **params)",
globals(), locals())
continue
elif str(method).startswith('frame_'):
canonical = eval(f"self.{method}(canonical=canonical, save_intent=False, **params)",
globals(), locals())
continue
if 0 < size != len(result):
raise IndexError(f"The index size of '{column}' is '{len(result)}', "
f"should be {size}")
canonical[column] = result
except ValueError as ve:
raise ValueError(f"intent '{column}', order '{order}', method '{method}' failed with: {ve}")
except TypeError as te:
raise TypeError(f"intent '{column}', order '{order}', method '{method}' failed with: {te}")
if simulate:
return pd.DataFrame.from_dict(col_sim)
return canonical
def _get_number(self, from_value: [int, float]=None, to_value: [int, float]=None, relative_freq: list=None,
precision: int=None, ordered: str=None, at_most: int=None, size: int=None,
seed: int=None) -> list:
""" returns a number in the range from_value to to_value. if only to_value given from_value is zero
:param from_value: (signed) integer to start from
:param to_value: optional, (signed) integer the number sequence goes to but not include
:param relative_freq: a weighting pattern or probability that does not have to add to 1
:param precision: the precision of the returned number. if None then assumes int value else float
        :param ordered: order the data ascending 'asc' or descending 'des', values accepted 'asc' or 'des'
:param at_most: the most times a selection should be chosen
:param size: the size of the sample
:param seed: a seed value for the random function: default to None
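        Example (illustrative; `builder` stands in for a concrete intent model instance and is not defined here):
            builder._get_number(from_value=1.0, to_value=2.0, precision=2, size=3, seed=31)
            # -> e.g. [1.25, 1.87, 1.03]  three floats in [1.0, 2.0) rounded to 2 decimal places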
"""
if not isinstance(from_value, (int, float)) and not isinstance(to_value, (int, float)):
raise ValueError(f"either a 'range_value' or a 'range_value' and 'to_value' must be provided")
if not isinstance(from_value, (float, int)):
from_value = 0
if not isinstance(to_value, (float, int)):
(from_value, to_value) = (0, from_value)
if to_value <= from_value:
raise ValueError("The number range must be a positive different, found to_value <= from_value")
at_most = 0 if not isinstance(at_most, int) else at_most
size = 1 if size is None else size
_seed = self._seed() if seed is None else seed
precision = 3 if not isinstance(precision, int) else precision
if precision == 0:
from_value = int(round(from_value, 0))
to_value = int(round(to_value, 0))
is_int = True if (isinstance(to_value, int) and isinstance(from_value, int)) else False
if is_int:
precision = 0
# build the distribution sizes
if isinstance(relative_freq, list) and len(relative_freq) > 1:
freq_dist_size = self._freq_dist_size(relative_freq=relative_freq, size=size, seed=_seed)
else:
freq_dist_size = [size]
# generate the numbers
rtn_list = []
generator = np.random.default_rng(seed=_seed)
dtype = int if is_int else float
bins = np.linspace(from_value, to_value, len(freq_dist_size) + 1, dtype=dtype)
for idx in np.arange(1, len(bins)):
low = bins[idx - 1]
high = bins[idx]
if low >= high:
continue
elif at_most > 0:
sample = []
for _ in np.arange(at_most, dtype=dtype):
count_size = freq_dist_size[idx - 1] * generator.integers(2, 4, size=1)[0]
sample += list(set(np.linspace(bins[idx - 1], bins[idx], num=count_size, dtype=dtype,
endpoint=False)))
if len(sample) < freq_dist_size[idx - 1]:
raise ValueError(f"The value range has insufficient samples to choose from when using at_most."
f"Try increasing the range of values to sample.")
rtn_list += list(generator.choice(sample, size=freq_dist_size[idx - 1], replace=False))
else:
if dtype == int:
rtn_list += generator.integers(low=low, high=high, size=freq_dist_size[idx - 1]).tolist()
else:
choice = generator.random(size=freq_dist_size[idx - 1], dtype=float)
choice = np.round(choice * (high-low)+low, precision).tolist()
# make sure the precision
choice = [high - 10**(-precision) if x >= high else x for x in choice]
rtn_list += choice
# order or shuffle the return list
if isinstance(ordered, str) and ordered.lower() in ['asc', 'des']:
            rtn_list.sort(reverse=True if ordered.lower() == 'des' else False)
else:
generator.shuffle(rtn_list)
return rtn_list
def _get_category(self, selection: list, relative_freq: list=None, size: int=None, at_most: int=None,
seed: int=None) -> list:
""" returns a category from a list. Of particular not is the at_least parameter that allows you to
control the number of times a selection can be chosen.
:param selection: a list of items to select from
:param relative_freq: a weighting pattern that does not have to add to 1
:param size: an optional size of the return. default to 1
:param at_most: the most times a selection should be chosen
:param seed: a seed value for the random function: default to None
:return: an item or list of items chosen from the list
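        Example (illustrative; `builder` stands in for a concrete intent model instance and is not defined here):
            builder._get_category(selection=['M', 'F', 'U'], relative_freq=[5, 4, 1], size=4, seed=31)
            # -> e.g. ['M', 'F', 'M', 'M']  'M' and 'F' dominate because of the weighting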
"""
if not isinstance(selection, list) or len(selection) == 0:
return [None]*size
_seed = self._seed() if seed is None else seed
select_index = self._get_number(len(selection), relative_freq=relative_freq, at_most=at_most, size=size,
seed=_seed)
rtn_list = [selection[i] for i in select_index]
return list(rtn_list)
def _get_datetime(self, start: Any, until: Any, relative_freq: list=None, at_most: int=None, ordered: str=None,
date_format: str=None, as_num: bool=None, ignore_time: bool=None, size: int=None,
seed: int=None, day_first: bool=None, year_first: bool=None) -> list:
""" returns a random date between two date and/or times. weighted patterns can be applied to the overall date
range.
if a signed 'int' type is passed to the start and/or until dates, the inferred date will be the current date
time with the integer being the offset from the current date time in 'days'.
if a dictionary of time delta name values is passed this is treated as a time delta from the start time.
        for example if start = 0, until = {days=1, hours=3} the date range will be between now and 1 day and 3 hours from now
Note: If no patterns are set this will return a linearly random number between the range boundaries.
:param start: the start boundary of the date range can be str, datetime, pd.datetime, pd.Timestamp or int
:param until: up until boundary of the date range can be str, datetime, pd.datetime, pd.Timestamp, pd.delta, int
:param relative_freq: (optional) A pattern across the whole date range.
:param at_most: the most times a selection should be chosen
        :param ordered: order the data ascending 'asc' or descending 'des', values accepted 'asc' or 'des'
:param ignore_time: ignore time elements and only select from Year, Month, Day elements. Default is False
:param date_format: the string format of the date to be returned. if not set then pd.Timestamp returned
:param as_num: returns a list of Matplotlib date values as a float. Default is False
:param size: the size of the sample to return. Default to 1
:param seed: a seed value for the random function: default to None
:param year_first: specifies if to parse with the year first
If True parses dates with the year first, eg 10/11/12 is parsed as 2010-11-12.
                If both dayfirst and yearfirst are True, yearfirst takes precedence (same as dateutil).
:param day_first: specifies if to parse with the day first
If True, parses dates with the day first, eg %d-%m-%Y.
                If False, dates are parsed preferring the month first, normally %m-%d-%Y (but not strict)
:return: a date or size of dates in the format given.
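        Example (illustrative; `builder` stands in for a concrete intent model instance and is not defined here):
            builder._get_datetime(start=-30, until=0, size=2, seed=31)
            # -> two pd.Timestamp values falling within the last 30 days
            builder._get_datetime(start='2021-01-01', until='2021-02-01', date_format='%Y-%m-%d', size=2, seed=31)
            # -> e.g. ['2021-01-17', '2021-01-05']  formatted strings instead of Timestamps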
"""
# pre check
if start is None or until is None:
raise ValueError("The start or until parameters cannot be of NoneType")
# Code block for intent
as_num = False if not isinstance(as_num, bool) else as_num
ignore_time = False if not isinstance(ignore_time, bool) else ignore_time
size = 1 if size is None else size
_seed = self._seed() if seed is None else seed
if isinstance(start, int):
start = (pd.Timestamp.now() + pd.Timedelta(days=start))
if isinstance(until, int):
until = (pd.Timestamp.now() + pd.Timedelta(days=until))
if isinstance(until, dict):
until = (start + pd.Timedelta(**until))
if start == until:
rtn_list = [self._convert_date2value(start, day_first=day_first, year_first=year_first)[0]] * size
else:
_dt_start = self._convert_date2value(start, day_first=day_first, year_first=year_first)[0]
_dt_until = self._convert_date2value(until, day_first=day_first, year_first=year_first)[0]
precision = 15
if ignore_time:
_dt_start = int(_dt_start)
_dt_until = int(_dt_until)
precision = 0
rtn_list = self._get_number(from_value=_dt_start, to_value=_dt_until, relative_freq=relative_freq,
at_most=at_most, ordered=ordered, precision=precision, size=size, seed=seed)
if not as_num:
rtn_list = mdates.num2date(rtn_list)
if isinstance(date_format, str):
rtn_list = pd.Series(rtn_list).dt.strftime(date_format).to_list()
else:
rtn_list = pd.Series(rtn_list).dt.tz_convert(None).to_list()
return rtn_list
def _get_intervals(self, intervals: list, relative_freq: list=None, precision: int=None, size: int=None,
seed: int=None) -> list:
""" returns a number based on a list selection of tuple(lower, upper) interval
:param intervals: a list of unique tuple pairs representing the interval lower and upper boundaries
:param relative_freq: a weighting pattern or probability that does not have to add to 1
:param precision: the precision of the returned number. if None then assumes int value else float
:param size: the size of the sample
:param seed: a seed value for the random function: default to None
:return: a random number
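        Example (illustrative; `builder` stands in for a concrete intent model instance and is not defined here):
            builder._get_intervals(intervals=[(0, 10), (90, 100)], relative_freq=[9, 1], size=5, seed=31)
            # -> mostly values drawn from 0-10 with the occasional value from 90-100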
"""
# Code block for intent
size = 1 if size is None else size
if not isinstance(precision, int):
precision = 0 if all(isinstance(v[0], int) and isinstance(v[1], int) for v in intervals) else 3
_seed = self._seed() if seed is None else seed
if not all(isinstance(value, tuple) for value in intervals):
raise ValueError("The intervals list must be a list of tuples")
interval_list = self._get_category(selection=intervals, relative_freq=relative_freq, size=size, seed=_seed)
interval_counts = pd.Series(interval_list, dtype='object').value_counts()
rtn_list = []
for index in interval_counts.index:
size = interval_counts[index]
if size == 0:
continue
if len(index) == 2:
(lower, upper) = index
if index == 0:
closed = 'both'
else:
closed = 'right'
else:
(lower, upper, closed) = index
if lower == upper:
rtn_list += [round(lower, precision)] * size
continue
if precision == 0:
margin = 1
else:
margin = 10**(((-1)*precision)-1)
if str.lower(closed) == 'neither':
lower += margin
upper -= margin
elif str.lower(closed) == 'right':
lower += margin
elif str.lower(closed) == 'both':
upper += margin
# correct adjustments
if lower >= upper:
upper = lower + margin
rtn_list += self._get_number(lower, upper, precision=precision, size=size, seed=_seed)
np.random.default_rng(seed=_seed).shuffle(rtn_list)
return rtn_list
def _get_dist_normal(self, mean: float, std: float, size: int=None, seed: int=None) -> list:
"""A normal (Gaussian) continuous random distribution.
:param mean: The mean (“centre”) of the distribution.
:param std: The standard deviation (jitter or “width”) of the distribution. Must be >= 0
:param size: the size of the sample. if a tuple of intervals, size must match the tuple
:param seed: a seed value for the random function: default to None
:return: a random number
"""
size = 1 if size is None else size
_seed = self._seed() if seed is None else seed
generator = np.random.default_rng(seed=_seed)
rtn_list = list(generator.normal(loc=mean, scale=std, size=size))
return rtn_list
def _get_dist_logistic(self, mean: float, std: float, size: int=None, seed: int=None) -> list:
"""A logistic continuous random distribution.
:param mean: The mean (“centre”) of the distribution.
:param std: The standard deviation (jitter or “width”) of the distribution. Must be >= 0
:param size: the size of the sample. if a tuple of intervals, size must match the tuple
:param seed: a seed value for the random function: default to None
:return: a random number
"""
size = 1 if size is None else size
_seed = self._seed() if seed is None else seed
generator = np.random.default_rng(seed=_seed)
rtn_list = list(generator.logistic(loc=mean, scale=std, size=size))
return rtn_list
def _get_dist_exponential(self, scale: [int, float], size: int=None, seed: int=None) -> list:
"""An exponential continuous random distribution.
:param scale: The scale of the distribution.
:param size: the size of the sample. if a tuple of intervals, size must match the tuple
:param seed: a seed value for the random function: default to None
:return: a random number
"""
size = 1 if size is None else size
_seed = self._seed() if seed is None else seed
generator = np.random.default_rng(seed=_seed)
rtn_list = list(generator.exponential(scale=scale, size=size))
return rtn_list
def _get_dist_gumbel(self, mean: float, std: float, size: int=None, seed: int=None) -> list:
"""An gumbel continuous random distribution.
The Gumbel (or Smallest Extreme Value (SEV) or the Smallest Extreme Value Type I) distribution is one of
a class of Generalized Extreme Value (GEV) distributions used in modeling extreme value problems.
The Gumbel is a special case of the Extreme Value Type I distribution for maximums from distributions
with “exponential-like” tails.
:param mean: The mean (“centre”) of the distribution.
:param std: The standard deviation (jitter or “width”) of the distribution. Must be >= 0
:param size: the size of the sample. if a tuple of intervals, size must match the tuple
:param seed: a seed value for the random function: default to None
:return: a random number
"""
size = 1 if size is None else size
_seed = self._seed() if seed is None else seed
generator = np.random.default_rng(seed=_seed)
rtn_list = list(generator.gumbel(loc=mean, scale=std, size=size))
return rtn_list
def _get_dist_binomial(self, trials: int, probability: float, size: int=None, seed: int=None) -> list:
"""A binomial discrete random distribution. The Binomial Distribution represents the number of
successes and failures in n independent Bernoulli trials for some given value of n
:param trials: the number of trials to attempt, must be >= 0.
:param probability: the probability distribution, >= 0 and <=1.
:param size: the size of the sample. if a tuple of intervals, size must match the tuple
:param seed: a seed value for the random function: default to None
:return: a random number
"""
size = 1 if size is None else size
_seed = self._seed() if seed is None else seed
generator = np.random.default_rng(seed=_seed)
rtn_list = list(generator.binomial(n=trials, p=probability, size=size))
return rtn_list
def _get_dist_poisson(self, interval: float, size: int=None, seed: int=None) -> list:
"""A Poisson discrete random distribution.
The Poisson distribution
.. math:: f(k; \lambda)=\frac{\lambda^k e^{-\lambda}}{k!}
For events with an expected separation :math:`\lambda` the Poisson
distribution :math:`f(k; \lambda)` describes the probability of
:math:`k` events occurring within the observed
interval :math:`\lambda`.
Because the output is limited to the range of the C int64 type, a
ValueError is raised when `lam` is within 10 sigma of the maximum
representable value.
:param interval: Expectation of interval, must be >= 0.
:param size: the size of the sample.
:param seed: a seed value for the random function: default to None
:return: a random number
"""
size = 1 if size is None else size
_seed = self._seed() if seed is None else seed
generator = np.random.default_rng(seed=_seed)
rtn_list = list(generator.poisson(lam=interval, size=size))
return rtn_list
def _get_dist_bernoulli(self, probability: float, size: int=None, seed: int=None) -> list:
"""A Bernoulli discrete random distribution using scipy
:param probability: the probability occurrence
:param size: the size of the sample
:param seed: a seed value for the random function: default to None
:return: a random number
"""
size = 1 if size is None else size
_seed = self._seed() if seed is None else seed
rtn_list = list(stats.bernoulli.rvs(p=probability, size=size, random_state=_seed))
return rtn_list
def _get_dist_bounded_normal(self, mean: float, std: float, lower: float, upper: float, precision: int=None,
size: int=None, seed: int=None) -> list:
"""A bounded normal continuous random distribution.
:param mean: the mean of the distribution
:param std: the standard deviation
:param lower: the lower limit of the distribution
:param upper: the upper limit of the distribution
:param precision: the precision of the returned number. if None then assumes int value else float
:param size: the size of the sample
:param seed: a seed value for the random function: default to None
:return: a random number
"""
size = 1 if size is None else size
precision = precision if isinstance(precision, int) else 3
_seed = self._seed() if seed is None else seed
rtn_list = stats.truncnorm((lower-mean)/std, (upper-mean)/std, loc=mean, scale=std).rvs(size).round(precision)
return rtn_list
def _get_distribution(self, distribution: str, package: str=None, precision: int=None, size: int=None,
seed: int=None, **kwargs) -> list:
"""returns a number based the distribution type.
:param distribution: The string name of the distribution function from numpy random Generator class
:param package: (optional) The name of the package to use, options are 'numpy' (default) and 'scipy'.
:param precision: (optional) the precision of the returned number
:param size: (optional) the size of the sample
:param seed: (optional) a seed value for the random function: default to None
:return: a random number
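        Example (illustrative; `builder` stands in for a concrete intent model instance and is not defined here):
            builder._get_distribution(distribution='poisson', lam=2, size=3, seed=31)
            # -> three draws from numpy Generator.poisson(lam=2), rounded to the default precision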
"""
size = 1 if size is None else size
_seed = self._seed() if seed is None else seed
precision = 3 if precision is None else precision
if isinstance(package, str) and package == 'scipy':
rtn_list = eval(f"stats.{distribution}.rvs(size=size, random_state=_seed, **kwargs)", globals(), locals())
else:
generator = np.random.default_rng(seed=_seed)
rtn_list = eval(f"generator.{distribution}(size=size, **kwargs)", globals(), locals())
rtn_list = list(rtn_list.round(precision))
return rtn_list
def _get_selection(self, canonical: Any, column_header: str, relative_freq: list=None, sample_size: int=None,
                       selection_size: int=None, size: int=None, at_most: int=None, shuffle: bool=None,
seed: int=None) -> list:
""" returns a random list of values where the selection of those values is taken from a connector source.
:param canonical: a pd.DataFrame as the reference dataframe
:param column_header: the name of the column header to correlate
:param relative_freq: (optional) a weighting pattern of the final selection
:param selection_size: (optional) the selection to take from the sample size, normally used with shuffle
:param sample_size: (optional) the size of the sample to take from the reference file
:param at_most: (optional) the most times a selection should be chosen
:param shuffle: (optional) if the selection should be shuffled before selection. Default is true
:param size: (optional) size of the return. default to 1
:param seed: (optional) a seed value for the random function: default to None
:return: list
The canonical is normally a connector contract str reference or a set of parameter instructions on how to
generate a pd.Dataframe but can be a pd.DataFrame. the description of each is:
- pd.Dataframe -> a deep copy of the pd.DataFrame
- pd.Series or list -> creates a pd.DataFrame of one column with the 'header' name or 'default' if not given
- str -> instantiates a connector handler with the connector_name and loads the DataFrame from the connection
- int -> generates an empty pd.Dataframe with an index size of the int passed.
- dict -> use canonical2dict(...) to help construct a dict with a 'method' to build a pd.DataFrame
methods:
- model_*(...) -> one of the SyntheticBuilder model methods and parameters
- @empty -> generates an empty pd.DataFrame where size and headers can be passed
:size sets the index size of the dataframe
:headers any initial headers for the dataframe
- @generate -> generate a synthetic file from a remote Domain Contract
:task_name the name of the SyntheticBuilder task to run
:repo_uri the location of the Domain Product
:size (optional) a size to generate
:seed (optional) if a seed should be applied
:run_book (optional) if specific intent should be run only
"""
canonical = self._get_canonical(canonical)
_seed = self._seed() if seed is None else seed
if isinstance(canonical, dict):
canonical = pd.DataFrame.from_dict(data=canonical)
if column_header not in canonical.columns:
raise ValueError(f"The column '{column_header}' not found in the canonical")
_values = canonical[column_header].iloc[:sample_size]
        shuffle = shuffle if isinstance(shuffle, bool) else True
        if isinstance(selection_size, int) and shuffle:
_values = _values.sample(frac=1, random_state=_seed).reset_index(drop=True)
if isinstance(selection_size, int) and 0 < selection_size < _values.size:
_values = _values.iloc[:selection_size]
return self._get_category(selection=_values.to_list(), relative_freq=relative_freq, size=size, at_most=at_most,
seed=_seed)
def _frame_starter(self, canonical: Any, selection: list=None, headers: [str, list]=None, drop: bool=None,
dtype: [str, list]=None, exclude: bool=None, regex: [str, list]=None, re_ignore_case: bool=None,
rename_map: dict=None, default_size: int=None, seed: int=None) -> pd.DataFrame:
""" Selects rows and/or columns changing the shape of the DatFrame. This is always run last in a pipeline
Rows are filtered before the column filter so columns can be referenced even though they might not be included
the final column list.
:param canonical: a pd.DataFrame as the reference dataframe
:param selection: a list of selections where conditions are filtered on, executed in list order
An example of a selection with the minimum requirements is: (see 'select2dict(...)')
[{'column': 'genre', 'condition': "=='Comedy'"}]
:param headers: a list of headers to drop or filter on type
:param drop: to drop or not drop the headers
:param dtype: the column types to include or exclusive. Default None else int, float, bool, object, 'number'
:param exclude: to exclude or include the dtypes
        :param regex: a regular expression to search the headers. example '^((?!_amt).)*$' excludes '_amt' columns
:param re_ignore_case: true if the regex should ignore case. Default is False
:param rename_map: a from: to dictionary of headers to rename
:param default_size: if the canonical fails return an empty dataframe with the default index size
:param seed: this is a place holder, here for compatibility across methods
:return: pd.DataFrame
The starter is a pd.DataFrame, a pd.Series or list, a connector contract str reference or a set of
parameter instructions on how to generate a pd.Dataframe. the description of each is:
- pd.Dataframe -> a deep copy of the pd.DataFrame
- pd.Series or list -> creates a pd.DataFrame of one column with the 'header' name or 'default' if not given
- str -> instantiates a connector handler with the connector_name and loads the DataFrame from the connection
- int -> generates an empty pd.Dataframe with an index size of the int passed.
- dict -> use canonical2dict(...) to help construct a dict with a 'method' to build a pd.DataFrame
methods:
- model_*(...) -> one of the SyntheticBuilder model methods and parameters
- @empty -> generates an empty pd.DataFrame where size and headers can be passed
:size sets the index size of the dataframe
:headers any initial headers for the dataframe
- @generate -> generate a synthetic file from a remote Domain Contract
:task_name the name of the SyntheticBuilder task to run
:repo_uri the location of the Domain Product
:size (optional) a size to generate
:seed (optional) if a seed should be applied
:run_book (optional) if specific intent should be run only
Selections are a list of dictionaries of conditions and optional additional parameters to filter.
To help build conditions there is a static helper method called 'select2dict(...)' that has parameter
options available to build a condition.
An example of a condition with the minimum requirements is
[{'column': 'genre', 'condition': "=='Comedy'"}]
an example of using the helper method
selection = [inst.select2dict(column='gender', condition="=='M'"),
inst.select2dict(column='age', condition=">65", logic='XOR')]
Using the 'select2dict' method ensure the correct keys are used and the dictionary is properly formed. It also
helps with building the logic that is executed in order
"""
canonical = self._get_canonical(canonical, size=default_size)
        # not used but in place for method consistency
_seed = self._seed() if seed is None else seed
if isinstance(selection, list):
selection = deepcopy(selection)
# run the select logic
select_idx = self._selection_index(canonical=canonical, selection=selection)
canonical = canonical.iloc[select_idx].reset_index(drop=True)
drop = drop if isinstance(drop, bool) else False
exclude = exclude if isinstance(exclude, bool) else False
re_ignore_case = re_ignore_case if isinstance(re_ignore_case, bool) else False
rtn_frame = Commons.filter_columns(canonical, headers=headers, drop=drop, dtype=dtype, exclude=exclude,
regex=regex, re_ignore_case=re_ignore_case)
if isinstance(rename_map, dict):
rtn_frame.rename(mapper=rename_map, axis='columns', inplace=True)
return rtn_frame
def _frame_selection(self, canonical: Any, selection: list=None, headers: [str, list]=None,
drop: bool=None, dtype: [str, list]=None, exclude: bool=None, regex: [str, list]=None,
re_ignore_case: bool=None, seed: int=None) -> pd.DataFrame:
""" This method always runs at the start of the pipeline, taking a direct or generated pd.DataFrame,
see context notes below, as the foundation canonical of all subsequent steps of the pipeline.
:param canonical: a direct or generated pd.DataFrame. see context notes below
:param selection: a list of selections where conditions are filtered on, executed in list order
An example of a selection with the minimum requirements is: (see 'select2dict(...)')
[{'column': 'genre', 'condition': "=='Comedy'"}]
:param headers: a list of headers to drop or filter on type
:param drop: to drop or not drop the headers
:param dtype: the column types to include or exclusive. Default None else int, float, bool, object, 'number'
:param exclude: to exclude or include the dtypes
        :param regex: a regular expression to search the headers. example '^((?!_amt).)*$' excludes '_amt' columns
:param re_ignore_case: true if the regex should ignore case. Default is False
:param seed: this is a place holder, here for compatibility across methods
:return: pd.DataFrame
Selections are a list of dictionaries of conditions and optional additional parameters to filter.
To help build conditions there is a static helper method called 'select2dict(...)' that has parameter
options available to build a condition.
An example of a condition with the minimum requirements is
[{'column': 'genre', 'condition': "=='Comedy'"}]
an example of using the helper method
selection = [inst.select2dict(column='gender', condition="=='M'"),
inst.select2dict(column='age', condition=">65", logic='XOR')]
Using the 'select2dict' method ensure the correct keys are used and the dictionary is properly formed. It also
helps with building the logic that is executed in order
"""
return self._frame_starter(canonical=canonical, selection=selection, headers=headers, drop=drop, dtype=dtype,
exclude=exclude, regex=regex, re_ignore_case=re_ignore_case, seed=seed)
def _model_custom(self, canonical: Any, code_str: str, seed: int=None, **kwargs):
""" Commonly used for custom methods, takes code string that when executed changes the the canonical returning
the modified canonical. If the method passes returns a pd.Dataframe this will be returned else the assumption is
the canonical has been changed inplace and thus the modified canonical will be returned
When referencing the canonical in the code_str it should be referenced either by use parameter label 'canonical'
or the short cut '@' symbol. kwargs can also be passed into the code string but must be preceded by a '$' symbol
for example:
assume canonical['gender'] = ['M', 'F', 'U']
code_str ='''
\n@['new_gender'] = [True if x in $value else False for x in @[$header]]
\n@['value'] = [4, 5, 6]
'''
where kwargs are header="'gender'" and value=['M', 'F']
:param canonical: a pd.DataFrame as the reference dataframe
:param code_str: an action on those column values. to reference the canonical use '@'
:param seed: (optional) a seed value for the random function: default to None
:param kwargs: a set of kwargs to include in any executable function
        :return: a list (optionally a pd.DataFrame)
"""
canonical = self._get_canonical(canonical)
_seed = seed if isinstance(seed, int) else self._seed()
local_kwargs = locals()
for k, v in local_kwargs.pop('kwargs', {}).items():
local_kwargs.update({k: v})
code_str = code_str.replace(f'${k}', str(v))
code_str = code_str.replace('@', 'canonical')
df = exec(code_str, globals(), local_kwargs)
if df is None:
return canonical
return df
def _model_iterator(self, canonical: Any, marker_col: str=None, starting_frame: str=None, selection: list=None,
default_action: dict=None, iteration_actions: dict=None, iter_start: int=None,
iter_stop: int=None, seed: int=None) -> pd.DataFrame:
""" This method allows one to model repeating data subset that has some form of action applied per iteration.
The optional marker column must be included in order to apply actions or apply an iteration marker
An example of use might be a recommender generator where a cohort of unique users need to be selected, for
different recommendation strategies but users can be repeated across recommendation strategy
:param canonical: a pd.DataFrame as the reference dataframe
:param marker_col: (optional) the marker column name for the action outcome. default is to not include
:param starting_frame: (optional) a str referencing an existing connector contract name as the base DataFrame
:param selection: (optional) a list of selections where conditions are filtered on, executed in list order
An example of a selection with the minimum requirements is: (see 'select2dict(...)')
[{'column': 'genre', 'condition': "=='Comedy'"}]
:param default_action: (optional) a default action to take on all iterations. defaults to iteration value
:param iteration_actions: (optional) a dictionary of actions where the key is a specific iteration
:param iter_start: (optional) the start value of the range iteration default is 0
:param iter_stop: (optional) the stop value of the range iteration default is start iteration + 1
:param seed: (optional) this is a place holder, here for compatibility across methods
:return: pd.DataFrame
The starting_frame can be a pd.DataFrame, a pd.Series, int or list, a connector contract str reference or a
set of parameter instructions on how to generate a pd.Dataframe. the description of each is:
- pd.Dataframe -> a deep copy of the pd.DataFrame
- pd.Series or list -> creates a pd.DataFrame of one column with the 'header' name or 'default' if not given
- str -> instantiates a connector handler with the connector_name and loads the DataFrame from the connection
- int -> generates an empty pd.Dataframe with an index size of the int passed.
- dict -> use canonical2dict(...) to help construct a dict with a 'method' to build a pd.DataFrame
methods:
- model_*(...) -> one of the SyntheticBuilder model methods and parameters
- @empty -> generates an empty pd.DataFrame where size and headers can be passed
:size sets the index size of the dataframe
:headers any initial headers for the dataframe
- @generate -> generate a synthetic file from a remote Domain Contract
:task_name the name of the SyntheticBuilder task to run
:repo_uri the location of the Domain Product
:size (optional) a size to generate
:seed (optional) if a seed should be applied
:run_book (optional) if specific intent should be run only
Selections are a list of dictionaries of conditions and optional additional parameters to filter.
To help build conditions there is a static helper method called 'select2dict(...)' that has parameter
options available to build a condition.
An example of a condition with the minimum requirements is
[{'column': 'genre', 'condition': "=='Comedy'"}]
an example of using the helper method
selection = [inst.select2dict(column='gender', condition="=='M'"),
inst.select2dict(column='age', condition=">65", logic='XOR')]
Using the 'select2dict' method ensure the correct keys are used and the dictionary is properly formed. It also
helps with building the logic that is executed in order
Actions are the resulting outcome of the selection (or the default). An action can be just a value or a dict
that executes a intent method such as get_number(). To help build actions there is a helper function called
action2dict(...) that takes a method as a mandatory attribute.
With actions there are special keyword 'method' values:
@header: use a column as the value reference, expects the 'header' key
@constant: use a value constant, expects the key 'value'
@sample: use to get sample values, expected 'name' of the Sample method, optional 'shuffle' boolean
@eval: evaluate a code string, expects the key 'code_str' and any locals() required
An example of a simple action to return a selection from a list:
{'method': 'get_category', selection: ['M', 'F', 'U']}
This same action using the helper method would look like:
inst.action2dict(method='get_category', selection=['M', 'F', 'U'])
an example of using the helper method, in this example we use the keyword @header to get a value from another
column at the same index position:
inst.action2dict(method="@header", header='value')
We can even execute some sort of evaluation at run time:
inst.action2dict(method="@eval", code_str='sum(values)', values=[1,4,2,1])
"""
canonical = self._get_canonical(canonical)
rtn_frame = self._get_canonical(starting_frame)
_seed = self._seed() if seed is None else seed
iter_start = iter_start if isinstance(iter_start, int) else 0
iter_stop = iter_stop if isinstance(iter_stop, int) and iter_stop > iter_start else iter_start + 1
default_action = default_action if isinstance(default_action, dict) else 0
iteration_actions = iteration_actions if isinstance(iteration_actions, dict) else {}
for counter in range(iter_start, iter_stop):
df_count = canonical.copy()
# selection
df_count = self._frame_selection(df_count, selection=selection, seed=_seed)
# actions
if isinstance(marker_col, str):
if counter in iteration_actions.keys():
_action = iteration_actions.get(counter, None)
df_count[marker_col] = self._apply_action(df_count, action=_action, seed=_seed)
else:
default_action = default_action if isinstance(default_action, dict) else counter
df_count[marker_col] = self._apply_action(df_count, action=default_action, seed=_seed)
rtn_frame = pd.concat([rtn_frame, df_count], ignore_index=True)
return rtn_frame
def _model_group(self, canonical: Any, headers: [str, list], group_by: [str, list], aggregator: str=None,
list_choice: int=None, list_max: int=None, drop_group_by: bool=False, seed: int=None,
include_weighting: bool=False, freq_precision: int=None, remove_weighting_zeros: bool=False,
remove_aggregated: bool=False) -> pd.DataFrame:
""" returns the full column values directly from another connector data source. in addition the the
standard groupby aggregators there is also 'list' and 'set' that returns an aggregated list or set.
These can be using in conjunction with 'list_choice' and 'list_size' allows control of the return values.
if list_max is set to 1 then a single value is returned rather than a list of size 1.
:param canonical: a pd.DataFrame as the reference dataframe
:param headers: the column headers to apply the aggregation too
:param group_by: the column headers to group by
:param aggregator: (optional) the aggregator as a function of Pandas DataFrame 'groupby' or 'list' or 'set'
:param list_choice: (optional) used in conjunction with list or set aggregator to return a random n choice
:param list_max: (optional) used in conjunction with list or set aggregator restricts the list to a n size
:param drop_group_by: (optional) drops the group by headers
:param include_weighting: (optional) include a percentage weighting column for each
:param freq_precision: (optional) a precision for the relative_freq values
:param remove_aggregated: (optional) if used in conjunction with the weighting then drops the aggregator column
:param remove_weighting_zeros: (optional) removes zero values
:param seed: (optional) this is a place holder, here for compatibility across methods
:return: a pd.DataFrame
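        Example (illustrative; `builder` and `df` are made-up names, not defined here):
            builder._model_group(df, headers='product', group_by='customer', aggregator='list', list_max=3)
            # -> one row per customer with up to three of that customer's products aggregated into a list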
"""
canonical = self._get_canonical(canonical)
_seed = self._seed() if seed is None else seed
generator = np.random.default_rng(seed=_seed)
freq_precision = freq_precision if isinstance(freq_precision, int) else 3
aggregator = aggregator if isinstance(aggregator, str) else 'sum'
headers = Commons.list_formatter(headers)
group_by = Commons.list_formatter(group_by)
df_sub = Commons.filter_columns(canonical, headers=headers + group_by).dropna()
if aggregator.startswith('set') or aggregator.startswith('list'):
df_tmp = df_sub.groupby(group_by)[headers[0]].apply(eval(aggregator)).apply(lambda x: list(x))
df_tmp = df_tmp.reset_index()
for idx in range(1, len(headers)):
result = df_sub.groupby(group_by)[headers[idx]].apply(eval(aggregator)).apply(lambda x: list(x))
df_tmp = df_tmp.merge(result, how='left', left_on=group_by, right_index=True)
for idx in range(len(headers)):
header = headers[idx]
if isinstance(list_choice, int):
df_tmp[header] = df_tmp[header].apply(lambda x: generator.choice(x, size=list_choice))
if isinstance(list_max, int):
df_tmp[header] = df_tmp[header].apply(lambda x: x[0] if list_max == 1 else x[:list_max])
df_sub = df_tmp
else:
df_sub = df_sub.groupby(group_by, as_index=False).agg(aggregator)
if include_weighting:
df_sub['sum'] = df_sub.sum(axis=1, numeric_only=True)
total = df_sub['sum'].sum()
df_sub['weighting'] = df_sub['sum'].\
apply(lambda x: round((x / total), freq_precision) if isinstance(x, (int, float)) else 0)
df_sub = df_sub.drop(columns='sum')
if remove_weighting_zeros:
df_sub = df_sub[df_sub['weighting'] > 0]
df_sub = df_sub.sort_values(by='weighting', ascending=False)
if remove_aggregated:
df_sub = df_sub.drop(headers, axis=1)
if drop_group_by:
df_sub = df_sub.drop(columns=group_by, errors='ignore')
return df_sub
def _model_merge(self, canonical: Any, other: Any, left_on: str=None, right_on: str=None,
on: str=None, how: str=None, headers: list=None, suffixes: tuple=None, indicator: bool=None,
validate: str=None, seed: int=None) -> pd.DataFrame:
""" returns the full column values directly from another connector data source. The indicator parameter can be
used to mark the merged items.
:param canonical: a pd.DataFrame as the reference dataframe
:param other: a direct or generated pd.DataFrame. see context notes below
:param left_on: the canonical key column(s) to join on
:param right_on: the merging dataset key column(s) to join on
        :param on: if the left and right join have the same header name this can replace left_on and right_on
:param how: (optional) One of 'left', 'right', 'outer', 'inner'. Defaults to inner. See below for more detailed
description of each method.
:param headers: (optional) a filter on the headers included from the right side
:param suffixes: (optional) A tuple of string suffixes to apply to overlapping columns. Defaults ('', '_dup').
:param indicator: (optional) Add a column to the output DataFrame called _merge with information on the source
of each row. _merge is Categorical-type and takes on a value of left_only for observations whose
merge key only appears in 'left' DataFrame or Series, right_only for observations whose merge key
only appears in 'right' DataFrame or Series, and both if the observation’s merge key is found
in both.
:param validate: (optional) validate : string, default None. If specified, checks if merge is of specified type.
“one_to_one” or “1:1”: checks if merge keys are unique in both left and right datasets.
“one_to_many” or “1:m”: checks if merge keys are unique in left dataset.
“many_to_one” or “m:1”: checks if merge keys are unique in right dataset.
“many_to_many” or “m:m”: allowed, but does not result in checks.
:param seed: this is a place holder, here for compatibility across methods
:return: a pd.DataFrame
The other is a pd.DataFrame, a pd.Series, int or list, a connector contract str reference or a set of
parameter instructions on how to generate a pd.Dataframe. the description of each is:
- pd.Dataframe -> a deep copy of the pd.DataFrame
- pd.Series or list -> creates a pd.DataFrame of one column with the 'header' name or 'default' if not given
- str -> instantiates a connector handler with the connector_name and loads the DataFrame from the connection
- int -> generates an empty pd.Dataframe with an index size of the int passed.
- dict -> use canonical2dict(...) to help construct a dict with a 'method' to build a pd.DataFrame
methods:
- model_*(...) -> one of the SyntheticBuilder model methods and parameters
- @empty -> generates an empty pd.DataFrame where size and headers can be passed
:size sets the index size of the dataframe
:headers any initial headers for the dataframe
- @generate -> generate a synthetic file from a remote Domain Contract
:task_name the name of the SyntheticBuilder task to run
:repo_uri the location of the Domain Product
:size (optional) a size to generate
:seed (optional) if a seed should be applied
:run_book (optional) if specific intent should be run only
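        Example (illustrative; `builder`, `customers` and `orders` are made-up names, not defined here):
            builder._model_merge(customers, orders, on='customer_id', how='left', indicator=True)
            # -> customers with the order columns appended and a '_merge' column marking the source of each row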
"""
# Code block for intent
canonical = self._get_canonical(canonical)
other = self._get_canonical(other, size=canonical.shape[0])
_seed = self._seed() if seed is None else seed
how = how if isinstance(how, str) and how in ['left', 'right', 'outer', 'inner'] else 'inner'
indicator = indicator if isinstance(indicator, bool) else False
suffixes = suffixes if isinstance(suffixes, tuple) and len(suffixes) == 2 else ('', '_dup')
# Filter on the columns
if isinstance(headers, list):
headers.append(right_on if isinstance(right_on, str) else on)
other = Commons.filter_columns(other, headers=headers)
df_rtn = pd.merge(left=canonical, right=other, how=how, left_on=left_on, right_on=right_on, on=on,
suffixes=suffixes, indicator=indicator, validate=validate)
return df_rtn
def _model_concat(self, canonical: Any, other: Any, as_rows: bool=None, headers: [str, list]=None,
drop: bool=None, dtype: [str, list]=None, exclude: bool=None, regex: [str, list]=None,
re_ignore_case: bool=None, shuffle: bool=None, seed: int=None) -> pd.DataFrame:
""" returns the full column values directly from another connector data source.
:param canonical: a pd.DataFrame as the reference dataframe
:param other: a direct or generated pd.DataFrame. see context notes below
:param as_rows: (optional) how to concatenate, True adds the connector dataset as rows, False as columns
:param headers: (optional) a filter of headers from the 'other' dataset
:param drop: (optional) to drop or not drop the headers if specified
:param dtype: (optional) a filter on data type for the 'other' dataset. int, float, bool, object
:param exclude: (optional) to exclude or include the data types if specified
        :param regex: (optional) a regular expression to search the headers. example '^((?!_amt).)*$' excludes '_amt'
:param re_ignore_case: (optional) true if the regex should ignore case. Default is False
:param shuffle: (optional) if the rows in the loaded canonical should be shuffled
:param seed: this is a place holder, here for compatibility across methods
:return: a pd.DataFrame
The other is a pd.DataFrame, a pd.Series, int or list, a connector contract str reference or a set of
parameter instructions on how to generate a pd.Dataframe. the description of each is:
- pd.Dataframe -> a deep copy of the pd.DataFrame
- pd.Series or list -> creates a pd.DataFrame of one column with the 'header' name or 'default' if not given
- str -> instantiates a connector handler with the connector_name and loads the DataFrame from the connection
- int -> generates an empty pd.Dataframe with an index size of the int passed.
- dict -> use canonical2dict(...) to help construct a dict with a 'method' to build a pd.DataFrame
methods:
- model_*(...) -> one of the SyntheticBuilder model methods and parameters
- @empty -> generates an empty pd.DataFrame where size and headers can be passed
:size sets the index size of the dataframe
:headers any initial headers for the dataframe
- @generate -> generate a synthetic file from a remote Domain Contract
:task_name the name of the SyntheticBuilder task to run
:repo_uri the location of the Domain Product
:size (optional) a size to generate
:seed (optional) if a seed should be applied
:run_book (optional) if specific intent should be run only
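        Example (illustrative; `builder`, `df` and `other_df` are made-up names, not defined here):
            builder._model_concat(df, other_df, as_rows=False)
            # -> df with other_df's columns appended, other_df trimmed to df's row count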
"""
canonical = self._get_canonical(canonical)
other = self._get_canonical(other, size=canonical.shape[0])
_seed = self._seed() if seed is None else seed
shuffle = shuffle if isinstance(shuffle, bool) else False
as_rows = as_rows if isinstance(as_rows, bool) else False
# Filter on the columns
df_rtn = Commons.filter_columns(df=other, headers=headers, drop=drop, dtype=dtype, exclude=exclude,
regex=regex, re_ignore_case=re_ignore_case, copy=False)
if shuffle:
            df_rtn = df_rtn.sample(frac=1, random_state=_seed).reset_index(drop=True)
if canonical.shape[0] <= df_rtn.shape[0]:
df_rtn = df_rtn.iloc[:canonical.shape[0]]
axis = 'index' if as_rows else 'columns'
return pd.concat([canonical, df_rtn], axis=axis)
def _model_dict_column(self, canonical: Any, header: str, convert_str: bool=None, replace_null: Any=None,
seed: int=None) -> pd.DataFrame:
""" takes a column that contains dict and expands them into columns. Note, the column must be a flat dictionary.
Complex structures will not work.
:param canonical: a pd.DataFrame as the reference dataframe
        :param header: the header of the column to be converted
:param convert_str: (optional) if the header has the dict as a string convert to dict using ast.literal_eval()
:param replace_null: (optional) after conversion, replace null values with this value
:param seed: (optional) this is a place holder, here for compatibility across methods
:return: pd.DataFrame
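        Example (illustrative; `builder` stands in for a concrete intent model instance and is not defined here):
            df = pd.DataFrame({'id': [1, 2], 'attrs': [{'a': 1}, {'b': 2}]})
            builder._model_dict_column(df, header='attrs')
            # -> columns id, a, b with NaN where a key is absent, and the 'attrs' column dropped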
"""
canonical = self._get_canonical(canonical)
if not isinstance(header, str) or header not in canonical.columns:
raise ValueError(f"The header '{header}' can't be found in the canonical DataFrame")
_seed = self._seed() if seed is None else seed
convert_str = convert_str if isinstance(convert_str, bool) else False
# replace NaN with '{}' if the column is strings, otherwise replace with {}
if convert_str:
canonical[header] = canonical[header].fillna('{}').apply(ast.literal_eval)
else:
canonical[header] = canonical[header].fillna({i: {} for i in canonical.index})
# convert the key/values into columns (this is the fasted code)
result = pd.json_normalize(canonical[header])
if isinstance(replace_null, (int, float, str)):
result.replace(np.nan, replace_null, inplace=True)
return canonical.join(result).drop(columns=[header])
def _model_explode(self, canonical: Any, header: str, seed: int=None) -> pd.DataFrame:
""" takes a single column of list values and explodes the DataFrame so row is represented by each elements
in the row list
:param canonical: a pd.DataFrame as the reference dataframe
:param header: the header of the column to be exploded
:param seed: (optional) this is a place holder, here for compatibility across methods
:return: a pd.DataFrame
        The canonical is a pd.DataFrame, a pd.Series or list, a connector contract str reference or a set of
        parameter instructions on how to generate a pd.Dataframe (see the context notes on other methods above).
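        Example (illustrative; `builder` stands in for a concrete intent model instance and is not defined here):
            df = pd.DataFrame({'id': [1, 2], 'tags': [['a', 'b'], ['c']]})
            builder._model_explode(df, header='tags')
            # -> three rows: (1, 'a'), (1, 'b'), (2, 'c')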
"""
canonical = self._get_canonical(canonical)
if not isinstance(header, str) or header not in canonical.columns:
raise ValueError(f"The header '{header}' can't be found in the canonical DataFrame")
_seed = self._seed() if seed is None else seed
return canonical.explode(column=header, ignore_index=True)
def _model_sample(self, canonical: Any, sample: Any, columns_list: list=None, exclude_associate: list=None,
auto_transition: bool=None, detail_numeric: bool=None, strict_typing: bool=None,
category_limit: int=None, apply_bias: bool=None, seed: int = None) -> pd.DataFrame:
""" Takes a sample dataset and using analytics, builds a set of synthetic columns that are representative of
the sample but scaled to the size of the canonical
        :param canonical: a pd.DataFrame as the reference dataframe
        :param sample: the sample dataset to analyse and model from
        :param columns_list: (optional) the subset of sample columns to analyse. Default is all columns
        :param exclude_associate: (optional) a list of associations to exclude from the analysis
        :param auto_transition: (optional) if the sample should be auto-transitioned before analysis. Default True
        :param detail_numeric: (optional) if numeric columns should be analysed in detail
        :param strict_typing: (optional) if strict typing should be applied during the analysis
        :param category_limit: (optional) a limit on the number of categories in an analysed column
        :param apply_bias: (optional) if dominant values were excluded in analysis, re-include them to maintain bias
:param seed: (optional) this is a place holder, here for compatibility across methods
:return: a pd.DataFrame
"""
canonical = self._get_canonical(canonical)
sample = self._get_canonical(sample)
auto_transition = auto_transition if isinstance(auto_transition, bool) else True
columns_list = columns_list if isinstance(columns_list, list) else list(sample.columns)
sample = Commons.filter_columns(sample, headers=columns_list)
if auto_transition:
Transition.from_memory().cleaners.auto_transition(sample, inplace=True)
blob = DataDiscovery.analyse_association(sample, columns_list=columns_list, exclude_associate=exclude_associate,
detail_numeric=detail_numeric, strict_typing=strict_typing,
category_limit=category_limit)
return self._model_analysis(canonical=canonical, analytics_blob=blob, apply_bias=apply_bias, seed=seed)
def _model_script(self, canonical: Any, script_contract: str, seed: int = None) -> pd.DataFrame:
"""Takes a synthetic build script and using analytics, builds a set of synthetic columns that are that are
defined by the build script and scaled to the size of the canonical
        :param canonical: a pd.DataFrame as the reference dataframe
        :param script_contract: a reference to the build script with 'name', 'type' and 'params' columns
:param seed: (optional) this is a place holder, here for compatibility across methods
:return: a pd.DataFrame
"""
canonical = self._get_canonical(canonical)
script = self._get_canonical(script_contract)
type_options = {'number': '_get_number', 'date': '_get_datetime', 'category': 'get_category',
'selection': 'get_selection', 'intervals': 'get_intervals', 'distribution': 'get_distribution'}
script['params'] = script['params'].replace(['', ' '], np.nan)
script['params'].loc[script['params'].isna()] = '[]'
script['params'] = [ast.literal_eval(x) if isinstance(x, str) and x.startswith('[') and x.endswith(']')
else x for x in script['params']]
# replace all other items with list
script['params'] = [x if isinstance(x, list) else [x] for x in script['params']]
script['params'] = script['params'].astype('object')
for index, row in script.iterrows():
method = type_options.get(row['type'])
params = row['params']
canonical[row['name']] = eval(f"self.{method}(size={canonical.shape[0]}, **params)", globals(), locals())
return canonical
def _model_analysis(self, canonical: Any, analytics_blob: dict, apply_bias: bool=None,
seed: int=None) -> pd.DataFrame:
""" builds a set of columns based on an analysis dictionary of weighting (see analyse_association)
if a reference DataFrame is passed then as the analysis is run if the column already exists the row
value will be taken as the reference to the sub category and not the random value. This allows already
constructed association to be used as reference for a sub category.
:param canonical: a pd.DataFrame as the reference dataframe
:param analytics_blob: the analytics blob from DataDiscovery.analyse_association(...)
:param apply_bias: (optional) if dominant values have been excluded, re-include to maintain bias
:param seed: seed: (optional) a seed value for the random function: default to None
:return: a DataFrame
"""
def get_level(analysis: dict, sample_size: int, _seed: int=None):
_seed = self._seed(seed=_seed, increment=True)
for name, values in analysis.items():
if row_dict.get(name) is None:
row_dict[name] = list()
_analysis = DataAnalytics(analysis=values.get('insight', {}))
result_type = object
if str(_analysis.intent.dtype).startswith('cat'):
result_type = 'category'
result = self._get_category(selection=_analysis.intent.categories,
relative_freq=_analysis.patterns.get('relative_freq', None),
seed=_seed, size=sample_size)
elif str(_analysis.intent.dtype).startswith('num'):
result_type = 'int' if _analysis.params.precision == 0 else 'float'
result = self._get_intervals(intervals=[tuple(x) for x in _analysis.intent.intervals],
relative_freq=_analysis.patterns.get('relative_freq', None),
precision=_analysis.params.get('precision', None),
seed=_seed, size=sample_size)
elif str(_analysis.intent.dtype).startswith('date'):
result_type = 'object' if _analysis.params.is_element('data_format') else 'date'
result = self._get_datetime(start=_analysis.stats.lowest,
until=_analysis.stats.highest,
relative_freq=_analysis.patterns.get('relative_freq', None),
date_format=_analysis.params.get('data_format', None),
day_first=_analysis.params.get('day_first', None),
year_first=_analysis.params.get('year_first', None),
seed=_seed, size=sample_size)
else:
result = []
# if the analysis was done excluding dominant values then see if they should be added back
if apply_bias and _analysis.patterns.is_element('dominant_excluded'):
_dom_percent = _analysis.patterns.dominant_percent/100
_dom_values = _analysis.patterns.dominant_excluded
if len(_dom_values) > 0:
s_values = pd.Series(result, dtype=result_type)
non_zero = s_values[~s_values.isin(_dom_values)].index
choice_size = int((s_values.size * _dom_percent) - (s_values.size - len(non_zero)))
if choice_size > 0:
generator = np.random.default_rng(_seed)
_dom_choice = generator.choice(_dom_values, size=choice_size)
s_values.iloc[generator.choice(non_zero, size=choice_size, replace=False)] = _dom_choice
result = s_values.to_list()
# now add the result to the row_dict
row_dict[name] += result
if sum(_analysis.patterns.relative_freq) == 0:
unit = 0
else:
unit = sample_size / sum(_analysis.patterns.relative_freq)
if values.get('sub_category'):
leaves = values.get('branch', {}).get('leaves', {})
for idx in range(len(leaves)):
section_size = int(round(_analysis.patterns.relative_freq[idx] * unit, 0)) + 1
next_item = values.get('sub_category').get(leaves[idx])
get_level(next_item, section_size, _seed)
return
canonical = self._get_canonical(canonical)
apply_bias = apply_bias if isinstance(apply_bias, bool) else True
row_dict = dict()
seed = self._seed() if seed is None else seed
size = canonical.shape[0]
get_level(analytics_blob, sample_size=size, _seed=seed)
for key in row_dict.keys():
row_dict[key] = row_dict[key][:size]
return pd.concat([canonical, pd.DataFrame.from_dict(data=row_dict)], axis=1)
def _model_encoding(self, canonical: Any, headers: [str, list], encoding: bool=None, ordinal: dict=None,
prefix=None, dtype: Any=None, prefix_sep: str=None, dummy_na: bool=False,
drop_first: bool=False, seed: int=None) -> pd.DataFrame:
""" encodes categorical data types, by default, as dummy encoded but optionally can choose label
encoding
:param canonical: a pd.DataFrame as the reference dataframe
:param headers: the header(s) to apply multi-hot
:param encoding: the type of encoding to apply to the categories, types supported 'dummy', 'ordinal', 'label'
:param ordinal: a dictionary of ordinal encoding. encoding must be 'ordinal', if not mapped then returns null
:param prefix : str, list of str, or dict of str, default None
String to append DataFrame column names.
Pass a list with length equal to the number of columns
when calling get_dummies on a DataFrame. Alternatively, `prefix`
can be a dictionary mapping column names to prefixes.
:param prefix_sep : str, default '_'
If appending prefix, separator/delimiter to use. Or pass a
list or dictionary as with `prefix`.
:param dummy_na : bool, default False
Add a column to indicate NaNs, if False NaNs are ignored.
:param drop_first : bool, default False
Whether to get k-1 dummies out of k categorical levels by removing the
first level.
:param dtype : dtype, default np.uint8
Data type for new columns. Only a single dtype is allowed.
:param seed: seed: (optional) a seed value for the random function: default to None
:return: a pd.Dataframe
"""
# intent code block on the canonical
canonical = self._get_canonical(canonical)
headers = Commons.list_formatter(headers)
seed = self._seed() if seed is None else seed
encoding = encoding if isinstance(encoding, str) and encoding in ['label', 'ordinal'] else 'dummy'
prefix = prefix if isinstance(prefix, str) else None
prefix_sep = prefix_sep if isinstance(prefix_sep, str) else "_"
dummy_na = dummy_na if isinstance(dummy_na, bool) else False
drop_first = drop_first if isinstance(drop_first, bool) else False
dtype = dtype if dtype else np.uint8
for header in headers:
if canonical[header].dtype.name != 'category':
canonical[header] = canonical[header].astype('category')
if encoding == 'ordinal':
ordinal = ordinal if isinstance(ordinal, dict) else {}
canonical[header] = canonical[header].map(ordinal, na_action=None)
elif encoding == 'label':
canonical[f"{prefix}{prefix_sep}{header}"] = canonical[header].cat.codes
if encoding == 'dummy':
dummy_df = pd.get_dummies(canonical, columns=headers, prefix=prefix, prefix_sep=prefix_sep,
dummy_na=dummy_na, drop_first=drop_first, dtype=dtype)
for name in dummy_df.columns:
canonical[name] = dummy_df[name]
return canonical
def _correlate_selection(self, canonical: Any, selection: list, action: [str, int, float, dict],
default_action: [str, int, float, dict]=None, seed: int=None, rtn_type: str=None):
""" returns a value set based on the selection list and the action enacted on that selection. If
the selection criteria is not fulfilled then the default_action is taken if specified, else null value.
If a DataFrame is not passed, the values column is referenced by the header '_default'
:param canonical: a pd.DataFrame as the reference dataframe
:param selection: a list of selections where conditions are filtered on, executed in list order
An example of a selection with the minimum requirements is: (see 'select2dict(...)')
[{'column': 'genre', 'condition': "=='Comedy'"}]
:param action: a value or dict to act upon if the select is successful. see below for more examples
An example of an action as a dict: (see 'action2dict(...)')
{'method': 'get_category', 'selection': ['M', 'F', 'U']}
:param default_action: (optional) a default action to take if the selection is not fulfilled
:param seed: (optional) a seed value for the random function: default to None
:param rtn_type: (optional) changes the default return of a 'list' to a pd.Series
other than the int, float, category, string and object, passing 'as-is' will return as is
:return: value set based on the selection list and the action
Selections are a list of dictionaries of conditions and optional additional parameters to filter.
To help build conditions there is a static helper method called 'select2dict(...)' that has parameter
options available to build a condition.
An example of a condition with the minimum requirements is
[{'column': 'genre', 'condition': "=='Comedy'"}]
an example of using the helper method
selection = [inst.select2dict(column='gender', condition="=='M'"),
inst.select2dict(column='age', condition=">65", logic='XOR')]
Using the 'select2dict' method ensures the correct keys are used and the dictionary is properly formed. It also
helps with building the logic that is executed in order.
Actions are the resulting outcome of the selection (or the default). An action can be just a value or a dict
that executes an intent method such as get_number(). To help build actions there is a helper function called
action2dict(...) that takes a method as a mandatory attribute.
With actions there are special keyword 'method' values:
@header: use a column as the value reference, expects the 'header' key
@constant: use a value constant, expects the key 'value'
@sample: use to get sample values, expected 'name' of the Sample method, optional 'shuffle' boolean
@eval: evaluate a code string, expects the key 'code_str' and any locals() required
An example of a simple action to return a selection from a list:
{'method': 'get_category', selection: ['M', 'F', 'U']}
This same action using the helper method would look like:
inst.action2dict(method='get_category', selection=['M', 'F', 'U'])
an example of using the helper method, in this example we use the keyword @header to get a value from another
column at the same index position:
inst.action2dict(method="@header", header='value')
We can even execute some sort of evaluation at run time:
inst.action2dict(method="@eval", code_str='sum(values)', values=[1,4,2,1])
"""
canonical = self._get_canonical(canonical)
if len(canonical) == 0:
raise TypeError("The canonical given is empty")
if not isinstance(selection, list):
raise ValueError("The 'selection' parameter must be a 'list' of 'dict' types")
if not isinstance(action, (str, int, float, dict)) or (isinstance(action, dict) and len(action) == 0):
raise TypeError("The 'action' parameter is not of an accepted format or is empty")
_seed = seed if isinstance(seed, int) else self._seed()
# prep the values to be a DataFrame if it isn't already
action = deepcopy(action)
selection = deepcopy(selection)
# run the logic
select_idx = self._selection_index(canonical=canonical, selection=selection)
if not isinstance(default_action, (str, int, float, dict)):
default_action = None
rtn_values = self._apply_action(canonical, action=default_action, seed=_seed)
# deal with categories
is_category = False
if rtn_values.dtype.name == 'category':
is_category = True
rtn_values = rtn_values.astype('object')
rtn_values.update(self._apply_action(canonical, action=action, select_idx=select_idx, seed=_seed))
if is_category:
rtn_values = rtn_values.astype('category')
if isinstance(rtn_type, str):
if rtn_type in ['category', 'object'] or rtn_type.startswith('int') or rtn_type.startswith('float'):
rtn_values = rtn_values.astype(rtn_type)
return rtn_values
return rtn_values.to_list()
def _correlate_custom(self, canonical: Any, code_str: str, seed: int=None, **kwargs):
""" Commonly used for custom list comprehension, takes code string that when evaluated returns a list of values
When referencing the canonical in the code_str it should be referenced either by the parameter label 'canonical'
or the shortcut '@' symbol.
for example:
code_str = "[x + 2 for x in @['A']]" # where 'A' is a header in the canonical
kwargs can also be passed into the code string but must be preceded by a '$' symbol
for example:
code_str = "[True if x == $v1 else False for x in @['A']]" # where 'v1' is a kwargs
:param canonical: a pd.DataFrame as the reference dataframe
:param code_str: an action on those column values. to reference the canonical use '@'
:param seed: (optional) a seed value for the random function: default to None
:param kwargs: a set of kwargs to include in any executable function
:return: a list (optionally a pd.DataFrame)
"""
canonical = self._get_canonical(canonical)
_seed = seed if isinstance(seed, int) else self._seed()
local_kwargs = locals()
for k, v in local_kwargs.pop('kwargs', {}).items():
local_kwargs.update({k: v})
code_str = code_str.replace(f'${k}', str(v))
code_str = code_str.replace('@', 'canonical')
rtn_values = eval(code_str, globals(), local_kwargs)
if rtn_values is None:
return [np.nan] * canonical.shape[0]
return rtn_values
def _correlate_aggregate(self, canonical: Any, headers: list, agg: str, seed: int=None, precision: int=None,
rtn_type: str=None):
""" correlate two or more columns with each other through a finite set of aggregation functions. The
aggregation function names are limited to 'sum', 'prod', 'count', 'min', 'max' and 'mean' for numeric columns
and a special 'list' function name to combine the columns as a list
:param canonical: a pd.DataFrame as the reference dataframe
:param headers: a list of headers to correlate
:param agg: the aggregation function name to enact. The available functions are:
'sum', 'prod', 'count', 'min', 'max', 'mean' and 'list' which combines the columns as a list
:param precision: the value precision of the return values
:param seed: (optional) a seed value for the random function: default to None
:param rtn_type: (optional) changes the default return of a 'list' to a pd.Series
other than the int, float, category, string and object, passing 'as-is' will return as is
:return: a list of equal length to the one passed
"""
canonical = self._get_canonical(canonical)
if not isinstance(headers, list) or len(headers) < 2:
raise ValueError("The headers value must be a list of at least two header str")
if agg not in ['sum', 'prod', 'count', 'min', 'max', 'mean', 'list']:
raise ValueError("The only allowed func values are 'sum', 'prod', 'count', 'min', 'max', 'mean', 'list'")
# Code block for intent
_seed = seed if isinstance(seed, int) else self._seed()
precision = precision if isinstance(precision, int) else 3
if agg == 'list':
return canonical.loc[:, headers].values.tolist()
rtn_values = eval(f"canonical.loc[:, headers].{agg}(axis=1)", globals(), locals()).round(precision)
if isinstance(rtn_type, str):
if rtn_type in ['category', 'object'] or rtn_type.startswith('int') or rtn_type.startswith('float'):
rtn_values = rtn_values.astype(rtn_type)
return rtn_values
return rtn_values.to_list()
def _correlate_choice(self, canonical: Any, header: str, list_size: int=None, random_choice: bool=None,
replace: bool=None, shuffle: bool=None, convert_str: bool=None, seed: int=None,
rtn_type: str=None):
""" correlate a column where the elements of the columns contains a list, and a choice is taken from that list.
if the list_size == 1 then a single value is correlated otherwise a list is correlated
Null values are passed through but all other elements must be a list with at least 1 value in it.
if 'random_choice' is true then all returned values will be a random selection from the list and of equal length.
if 'random_choice' is false then each list will not exceed the 'list_size'
Also if 'random_choice' is true and 'replace' is False then all lists must have more elements than the list_size.
By default 'replace' is True and 'shuffle' is False.
In addition 'convert_str' allows lists that have been formatted as strings to be converted back to lists
using 'ast.literal_eval(x)'
:param canonical: a pd.DataFrame as the reference dataframe
:param header: The header containing a list to choose from.
:param list_size: (optional) the number of elements to return, if more than 1 then list
:param random_choice: (optional) if the choice should be a random choice.
:param replace: (optional) if the choice selection should be replaced or selected only once
:param shuffle: (optional) if the final list should be shuffled
:param convert_str: if the header has the list as a string convert to list using ast.literal_eval()
:param seed: (optional) a seed value for the random function: default to None
:param rtn_type: (optional) changes the default return of a 'list' to a pd.Series
other than the int, float, category, string and object, passing 'as-is' will return as is
:return: a list of equal length to the one passed
"""
canonical = self._get_canonical(canonical, header=header)
if not isinstance(header, str) or header not in canonical.columns:
raise ValueError(f"The header '{header}' can't be found in the canonical DataFrame")
# Code block for intent
list_size = list_size if isinstance(list_size, int) else 1
random_choice = random_choice if isinstance(random_choice, bool) else False
convert_str = convert_str if isinstance(convert_str, bool) else False
replace = replace if isinstance(replace, bool) else True
shuffle = shuffle if isinstance(shuffle, bool) else False
_seed = seed if isinstance(seed, int) else self._seed()
s_values = canonical[header].copy()
if s_values.empty:
return list()
s_idx = s_values.where(~s_values.isna()).dropna().index
if convert_str:
s_values.iloc[s_idx] = [ast.literal_eval(x) if isinstance(x, str) else x for x in s_values.iloc[s_idx]]
s_values.iloc[s_idx] = Commons.list_formatter(s_values.iloc[s_idx])
generator = np.random.default_rng(seed=_seed)
if random_choice:
try:
s_values.iloc[s_idx] = [generator.choice(x, size=list_size, replace=replace, shuffle=shuffle)
for x in s_values.iloc[s_idx]]
except ValueError:
raise ValueError(f"Unable to make a choice. Ensure {header} has all appropriate values for the method")
s_values.iloc[s_idx] = [x[0] if list_size == 1 else list(x) for x in s_values.iloc[s_idx]]
else:
s_values.iloc[s_idx] = [x[:list_size] if list_size > 1 else x[0] for x in s_values.iloc[s_idx]]
if isinstance(rtn_type, str):
if rtn_type in ['category', 'object'] or rtn_type.startswith('int') or rtn_type.startswith('float'):
s_values = s_values.astype(rtn_type)
return s_values
return s_values.to_list()
def _correlate_join(self, canonical: Any, header: str, action: [str, dict], sep: str=None, seed: int=None,
rtn_type: str=None):
""" correlate a column and join it with the result of the action, This allows for composite values to be
build from. an example might be to take a forename and add the surname with a space separator to create a
composite name field, of to join two primary keys to create a single composite key.
:param canonical: a pd.DataFrame as the reference dataframe
:param header: the header of the column whose values will be joined with the action result
:param action: a string or a single action whose outcome will be joined to the header value
:param sep: (optional) a separator between the values
:param seed: (optional) a seed value for the random function: default to None
:param rtn_type: (optional) changes the default return of a 'list' to a pd.Series
other than the int, float, category, string and object, passing 'as-is' will return as is
:return: a list of equal length to the one passed
Actions are the resulting outcome of the selection (or the default). An action can be just a value or a dict
that executes an intent method such as get_number(). To help build actions there is a helper function called
action2dict(...) that takes a method as a mandatory attribute.
With actions there are special keyword 'method' values:
@header: use a column as the value reference, expects the 'header' key
@constant: use a value constant, expects the key 'value'
@sample: use to get sample values, expected 'name' of the Sample method, optional 'shuffle' boolean
@eval: evaluate a code string, expects the key 'code_str' and any locals() required
An example of a simple action to return a selection from a list:
{'method': 'get_category', selection: ['M', 'F', 'U']}
an example of using the helper method, in this example we use the keyword @header to get a value from another
column at the same index position:
inst.action2dict(method="@header", header='value')
We can even execute some sort of evaluation at run time:
inst.action2dict(method="@eval", code_str='sum(values)', values=[1,4,2,1])
"""
canonical = self._get_canonical(canonical, header=header)
if not isinstance(action, (dict, str)):
raise ValueError(f"The action must be a dictionary of a single action or a string value")
if not isinstance(header, str) or header not in canonical.columns:
raise ValueError(f"The header '{header}' can't be found in the canonical DataFrame")
# Code block for intent
_seed = seed if isinstance(seed, int) else self._seed()
sep = sep if isinstance(sep, str) else ''
s_values = canonical[header].copy()
if s_values.empty:
return list()
action = deepcopy(action)
null_idx = s_values[s_values.isna()].index
s_values.to_string()
result = self._apply_action(canonical, action=action, seed=_seed)
s_values = pd.Series([f"{a}{sep}{b}" for (a, b) in zip(s_values, result)], dtype='object')
if null_idx.size > 0:
s_values.iloc[null_idx] = np.nan
if isinstance(rtn_type, str):
if rtn_type in ['category', 'object'] or rtn_type.startswith('int') or rtn_type.startswith('float'):
s_values = s_values.astype(rtn_type)
return s_values
return s_values.to_list()
def _correlate_sigmoid(self, canonical: Any, header: str, precision: int=None, seed: int=None,
rtn_type: str=None):
""" logistic sigmoid a.k.a logit, takes an array of real numbers and transforms them to a value
between (0,1) and is defined as
f(x) = 1/(1+exp(-x)
:param canonical: a pd.DataFrame as the reference dataframe
:param header: the header in the DataFrame to correlate
:param precision: (optional) how many decimal places. default to 3
:param seed: (optional) the random seed. defaults to current datetime
:param rtn_type: (optional) changes the default return of a 'list' to a pd.Series
other than the int, float, category, string and object, passing 'as-is' will return as is
:return: an equal length list of correlated values
"""
canonical = self._get_canonical(canonical, header=header)
if not isinstance(header, str) or header not in canonical.columns:
raise ValueError(f"The header '{header}' can't be found in the canonical DataFrame")
s_values = canonical[header].copy()
if s_values.empty:
return list()
precision = precision if isinstance(precision, int) else 3
_seed = seed if isinstance(seed, int) else self._seed()
rtn_values = np.round(1 / (1 + np.exp(-s_values)), precision)
if isinstance(rtn_type, str):
if rtn_type in ['category', 'object'] or rtn_type.startswith('int') or rtn_type.startswith('float'):
rtn_values = rtn_values.astype(rtn_type)
return rtn_values
return rtn_values.to_list()
def _correlate_polynomial(self, canonical: Any, header: str, coefficient: list, seed: int=None,
rtn_type: str=None, keep_zero: bool=None) -> list:
""" creates a polynomial using the reference header values and apply the coefficients where the
index of the list represents the degree of the term in reverse order.
e.g [6, -2, 0, 4] => f(x) = 4x**3 - 2x + 6
:param canonical: a pd.DataFrame as the reference dataframe
:param header: the header in the DataFrame to correlate
:param coefficient: the list of term coefficients, lowest degree first
:param seed: (optional) the random seed. defaults to current datetime
:param keep_zero: (optional) if True then zeros passed remain zero, Default is False
:param rtn_type: (optional) changes the default return of a 'list' to a pd.Series
other than the int, float, category, string and object, passing 'as-is' will return as is
:return: an equal length list of correlated values
"""
canonical = self._get_canonical(canonical, header=header)
if not isinstance(header, str) or header not in canonical.columns:
raise ValueError(f"The header '{header}' can't be found in the canonical DataFrame")
s_values = canonical[header].copy()
if s_values.empty:
return list()
keep_zero = keep_zero if isinstance(keep_zero, bool) else False
_seed = seed if isinstance(seed, int) else self._seed()
def _calc_polynomial(x, _coefficient):
if keep_zero and x == 0:
return 0
res = 0
for index, coeff in enumerate(_coefficient):
res += coeff * x ** index
return res
rtn_values = s_values.apply(lambda x: _calc_polynomial(x, coefficient))
if isinstance(rtn_type, str):
if rtn_type in ['category', 'object'] or rtn_type.startswith('int') or rtn_type.startswith('float'):
rtn_values = rtn_values.astype(rtn_type)
return rtn_values
return rtn_values.to_list()
def _correlate_missing(self, canonical: Any, header: str, granularity: [int, float]=None,
as_type: str=None, lower: [int, float]=None, upper: [int, float]=None, nulls_list: list=None,
exclude_dominant: bool=None, replace_zero: [int, float]=None, precision: int=None,
day_first: bool=None, year_first: bool=None, seed: int=None,
rtn_type: str=None):
""" imputes missing data with a weighted distribution based on the analysis of the other elements in the
column
:param canonical: a pd.DataFrame as the reference dataframe
:param header: the header in the DataFrame to correlate
:param granularity: (optional) the granularity of the analysis across the range. Default is 5
int passed - represents the number of periods
float passed - the length of each interval
list[tuple] - specific interval periods e.g []
list[float] - the percentile or quantities, All should fall between 0 and 1
:param as_type: (optional) specify the type to analyse
:param lower: (optional) the lower limit of the number value. Default min()
:param upper: (optional) the upper limit of the number value. Default max()
:param nulls_list: (optional) a list of nulls that should be considered null
:param exclude_dominant: (optional) if overly dominant are to be excluded from analysis to avoid bias (numbers)
:param replace_zero: (optional) with categories, a non-zero minimal chance relative frequency to replace zero
This is useful when the relative frequency of a category is so small the analysis returns zero
:param precision: (optional) by default set to 3.
:param day_first: (optional) if the date provided has day first
:param year_first: (optional) if the date provided has year first
:param seed: (optional) the random seed. defaults to current datetime
:param rtn_type: (optional) changes the default return of a 'list' to a pd.Series
other than the int, float, category, string and object, passing 'as-is' will return as is
:return:
"""
canonical = self._get_canonical(canonical, header=header)
if not isinstance(header, str) or header not in canonical.columns:
raise ValueError(f"The header '{header}' can't be found in the canonical DataFrame")
s_values = canonical[header].copy()
if s_values.empty:
return list()
as_type = as_type if isinstance(as_type, str) else s_values.dtype.name
_seed = seed if isinstance(seed, int) else self._seed()
nulls_list = nulls_list if isinstance(nulls_list, list) else [np.nan, None, 'nan', '', ' ']
if isinstance(nulls_list, list):
s_values.replace(nulls_list, np.nan, inplace=True, regex=True)
null_idx = s_values[s_values.isna()].index
if as_type.startswith('int') or as_type.startswith('float') or as_type.startswith('num'):
_analysis = DataAnalytics(DataDiscovery.analyse_number(s_values, granularity=granularity, lower=lower,
upper=upper, detail_stats=False, precision=precision,
exclude_dominant=exclude_dominant))
s_values.iloc[null_idx] = self._get_intervals(intervals=[tuple(x) for x in _analysis.intent.intervals],
relative_freq=_analysis.patterns.relative_freq,
precision=_analysis.params.precision,
seed=_seed, size=len(null_idx))
elif as_type.startswith('cat'):
_analysis = DataAnalytics(DataDiscovery.analyse_category(s_values, replace_zero=replace_zero))
s_values.iloc[null_idx] = self._get_category(selection=_analysis.intent.categories,
relative_freq=_analysis.patterns.relative_freq,
seed=_seed, size=len(null_idx))
elif as_type.startswith('date'):
_analysis = DataAnalytics(DataDiscovery.analyse_date(s_values, granularity=granularity, lower=lower,
upper=upper, day_first=day_first,
year_first=year_first))
s_values.iloc[null_idx] = self._get_datetime(start=_analysis.intent.lowest,
until=_analysis.intent.highest,
relative_freq=_analysis.patterns.relative_freq,
date_format=_analysis.params.data_format,
day_first=_analysis.params.day_first,
year_first=_analysis.params.year_first,
seed=_seed, size=len(null_idx))
else:
raise ValueError(f"The data type '{as_type}' is not supported. Try using the 'as_type' parameter")
if isinstance(rtn_type, str):
if rtn_type in ['category', 'object'] or rtn_type.startswith('int') or rtn_type.startswith('float'):
s_values = s_values.astype(rtn_type)
return s_values
return s_values.to_list()
def _correlate_numbers(self, canonical: Any, header: str, to_numeric: bool=None, standardize: bool=None,
normalize: tuple=None, offset: [int, float, str]=None, jitter: float=None,
jitter_freq: list=None, precision: int=None, replace_nulls: [int, float]=None,
seed: int=None, keep_zero: bool=None, min_value: [int, float]=None,
max_value: [int, float]=None, rtn_type: str=None):
""" returns a number that correlates to the value given. The jitter is based on a normal distribution
with the correlated value being the mean and the jitter its standard deviation from that mean
:param canonical: a pd.DataFrame as the reference dataframe
:param header: the header in the DataFrame to correlate
:param to_numeric: (optional) ensures a numeric type; non-convertible strings are set to null
:param standardize: (optional) if the column should be standardised
:param normalize: (optional) normalise the column between two values. the tuple is the lower and upper bounds
:param offset: (optional) a fixed value to offset or if str an operation to perform using @ as the header value.
:param jitter: (optional) a perturbation of the value where the jitter is a std. defaults to 0
:param jitter_freq: (optional) a relative freq with the pattern mid point the mid point of the jitter
:param precision: (optional) how many decimal places. default to 3
:param replace_nulls: (optional) a numeric value to replace nulls
:param seed: (optional) the random seed. defaults to current datetime
:param keep_zero: (optional) if True then zeros passed remain zero, Default is False
:param min_value: a minimum value not to go below
:param max_value: a max value not to go above
:param rtn_type: (optional) changes the default return of a 'list' to a pd.Series
other than the int, float, category, string and object, passing 'as-is' will return as is
:return: an equal length list of correlated values
The offset can be a numeric offset that is added to the value, e.g. passing 2 will add 2 to all values.
If a string is passed, its format should be a calculation with the '@' character used to represent the column
value. e.g.
'1-@' would subtract the column value from 1,
'@*0.5' would multiply the column value by 0.5
"""
canonical = self._get_canonical(canonical, header=header)
if not isinstance(header, str) or header not in canonical.columns:
raise ValueError(f"The header '{header}' can't be found in the canonical DataFrame")
s_values = canonical[header].copy()
if s_values.empty:
return list()
if isinstance(to_numeric, bool) and to_numeric:
s_values = pd.to_numeric(s_values.apply(str).str.replace('[$£€, ]', '', regex=True), errors='coerce')
if not (s_values.dtype.name.startswith('int') or s_values.dtype.name.startswith('float')):
raise ValueError(f"The header column is of type '{s_values.dtype.name}' and not numeric. "
f"Use the 'to_numeric' parameter if appropriate")
keep_zero = keep_zero if isinstance(keep_zero, bool) else False
precision = precision if isinstance(precision, int) else 3
_seed = seed if isinstance(seed, int) else self._seed()
if isinstance(replace_nulls, (int, float)):
s_values[s_values.isna()] = replace_nulls
null_idx = s_values[s_values.isna()].index
zero_idx = s_values.where(s_values == 0).dropna().index if keep_zero else []
if isinstance(offset, (int, float)) and offset != 0:
s_values = s_values.add(offset)
elif isinstance(offset, str):
offset = offset.replace("@", 'x')
s_values = s_values.apply(lambda x: eval(offset))
if isinstance(jitter, (int, float)) and jitter != 0:
sample = self._get_number(-abs(jitter) / 2, abs(jitter) / 2, relative_freq=jitter_freq,
size=s_values.size, seed=_seed)
s_values = s_values.add(sample)
if isinstance(min_value, (int, float)):
if min_value < s_values.max():
min_idx = s_values.dropna().where(s_values < min_value).dropna().index
s_values.iloc[min_idx] = min_value
else:
raise ValueError(f"The min value {min_value} is greater than the max result value {s_values.max()}")
if isinstance(max_value, (int, float)):
if max_value > s_values.min():
max_idx = s_values.dropna().where(s_values > max_value).dropna().index
s_values.iloc[max_idx] = max_value
else:
raise ValueError(f"The max value {max_value} is less than the min result value {s_values.min()}")
if isinstance(standardize, bool) and standardize:
s_values = pd.Series(Commons.list_standardize(s_values.to_list()))
if isinstance(normalize, tuple):
if len(normalize) != 2 or normalize[0] >= normalize[1]:
raise ValueError("The normalize tuple must be of size 2 with the first value lower than the second")
s_values = pd.Series(Commons.list_normalize(s_values.to_list(), normalize[0], normalize[1]))
# reset the zero values if any
s_values.iloc[zero_idx] = 0
s_values = s_values.round(precision)
if precision == 0 and not s_values.isnull().any():
s_values = s_values.astype(int)
if null_idx.size > 0:
s_values.iloc[null_idx] = np.nan
if isinstance(rtn_type, str):
if rtn_type in ['category', 'object'] or rtn_type.startswith('int') or rtn_type.startswith('float'):
s_values = s_values.astype(rtn_type)
return s_values
return s_values.to_list()
def _correlate_categories(self, canonical: Any, header: str, correlations: list, actions: dict,
default_action: [str, int, float, dict]=None, seed: int=None, rtn_type: str=None):
""" correlation of a set of values to an action, the correlations must map to the dictionary index values.
Note. to use the current value in the passed values as a parameter value pass an empty dict {} as the keys
value. If you want the action value to be the current value of the passed value then again pass an empty dict
action to be the current value
simple correlation list:
['A', 'B', 'C'] # if values is 'A' then action is 0 and so on
multiple choice correlation:
[['A','B'], 'C'] # if values is 'A' OR 'B' then action is 0 and so on
For more complex correlation the selection logic can be used, see notes below.
for actions also see notes below.
:param canonical: a pd.DataFrame as the reference dataframe
:param header: the header in the DataFrame to correlate
:param correlations: a list of categories (can also contain lists for multiple correlations)
:param actions: the correlated set of categories that should map to the index
:param default_action: (optional) a default action to take if the selection is not fulfilled
:param seed: a seed value for the random function: default to None
:param rtn_type: (optional) changes the default return of a 'list' to a pd.Series
other than the int, float, category, string and object, passing 'as-is' will return as is
:return: a list of equal length to the one passed
Selections are a list of dictionaries of conditions and optional additional parameters to filter.
To help build conditions there is a static helper method called 'select2dict(...)' that has parameter
options available to build a condition.
An example of a condition with the minimum requirements is
[{'column': 'genre', 'condition': "=='Comedy'"}]
an example of using the helper method
selection = [inst.select2dict(column='gender', condition="=='M'"),
inst.select2dict(column='age', condition=">65", logic='XOR')]
Using the 'select2dict' method ensures the correct keys are used and the dictionary is properly formed. It also
helps with building the logic that is executed in order.
Actions are the resulting outcome of the selection (or the default). An action can be just a value or a dict
that executes an intent method such as get_number(). To help build actions there is a helper function called
action2dict(...) that takes a method as a mandatory attribute.
With actions there are special keyword 'method' values:
@header: use a column as the value reference, expects the 'header' key
@constant: use a value constant, expects the key 'value'
@sample: use to get sample values, expected 'name' of the Sample method, optional 'shuffle' boolean
@eval: evaluate a code string, expects the key 'code_str' and any locals() required
An example of a simple action to return a selection from a list:
{'method': 'get_category', selection: ['M', 'F', 'U']}
This same action using the helper method would look like:
inst.action2dict(method='get_category', selection=['M', 'F', 'U'])
an example of using the helper method, in this example we use the keyword @header to get a value from another
column at the same index position:
inst.action2dict(method="@header", header='value')
We can even execute some sort of evaluation at run time:
inst.action2dict(method="@eval", code_str='sum(values)', values=[1,4,2,1])
"""
canonical = self._get_canonical(canonical, header=header)
if not isinstance(header, str) or header not in canonical.columns:
raise ValueError(f"The header '{header}' can't be found in the canonical DataFrame")
_seed = seed if isinstance(seed, int) else self._seed()
actions = deepcopy(actions)
correlations = deepcopy(correlations)
corr_list = []
for corr in correlations:
corr_list.append(Commons.list_formatter(corr))
if not isinstance(default_action, (str, int, float, dict)):
default_action = None
rtn_values = self._apply_action(canonical, action=default_action, seed=_seed)
# deal with categories
if rtn_values.dtype.name == 'category':
rtn_values = rtn_values.astype('object')
s_values = canonical[header].copy().astype(str)
for i in range(len(corr_list)):
action = actions.get(i, actions.get(str(i), -1))
if action == -1:
continue
if isinstance(corr_list[i][0], dict):
corr_idx = self._selection_index(canonical, selection=corr_list[i])
else:
corr_idx = s_values[s_values.isin(map(str, corr_list[i]))].index
rtn_values.update(self._apply_action(canonical, action=action, select_idx=corr_idx, seed=_seed))
if isinstance(rtn_type, str):
if rtn_type in ['category', 'object'] or rtn_type.startswith('int') or rtn_type.startswith('float'):
rtn_values = rtn_values.astype(rtn_type)
return rtn_values
return rtn_values.to_list()
def _correlate_dates(self, canonical: Any, header: str, offset: [int, dict]=None, jitter: int=None,
jitter_units: str=None, jitter_freq: list=None, now_delta: str=None, date_format: str=None,
min_date: str=None, max_date: str=None, fill_nulls: bool=None, day_first: bool=None,
year_first: bool=None, seed: int=None, rtn_type: str=None):
""" correlates dates to an existing date or list of dates. The return is a list of pd
:param canonical: a pd.DataFrame as the reference dataframe
:param header: the header in the DataFrame to correlate
:param offset: (optional) an offset to the date. If an int is passed it is assumed to be a 'days' offset,
otherwise a dictionary of pd.DateOffset units, e.g. {'days': 1}
:param jitter: (optional) the random jitter or deviation in days
:param jitter_units: (optional) the units of the jitter, Options: 'W', 'D', 'h', 'm', 's'. default 'D'
:param jitter_freq: (optional) a relative freq with the pattern mid point the mid point of the jitter
:param now_delta: (optional) returns a delta from now as an int list, Options: 'Y', 'M', 'W', 'D', 'h', 'm', 's'
:param min_date: (optional) a minimum date not to go below
:param max_date: (optional) a max date not to go above
:param fill_nulls: (optional) if null date values should remain untouched or be filled with the mode date
:param day_first: (optional) if the dates given are day first format. Default to True
:param year_first: (optional) if the dates given are year first. Default to False
:param date_format: (optional) the format of the output
:param seed: (optional) a seed value for the random function: default to None
:param rtn_type: (optional) changes the default return of a 'list' to a pd.Series
other than the int, float, category, string and object, passing 'as-is' will return as is
:return: a list of equal size to that given
"""
canonical = self._get_canonical(canonical, header=header)
if not isinstance(header, str) or header not in canonical.columns:
raise ValueError(f"The header '{header}' can't be found in the canonical DataFrame")
values = canonical[header].copy()
if values.empty:
return list()
def _clean(control):
_unit_type = ['years', 'months', 'weeks', 'days', 'leapdays', 'hours', 'minutes', 'seconds']
_params = {}
if isinstance(control, int):
control = {'days': control}
if isinstance(control, dict):
for k, v in control.items():
if k not in _unit_type:
raise ValueError(f"The key '{k}' in 'offset', is not a recognised unit type for pd.DateOffset")
return control
_seed = self._seed() if seed is None else seed
fill_nulls = False if fill_nulls is None or not isinstance(fill_nulls, bool) else fill_nulls
offset = _clean(offset) if isinstance(offset, (dict, int)) else None
if isinstance(now_delta, str) and now_delta not in ['Y', 'M', 'W', 'D', 'h', 'm', 's']:
raise ValueError(f"the now_delta offset unit '{now_delta}' is not recognised "
f"use of of ['Y', 'M', 'W', 'D', 'h', 'm', 's']")
units_allowed = ['W', 'D', 'h', 'm', 's']
jitter_units = jitter_units if isinstance(jitter_units, str) and jitter_units in units_allowed else 'D'
jitter = pd.Timedelta(value=jitter, unit=jitter_units) if isinstance(jitter, int) else None
# set minimum date
_min_date = pd.to_datetime(min_date, errors='coerce', infer_datetime_format=True, utc=True)
if _min_date is None or _min_date is pd.NaT:
_min_date = pd.to_datetime(pd.Timestamp.min, utc=True)
# set max date
_max_date = | pd.to_datetime(max_date, errors='coerce', infer_datetime_format=True, utc=True) | pandas.to_datetime |
import numpy as np
import pytest
import pandas as pd
from pandas import (
CategoricalDtype,
CategoricalIndex,
DataFrame,
Index,
IntervalIndex,
MultiIndex,
Series,
Timestamp,
)
import pandas._testing as tm
class TestDataFrameSortIndex:
def test_sort_index_and_reconstruction_doc_example(self):
# doc example
df = DataFrame(
{"value": [1, 2, 3, 4]},
index=MultiIndex(
levels=[["a", "b"], ["bb", "aa"]], codes=[[0, 0, 1, 1], [0, 1, 0, 1]]
),
)
assert df.index.is_lexsorted()
assert not df.index.is_monotonic
# sort it
expected = DataFrame(
{"value": [2, 1, 4, 3]},
index=MultiIndex(
levels=[["a", "b"], ["aa", "bb"]], codes=[[0, 0, 1, 1], [0, 1, 0, 1]]
),
)
result = df.sort_index()
assert result.index.is_lexsorted()
assert result.index.is_monotonic
tm.assert_frame_equal(result, expected)
# reconstruct
result = df.sort_index().copy()
result.index = result.index._sort_levels_monotonic()
assert result.index.is_lexsorted()
assert result.index.is_monotonic
tm.assert_frame_equal(result, expected)
def test_sort_index_non_existent_label_multiindex(self):
# GH#12261
df = DataFrame(0, columns=[], index=MultiIndex.from_product([[], []]))
df.loc["b", "2"] = 1
df.loc["a", "3"] = 1
result = df.sort_index().index.is_monotonic
assert result is True
def test_sort_index_reorder_on_ops(self):
# GH#15687
df = DataFrame(
np.random.randn(8, 2),
index=MultiIndex.from_product(
[["a", "b"], ["big", "small"], ["red", "blu"]],
names=["letter", "size", "color"],
),
columns=["near", "far"],
)
df = df.sort_index()
def my_func(group):
group.index = ["newz", "newa"]
return group
result = df.groupby(level=["letter", "size"]).apply(my_func).sort_index()
expected = MultiIndex.from_product(
[["a", "b"], ["big", "small"], ["newa", "newz"]],
names=["letter", "size", None],
)
tm.assert_index_equal(result.index, expected)
def test_sort_index_nan_multiindex(self):
# GH#14784
# incorrect sorting w.r.t. nans
tuples = [[12, 13], [np.nan, np.nan], [np.nan, 3], [1, 2]]
mi = MultiIndex.from_tuples(tuples)
df = DataFrame(np.arange(16).reshape(4, 4), index=mi, columns=list("ABCD"))
s = Series(np.arange(4), index=mi)
df2 = DataFrame(
{
"date": pd.DatetimeIndex(
[
"20121002",
"20121007",
"20130130",
"20130202",
"20130305",
"20121002",
"20121207",
"20130130",
"20130202",
"20130305",
"20130202",
"20130305",
]
),
"user_id": [1, 1, 1, 1, 1, 3, 3, 3, 5, 5, 5, 5],
"whole_cost": [
1790,
np.nan,
280,
259,
np.nan,
623,
90,
312,
np.nan,
301,
359,
801,
],
"cost": [12, 15, 10, 24, 39, 1, 0, np.nan, 45, 34, 1, 12],
}
).set_index(["date", "user_id"])
# sorting frame, default nan position is last
result = df.sort_index()
expected = df.iloc[[3, 0, 2, 1], :]
tm.assert_frame_equal(result, expected)
# sorting frame, nan position last
result = df.sort_index(na_position="last")
expected = df.iloc[[3, 0, 2, 1], :]
tm.assert_frame_equal(result, expected)
# sorting frame, nan position first
result = df.sort_index(na_position="first")
expected = df.iloc[[1, 2, 3, 0], :]
tm.assert_frame_equal(result, expected)
# sorting frame with removed rows
result = df2.dropna().sort_index()
expected = df2.sort_index().dropna()
tm.assert_frame_equal(result, expected)
# sorting series, default nan position is last
result = s.sort_index()
expected = s.iloc[[3, 0, 2, 1]]
tm.assert_series_equal(result, expected)
# sorting series, nan position last
result = s.sort_index(na_position="last")
expected = s.iloc[[3, 0, 2, 1]]
tm.assert_series_equal(result, expected)
# sorting series, nan position first
result = s.sort_index(na_position="first")
expected = s.iloc[[1, 2, 3, 0]]
tm.assert_series_equal(result, expected)
def test_sort_index_nan(self):
# GH#3917
# Test DataFrame with nan label
df = DataFrame(
{"A": [1, 2, np.nan, 1, 6, 8, 4], "B": [9, np.nan, 5, 2, 5, 4, 5]},
index=[1, 2, 3, 4, 5, 6, np.nan],
)
# NaN label, ascending=True, na_position='last'
sorted_df = df.sort_index(kind="quicksort", ascending=True, na_position="last")
expected = DataFrame(
{"A": [1, 2, np.nan, 1, 6, 8, 4], "B": [9, np.nan, 5, 2, 5, 4, 5]},
index=[1, 2, 3, 4, 5, 6, np.nan],
)
tm.assert_frame_equal(sorted_df, expected)
# NaN label, ascending=True, na_position='first'
sorted_df = df.sort_index(na_position="first")
expected = DataFrame(
{"A": [4, 1, 2, np.nan, 1, 6, 8], "B": [5, 9, np.nan, 5, 2, 5, 4]},
index=[np.nan, 1, 2, 3, 4, 5, 6],
)
tm.assert_frame_equal(sorted_df, expected)
# NaN label, ascending=False, na_position='last'
sorted_df = df.sort_index(kind="quicksort", ascending=False)
expected = DataFrame(
{"A": [8, 6, 1, np.nan, 2, 1, 4], "B": [4, 5, 2, 5, np.nan, 9, 5]},
index=[6, 5, 4, 3, 2, 1, np.nan],
)
tm.assert_frame_equal(sorted_df, expected)
# NaN label, ascending=False, na_position='first'
sorted_df = df.sort_index(
kind="quicksort", ascending=False, na_position="first"
)
expected = DataFrame(
{"A": [4, 8, 6, 1, np.nan, 2, 1], "B": [5, 4, 5, 2, 5, np.nan, 9]},
index=[np.nan, 6, 5, 4, 3, 2, 1],
)
| tm.assert_frame_equal(sorted_df, expected) | pandas._testing.assert_frame_equal |
import os
import unittest
import warnings
from collections import defaultdict
from unittest import mock
import numpy as np
import pandas as pd
import six
from dataprofiler.profilers import TextColumn, utils
from dataprofiler.profilers.profiler_options import TextOptions
from dataprofiler.tests.profilers import utils as test_utils
test_root_path = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
class TestTextColumnProfiler(unittest.TestCase):
def setUp(self):
test_utils.set_seed(seed=0)
def test_profiled_vocab(self):
"""
Checks whether the vocab list for the profiler is correct.
:return:
"""
df1 = pd.Series([
"abcd", "aa", "abcd", "aa", "b", "4", "3", "2", "dfd", "2",
]).apply(str)
df2 = pd.Series(["1", "1", "ee", "ff", "ff", "gg",
"gg", "abcd", "aa", "b", "ee", "b"]).apply(str)
df3 = pd.Series([
"NaN", "b", "nan", "c",
]).apply(str)
text_profiler = TextColumn(df1.name)
text_profiler.update(df1)
unique_vocab = dict.fromkeys(''.join(df1.tolist())).keys()
six.assertCountEqual(self, unique_vocab, text_profiler.vocab)
six.assertCountEqual(
self, set(text_profiler.vocab), text_profiler.vocab)
text_profiler.update(df2)
df = pd.concat([df1, df2])
unique_vocab = dict.fromkeys(''.join(df.tolist())).keys()
six.assertCountEqual(self, unique_vocab, text_profiler.vocab)
six.assertCountEqual(
self, set(text_profiler.vocab), text_profiler.vocab)
text_profiler.update(df3)
df = pd.concat([df1, df2, df3])
unique_vocab = dict.fromkeys(''.join(df.tolist())).keys()
six.assertCountEqual(self, unique_vocab, text_profiler.vocab)
def test_profiled_str_numerics(self):
"""
Checks whether the mean, variance and stddev of the string lengths are correct.
:return:
"""
def mean(df):
total = 0
for item in df:
total += item
return total / len(df)
def var(df):
var = 0
mean_df = mean(df)
for item in df:
var += (item - mean_df) ** 2
return var / (len(df) - 1)
def batch_variance(mean_a, var_a, count_a, mean_b, var_b, count_b):
delta = mean_b - mean_a
m_a = var_a * (count_a - 1)
m_b = var_b * (count_b - 1)
M2 = m_a + m_b + delta ** 2 * count_a * count_b / (
count_a + count_b)
return M2 / (count_a + count_b - 1)
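# Note: the pairwise combination above mirrors the standard parallel variance
# update (Chan et al.), which is what this test expects the profiler to match
# when merging batch statistics.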
df1 = pd.Series([
"abcd", "aa", "abcd", "aa", "b", "4", "3", "2", "dfd", "2", np.nan,
]).apply(str)
df2 = pd.Series(["1", "1", "ee", "ff", "ff", "gg",
"gg", "abcd", "aa", "b", "ee", "b"]).apply(str)
df3 = pd.Series([
"NaN", "b", "nan", "c", None,
]).apply(str)
text_profiler = TextColumn(df1.name)
text_profiler.update(df1)
self.assertEqual(mean(df1.str.len()), text_profiler.mean)
self.assertAlmostEqual(var(df1.str.len()), text_profiler.variance)
self.assertAlmostEqual(
np.sqrt(var(df1.str.len())), text_profiler.stddev)
variance = batch_variance(
mean_a=text_profiler.mean,
var_a=text_profiler.variance,
count_a=text_profiler.sample_size,
mean_b=mean(df2.str.len()),
var_b=var(df2.str.len()),
count_b=df2.count()
)
text_profiler.update(df2)
df = pd.concat([df1, df2])
self.assertEqual(df.str.len().mean(), text_profiler.mean)
self.assertAlmostEqual(variance, text_profiler.variance)
self.assertAlmostEqual(np.sqrt(variance), text_profiler.stddev)
variance = batch_variance(
mean_a=text_profiler.mean,
var_a=text_profiler.variance,
count_a=text_profiler.match_count,
mean_b=mean(df3.str.len()),
var_b=var(df3.str.len()),
count_b=df3.count()
)
text_profiler.update(df3)
df = pd.concat([df1, df2, df3])
self.assertEqual(df.str.len().mean(), text_profiler.mean)
self.assertAlmostEqual(variance, text_profiler.variance)
self.assertAlmostEqual(np.sqrt(variance), text_profiler.stddev)
def test_base_case(self):
data = pd.Series([], dtype=object)
profiler = TextColumn(data.name)
profiler.update(data)
profiler.update(data) # intentional to validate no changes if empty
self.assertEqual(profiler.match_count, 0)
self.assertEqual(profiler.min, None)
self.assertEqual(profiler.max, None)
self.assertEqual(profiler.sum, 0)
self.assertIsNone(profiler.data_type_ratio)
def test_data_ratio(self):
# should always be 1.0 unless empty
df1 = pd.Series([
"abcd", "aa", "abcd", "aa", "b", "4", "3", "2", "dfd", "2",
]).apply(str)
profiler = TextColumn(df1.name)
profiler.update(df1)
self.assertEqual(profiler.data_type_ratio, 1.0)
# ensure batch update doesn't alter values
profiler.update(df1)
self.assertEqual(profiler.data_type_ratio, 1.0)
def test_profiled_min(self):
df = pd.Series(["aaa", "aa", "aaaa", "aaa"]).apply(str)
profiler = TextColumn(df.name)
profiler.update(df)
self.assertEqual(profiler.min, 2)
df = pd.Series(["aa", "a"]).apply(str)
profiler.update(df)
self.assertEqual(profiler.min, 1)
def test_profiled_max(self):
df = | pd.Series(["a", "aa", "a", "a"]) | pandas.Series |
#Move all functions to this file
import pandas as pd
import numpy as np
from urllib import request
import json
import csv
import re
import time
import random
global api_key
api_key = '<KEY>'
def clean_movie_name(movie_name):
value = movie_name.strip().replace(' ','+')
return value
def get_tmdb_movie_id(movie_name,release_year):
query_url = 'https://api.themoviedb.org/3/search/movie?api_key={api_key}&query={movie_name}'.format(api_key=api_key,movie_name=movie_name)
resp = request.urlopen(query_url)
data = json.load(resp)
for movie in data['results']:
if release_year == movie['release_date'][:4]:
return movie['id']
else:
continue
def get_movie_details(movie_id):
url = 'https://api.themoviedb.org/3/movie/{movie_id}?api_key={api_key}&language=en-US'.format(api_key=api_key,movie_id = movie_id)
resp = request.urlopen(url)
data_dict = json.load(resp)
df = convert_resp_dict_series(data_dict)
#data_string = json.dumps(data)
#print(type(data_string),data_string)
return df
def convert_resp_dict_series(resp_dict):
df = pd.DataFrame.from_dict(resp_dict, orient='index')
return df.transpose()
def get_movielens_details(movie_string):
for item in re.finditer("(?P<name>[a-zA-Z\s:,#&-'0-9!?]*)(?:\()(?P<year>[0-9\s]*)(?:\))",movie_string):
m = item.groupdict()
return m
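# Illustrative end-to-end lookup (hedged, the title below is only an example and the
# TMDB response is not verified here):
#   details = get_movielens_details('Heat (1995)')   # {'name': 'Heat ', 'year': '1995'}
#   movie_id = get_tmdb_movie_id(clean_movie_name(details['name']), details['year'])
#   movie_df = get_movie_details(movie_id)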
movielens = | pd.read_csv("C:/Users/tam74426/MADS/SIADS 591/Project/data/ml-25m/movies.csv") | pandas.read_csv |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import pandas as pd
from datetime import datetime, timedelta
import numpy as np
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import os
import matplotlib.ticker as tck
import matplotlib.font_manager as fm
import math as m
import matplotlib.dates as mdates
import netCDF4 as nc
from netCDF4 import Dataset
import itertools
import datetime
from matplotlib.font_manager import FontProperties
#------------------------------------------------------------------------------
# Code motivation ---------------------------------------------------------------
'This version of the code derives the hourly and seasonal thresholds of the reflectances at the selected'
'pixels every 15 minutes, because it uses the 2018 GOES dataset, which is the most complete and allows the'
'thresholds to be obtained seasonally. The older version of this code, which derived them every 10 minutes'
'over the experiment horizon, is kept in the Backups_VersionesAtiguas_Codigos folder in case it needs to be'
'consulted again.'
#-----------------------------------------------------------------------------
# Paths for the fonts ---------------------------------------------------------
prop = fm.FontProperties(fname='/home/nacorreasa/SIATA/Cod_Califi/AvenirLTStd-Heavy.otf' )
prop_1 = fm.FontProperties(fname='/home/nacorreasa/SIATA/Cod_Califi/AvenirLTStd-Book.otf')
prop_2 = fm.FontProperties(fname='/home/nacorreasa/SIATA/Cod_Califi/AvenirLTStd-Black.otf')
## -------------------------------HOURS OVER WHICH TO WORK----------------------------- ##
HI = '06:00'; HF = '17:59'
#################################################################################################
## -----------------INCORPORATING THE RADIATION AND EXPERIMENT DATA----------------- ##
#################################################################################################
df_P975 = pd.read_table('/home/nacorreasa/Maestria/Datos_Tesis/Piranometro/60012018.txt', parse_dates=[2])
df_P350 = pd.read_table('/home/nacorreasa/Maestria/Datos_Tesis/Piranometro/60022018.txt', parse_dates=[2])
df_P348 = pd.read_table('/home/nacorreasa/Maestria/Datos_Tesis/Piranometro/60032018.txt', parse_dates=[2])
df_P975 = df_P975.set_index(["fecha_hora"])
df_P975.index = df_P975.index.tz_localize('UTC').tz_convert('America/Bogota')
df_P975.index = df_P975.index.tz_localize(None)
df_P350 = df_P350.set_index(["fecha_hora"])
df_P350.index = df_P350.index.tz_localize('UTC').tz_convert('America/Bogota')
df_P350.index = df_P350.index.tz_localize(None)
df_P348 = df_P348.set_index(["fecha_hora"])
df_P348.index = df_P348.index.tz_localize('UTC').tz_convert('America/Bogota')
df_P348.index = df_P348.index.tz_localize(None)
df_P975.index = pd.to_datetime(df_P975.index, format="%Y-%m-%d %H:%M:%S", errors='coerce')
df_P350.index = pd.to_datetime(df_P350.index, format="%Y-%m-%d %H:%M:%S", errors='coerce')
df_P348.index = pd.to_datetime(df_P348.index, format="%Y-%m-%d %H:%M:%S", errors='coerce')
## ----------------RESTRICTING THE DATA TO VALID VALUES---------------- ##
'Since radiation is the variable of interest here, only radiation values greater than 0'
'are kept when filtering the data.'
df_P975 = df_P975[(df_P975['radiacion'] > 0) ]
df_P350 = df_P350[(df_P350['radiacion'] > 0) ]
df_P348 = df_P348[(df_P348['radiacion'] > 0) ]
df_P975_h = df_P975.groupby(pd.Grouper(level='fecha_hora', freq='1H')).mean()
df_P350_h = df_P350.groupby(pd.Grouper(level='fecha_hora', freq='1H')).mean()
df_P348_h = df_P348.groupby(pd.Grouper(level='fecha_hora', freq='1H')).mean()
##----FITTING THE MEASURED RADIATION DATA TO THE DESIRED DATE RANGE-----##
def daterange(start_date, end_date):
    'Adjusts the dates for the hourly Kumar model. The start and end'
    'dates are strings in %Y-%m-%d format.'
start_date = datetime.datetime.strptime(start_date, '%Y-%m-%d')
end_date = datetime.datetime.strptime(end_date, '%Y-%m-%d')
delta = timedelta(minutes=60)
while start_date <= end_date:
yield start_date
start_date += delta
fechas_975 = []
for i in daterange(df_P975.index[0].date().strftime("%Y-%m-%d"), (df_P975.index[-1].date() + timedelta(days=1)).strftime("%Y-%m-%d")):
fechas_975.append(i)
fechas_350 = []
for i in daterange(df_P350.index[0].date().strftime("%Y-%m-%d"), (df_P350.index[-1].date() + timedelta(days=1)).strftime("%Y-%m-%d")):
fechas_350.append(i)
fechas_348 = []
for i in daterange(df_P348.index[0].date().strftime("%Y-%m-%d"), (df_P348.index[-1].date() + timedelta(days=1)).strftime("%Y-%m-%d")):
fechas_348.append(i)
fi_m = min(fechas_975[0].month, fechas_350[0].month, fechas_348[0].month)
fi_d = min(fechas_975[0].day, fechas_350[0].day, fechas_348[0].day)
ff_m = min(fechas_975[-1].month, fechas_350[-1].month, fechas_348[-1].month)
ff_d = min(fechas_975[-1].day, fechas_350[-1].day, fechas_348[-1].day)
## -----------------------------AGGREGATING PYRANOMETER DATA EVERY 15 MINUTES ------------------------------ ##
df_P348_15m = df_P348.groupby(pd.Grouper(freq="15Min")).mean()
df_P350_15m = df_P350.groupby(pd.Grouper(freq="15Min")).mean()
df_P975_15m = df_P975.groupby(pd.Grouper(freq="15Min")).mean()
df_P348_15m = df_P348_15m.between_time(HI, HF)
df_P350_15m = df_P350_15m.between_time(HI, HF)
df_P975_15m = df_P975_15m.between_time(HI, HF)
df_P348_15m = df_P348_15m.loc[~df_P348_15m.index.duplicated(keep='first')]
df_P350_15m = df_P350_15m.loc[~df_P350_15m.index.duplicated(keep='first')]
df_P975_15m = df_P975_15m.loc[~df_P975_15m.index.duplicated(keep='first')]
####################################################################################
## ----------------READING THE GOES CH2 DATA - FULL GRID---------------- ##
####################################################################################
Rad = np.load('/home/nacorreasa/Maestria/Datos_Tesis/Arrays/Array_Rad_2018_2019CH2.npy')
fechas_horas = np.load('/home/nacorreasa/Maestria/Datos_Tesis/Arrays/Array_FechasHoras_Anio.npy')
df_fh = pd.DataFrame()
df_fh ['fecha_hora'] = fechas_horas
df_fh['fecha_hora'] = pd.to_datetime(df_fh['fecha_hora'], format="%Y-%m-%d %H:%M", errors='coerce')
df_fh.index = df_fh['fecha_hora']
w = pd.date_range(df_fh.index.min(), df_fh.index.max()).difference(df_fh.index)
df_fh = df_fh[df_fh.index.hour != 5]
#################################################################################################
##-------------------READING THE GOES CH2 DATA FOR EACH PIXEL--------------------------##
#################################################################################################
Rad_pixel_975 = np.load('/home/nacorreasa/Maestria/Datos_Tesis/Arrays/Array_Rad_pix975_Anio.npy')
Rad_pixel_350 = np.load('/home/nacorreasa/Maestria/Datos_Tesis/Arrays/Array_Rad_pix350_Anio.npy')
Rad_pixel_348 = np.load('/home/nacorreasa/Maestria/Datos_Tesis/Arrays/Array_Rad_pix348_Anio.npy')
fechas_horas = df_fh['fecha_hora'].values
## -- Building the radiance dataframes
Rad_df_975 = pd.DataFrame()
Rad_df_975['Fecha_Hora'] = fechas_horas
Rad_df_975['Radiacias'] = Rad_pixel_975
Rad_df_975['Fecha_Hora'] = pd.to_datetime(Rad_df_975['Fecha_Hora'], format="%Y-%m-%d %H:%M", errors='coerce')
Rad_df_975.index = Rad_df_975['Fecha_Hora']
Rad_df_975 = Rad_df_975.drop(['Fecha_Hora'], axis=1)
Rad_df_975 = Rad_df_975.between_time('06:00', '18:00')                      ##--> Keep only the daytime hours
Rad_df_975_h = Rad_df_975.groupby(pd.Grouper(freq="H")).mean()
Rad_df_350 = pd.DataFrame()
Rad_df_350['Fecha_Hora'] = fechas_horas
Rad_df_350['Radiacias'] = Rad_pixel_350
Rad_df_350['Fecha_Hora'] = pd.to_datetime(Rad_df_350['Fecha_Hora'], format="%Y-%m-%d %H:%M", errors='coerce')
Rad_df_350.index = Rad_df_350['Fecha_Hora']
Rad_df_350 = Rad_df_350.drop(['Fecha_Hora'], axis=1)
Rad_df_350 = Rad_df_350.between_time('06:00', '18:00')                      ##--> Keep only the daytime hours
Rad_df_350_h = Rad_df_350.groupby(pd.Grouper(freq="H")).mean()
Rad_df_348 = pd.DataFrame()
Rad_df_348['Fecha_Hora'] = fechas_horas
Rad_df_348['Radiacias'] = Rad_pixel_348
Rad_df_348['Fecha_Hora'] = pd.to_datetime(Rad_df_348['Fecha_Hora'], format="%Y-%m-%d %H:%M", errors='coerce')
Rad_df_348.index = Rad_df_348['Fecha_Hora']
Rad_df_348 = Rad_df_348.drop(['Fecha_Hora'], axis=1)
Rad_df_348 = Rad_df_348.between_time('06:00', '18:00')                      ##--> Keep only the daytime hours
Rad_df_348_h = Rad_df_348.groupby(pd.Grouper(freq="H")).mean()
def time_mod(time, delta, epoch=None):
if epoch is None:
epoch = datetime.datetime(1970, 1, 1, tzinfo=time.tzinfo)
return (time - epoch) % delta
def time_round(time, delta, epoch=None):
mod = time_mod(time, delta, epoch)
if mod < (delta / 2):
return time - mod
return time + (delta - mod)
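# Quick sketch of the rounding helpers above (the timestamp is arbitrary): 10:07 falls
# in the first half of its 15-minute slot and rounds down to 10:00, while 10:08 would
# round up to 10:15.
_round_check = time_round(datetime.datetime(2018, 3, 5, 10, 7), datetime.timedelta(minutes=15))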
Rad_df_348.index = [time_round(Rad_df_348.index[i], datetime.timedelta(minutes=15)) for i in range(len(Rad_df_348.index))]
Rad_df_350.index = [time_round(Rad_df_350.index[i], datetime.timedelta(minutes=15)) for i in range(len(Rad_df_350.index))]
Rad_df_975.index = [time_round(Rad_df_975.index[i], datetime.timedelta(minutes=15)) for i in range(len(Rad_df_975.index))]
Rad_df_348 = Rad_df_348.loc[~Rad_df_348.index.duplicated(keep='first')]
Rad_df_350 = Rad_df_350.loc[~Rad_df_350.index.duplicated(keep='first')]
Rad_df_975 = Rad_df_975.loc[~Rad_df_975.index.duplicated(keep='first')]
##----------------------------------RESTRICTING TO 2018 DATA ONLY---------------------------------##
Rad_df_975 = Rad_df_975[Rad_df_975.index.year==2018]
Rad_df_350 = Rad_df_350[Rad_df_350.index.year==2018]
Rad_df_348 = Rad_df_348[Rad_df_348.index.year==2018]
###############################################################################################################################
## ---------------------------------READING THE KUMAR THEORETICAL RADIATION DATA----------------------------------------- ##
###############################################################################################################################
def daterange(start_date, end_date):
    'Adjusts the dates for the Kumar model every 10 min. The start and end dates are strings in %Y-%m-%d format.'
start_date = datetime.datetime.strptime(start_date, '%Y-%m-%d')
end_date = datetime.datetime.strptime(end_date, '%Y-%m-%d')
delta = timedelta(minutes=10)
while start_date <= end_date:
yield start_date
start_date += delta
def serie_Kumar_Model(estacion):
    'Returns an hourly dataframe with the theoretical radiation following the Kumar recommendations, produced by <NAME> ' \
    'for AMVA and their thesis. The original dataframe has its data sorted into 12 ascending months (2018), although they may ' \
    'belong to different years. The result is for the selected point, using the Total_Timeseries.csv file. Update the year.'
data_Model = pd.read_csv('/home/nacorreasa/Maestria/Datos_Tesis/Radiacion_GIS/Teoricos_Totales/Total_Timeseries_Rad_2018.csv',
sep=',')
fecha_hora = [pd.to_datetime(data_Model['Unnamed: 0'], format="%Y-%m-%d %H:%M:%S")[i].to_pydatetime() for i in
range(len(data_Model['Unnamed: 0']))]
data_Model.index = fecha_hora
data_Model = data_Model.sort_index()
data_Model['Month'] = np.array(data_Model.index.month)
data_Model = data_Model.sort_values(by="Month")
fechas = []
for i in daterange('2018-01-01', '2019-01-01'):
fechas.append(i)
fechas = fechas[0:-1]
if estacion == '6001':
punto = data_Model['TS_kumar']
elif estacion == '6002':
punto = data_Model['CI_kumar']
elif estacion == '6003':
punto = data_Model['JV_kumar']
Rad_teorica = []
for i in range(len(fechas)):
mes = fechas[i].month
hora = fechas[i].hour
mint = fechas[i].minute
rad = \
np.where((data_Model.index.month == mes) & (data_Model.index.hour == hora) & (data_Model.index.minute == mint))[
0]
if len(rad) == 0:
Rad_teorica.append(np.nan)
else:
Rad_teorica.append(punto.iloc[rad].values[0])
data_Theorical = pd.DataFrame()
data_Theorical['fecha_hora'] = fechas
data_Theorical['Radiacion'] = Rad_teorica
data_Theorical.index = data_Theorical['fecha_hora']
data_Theorical = data_Theorical[data_Theorical['Radiacion'] > 0]
data_hourly_theoric = data_Theorical.groupby(pd.Grouper(freq="H")).mean()
return data_hourly_theoric, data_Theorical
df_hourly_theoric_348, df_Theorical_348 = serie_Kumar_Model('6003')
df_hourly_theoric_350, df_Theorical_350 = serie_Kumar_Model('6002')
df_hourly_theoric_975, df_Theorical_975 = serie_Kumar_Model('6001')
######################################################################################################################
## -----------------------------TRIMMING THE THEORETICAL RADIATION TO THE MEASURED RADIATION PERIOD------------------------------ ##
######################################################################################################################
df_hourly_theoric_348 = df_hourly_theoric_348[(df_hourly_theoric_348.index >= '2018-'+'0'+str(df_P348.index.month[0])
+'-'+str(df_P348.index.day[0])) & (df_hourly_theoric_348.index <= '2018-'+str(df_P348.index.month[-1])
+'-'+str(df_P348.index.day[-1]))]
df_hourly_theoric_350 = df_hourly_theoric_350[(df_hourly_theoric_350.index >= '2018-'+'0'+str(df_P350.index.month[0])
+'-'+str(df_P350.index.day[0])) & (df_hourly_theoric_350.index <= '2018-'+str(df_P350.index.month[-1])
+'-'+str(df_P350.index.day[-1]))]
df_hourly_theoric_975 = df_hourly_theoric_975[(df_hourly_theoric_975.index >= '2018-'+'0'+str(df_P975.index.month[0])
+'-'+str(df_P975.index.day[0])) & (df_hourly_theoric_975.index <= '2018-'+str(df_P975.index.month[-1])
+'-'+str(df_P975.index.day[-1]))]
df_Theorical_348 = df_Theorical_348[(df_Theorical_348.index >= '2018-'+'0'+str(df_P348.index.month[0])
+'-'+str(df_P348.index.day[0])) & (df_Theorical_348.index <= '2018-'+str(df_P348.index.month[-1])
+'-'+str(df_P348.index.day[-1]))]
df_Theorical_350 = df_Theorical_350[(df_Theorical_350.index >= '2018-'+'0'+str(df_P350.index.month[0])
+'-'+str(df_P350.index.day[0])) & (df_Theorical_350.index <= '2018-'+str(df_P350.index.month[-1])
+'-'+str(df_P350.index.day[-1]))]
df_Theorical_975 = df_Theorical_975[(df_Theorical_975.index >= '2018-'+'0'+str(df_P975.index.month[0])
+'-'+str(df_P975.index.day[0])) & (df_Theorical_975.index <= '2018-'+str(df_P975.index.month[-1])
+'-'+str(df_P975.index.day[-1]))]
df_Theorical_348.index = [time_round(df_Theorical_348.index[i], datetime.timedelta(minutes=15)) for i in range(len(df_Theorical_348.index))]
df_Theorical_350.index = [time_round(df_Theorical_350.index[i], datetime.timedelta(minutes=15)) for i in range(len(df_Theorical_350.index))]
df_Theorical_975.index = [time_round(df_Theorical_975.index[i], datetime.timedelta(minutes=15)) for i in range(len(df_Theorical_975.index))]
df_Theorical_348 = df_Theorical_348.drop(['fecha_hora'], axis=1)
df_Theorical_348 = df_Theorical_348.loc[~df_Theorical_348.index.duplicated(keep='first')]
df_Theorical_350 = df_Theorical_350.drop(['fecha_hora'], axis=1)
df_Theorical_350 = df_Theorical_350.loc[~df_Theorical_350.index.duplicated(keep='first')]
df_Theorical_975 = df_Theorical_975.drop(['fecha_hora'], axis=1)
df_Theorical_975 = df_Theorical_975.loc[~df_Theorical_975.index.duplicated(keep='first')]
##################################################################################################################
## -----------------------------ANALYSIS OF RESULTS AND THRESHOLD DETERMINATION------------------------------ ##
##################################################################################################################
'The reflectances for clear-sky conditions are determined with the pyranometer every 15 minutes. Cloudy reflectances '
'at each point are detected from the derivatives, split into morning and afternoon. For now they are set with the maximum.'
df_result_348 = pd.concat([df_P348_15m, Rad_df_348, df_Theorical_348], axis=1)
df_result_350 = pd.concat([df_P350_15m, Rad_df_350, df_Theorical_350], axis=1)
df_result_975 = pd.concat([df_P975_15m, Rad_df_975, df_Theorical_975], axis=1)
df_result_348['Rad_deriv'] = np.gradient(df_result_348['radiacion'].values)
df_result_350['Rad_deriv'] = np.gradient(df_result_350['radiacion'].values)
df_result_975['Rad_deriv'] = np.gradient(df_result_975['radiacion'].values)
df_result_348 = df_result_348.drop(['Unnamed: 0', 'idestacion', 'temperatura'], axis=1)
df_result_350 = df_result_350.drop(['Unnamed: 0', 'idestacion', 'temperatura'], axis=1)
df_result_975 = df_result_975.drop(['Unnamed: 0', 'idestacion', 'temperatura'], axis=1)
## ---- CLOUDY-CASE THRESHOLD :
df_P348_15m_Nuba_Morning = df_result_348.between_time('06:00','11:59')
df_P348_15m_Nuba_Afternoon = df_result_348.between_time('12:00','17:59')
df_P350_15m_Nuba_Morning = df_result_350.between_time('06:00','11:59')
df_P350_15m_Nuba_Afternoon = df_result_350.between_time('12:00','17:59')
df_P975_15m_Nuba_Morning = df_result_975.between_time('06:00','11:59')
df_P975_15m_Nuba_Afternoon = df_result_975.between_time('12:00','17:59')
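## Sketch of the derivative rule applied below (the values are made up, not measurements):
## in the morning clear-sky radiation rises, so a negative gradient flags clouds; in the
## afternoon it falls, so a positive gradient flags clouds.
_demo_rad = pd.Series([100., 300., 120., 140.])
_demo_cloudy = _demo_rad[np.gradient(_demo_rad.values) < 0]   ## flags the dip at index 2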
##-----------------------------------------348--------------------------------------##
df_P348_Ref_Morning = df_P348_15m_Nuba_Morning['Radiacias'][df_P348_15m_Nuba_Morning['Rad_deriv']<0]
df_P348_Ref_Morning = df_P348_Ref_Morning.groupby([df_P348_Ref_Morning.index.month, df_P348_Ref_Morning.index.hour]).mean()
df_P348_Ref_Afternoon = df_P348_15m_Nuba_Afternoon['Radiacias'][df_P348_15m_Nuba_Afternoon['Rad_deriv']>0]
df_P348_Ref_Afternoon = df_P348_Ref_Afternoon.groupby([df_P348_Ref_Afternoon.index.month, df_P348_Ref_Afternoon.index.hour]).mean()
df_HourlySeasonalThrereshold_348_Nuba = | pd.concat([df_P348_Ref_Morning, df_P348_Ref_Afternoon]) | pandas.concat |
import tempfile
import copy
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
import pandas as pd
try:
from scipy.spatial import distance
from scipy.cluster import hierarchy
_no_scipy = False
except ImportError:
_no_scipy = True
try:
import fastcluster
assert fastcluster
_no_fastcluster = False
except ImportError:
_no_fastcluster = True
import numpy.testing as npt
try:
import pandas.testing as pdt
except ImportError:
import pandas.util.testing as pdt
import pytest
from .. import matrix as mat
from .. import color_palette
from .._testing import assert_colors_equal
class TestHeatmap:
rs = np.random.RandomState(sum(map(ord, "heatmap")))
x_norm = rs.randn(4, 8)
letters = pd.Series(["A", "B", "C", "D"], name="letters")
df_norm = pd.DataFrame(x_norm, index=letters)
x_unif = rs.rand(20, 13)
df_unif = pd.DataFrame(x_unif)
default_kws = dict(vmin=None, vmax=None, cmap=None, center=None,
robust=False, annot=False, fmt=".2f", annot_kws=None,
cbar=True, cbar_kws=None, mask=None)
def test_ndarray_input(self):
p = mat._HeatMapper(self.x_norm, **self.default_kws)
npt.assert_array_equal(p.plot_data, self.x_norm)
pdt.assert_frame_equal(p.data, pd.DataFrame(self.x_norm))
npt.assert_array_equal(p.xticklabels, np.arange(8))
npt.assert_array_equal(p.yticklabels, np.arange(4))
assert p.xlabel == ""
assert p.ylabel == ""
def test_df_input(self):
p = mat._HeatMapper(self.df_norm, **self.default_kws)
npt.assert_array_equal(p.plot_data, self.x_norm)
pdt.assert_frame_equal(p.data, self.df_norm)
npt.assert_array_equal(p.xticklabels, np.arange(8))
npt.assert_array_equal(p.yticklabels, self.letters.values)
assert p.xlabel == ""
assert p.ylabel == "letters"
def test_df_multindex_input(self):
df = self.df_norm.copy()
index = pd.MultiIndex.from_tuples([("A", 1), ("B", 2),
("C", 3), ("D", 4)],
names=["letter", "number"])
index.name = "letter-number"
df.index = index
p = mat._HeatMapper(df, **self.default_kws)
combined_tick_labels = ["A-1", "B-2", "C-3", "D-4"]
npt.assert_array_equal(p.yticklabels, combined_tick_labels)
assert p.ylabel == "letter-number"
p = mat._HeatMapper(df.T, **self.default_kws)
npt.assert_array_equal(p.xticklabels, combined_tick_labels)
assert p.xlabel == "letter-number"
@pytest.mark.parametrize("dtype", [float, np.int64, object])
def test_mask_input(self, dtype):
kws = self.default_kws.copy()
mask = self.x_norm > 0
kws['mask'] = mask
data = self.x_norm.astype(dtype)
p = mat._HeatMapper(data, **kws)
plot_data = np.ma.masked_where(mask, data)
npt.assert_array_equal(p.plot_data, plot_data)
def test_mask_limits(self):
"""Make sure masked cells are not used to calculate extremes"""
kws = self.default_kws.copy()
mask = self.x_norm > 0
kws['mask'] = mask
p = mat._HeatMapper(self.x_norm, **kws)
assert p.vmax == np.ma.array(self.x_norm, mask=mask).max()
assert p.vmin == np.ma.array(self.x_norm, mask=mask).min()
mask = self.x_norm < 0
kws['mask'] = mask
p = mat._HeatMapper(self.x_norm, **kws)
assert p.vmin == np.ma.array(self.x_norm, mask=mask).min()
assert p.vmax == np.ma.array(self.x_norm, mask=mask).max()
def test_default_vlims(self):
p = mat._HeatMapper(self.df_unif, **self.default_kws)
assert p.vmin == self.x_unif.min()
assert p.vmax == self.x_unif.max()
def test_robust_vlims(self):
kws = self.default_kws.copy()
kws["robust"] = True
p = mat._HeatMapper(self.df_unif, **kws)
assert p.vmin == np.percentile(self.x_unif, 2)
assert p.vmax == np.percentile(self.x_unif, 98)
def test_custom_sequential_vlims(self):
kws = self.default_kws.copy()
kws["vmin"] = 0
kws["vmax"] = 1
p = mat._HeatMapper(self.df_unif, **kws)
assert p.vmin == 0
assert p.vmax == 1
def test_custom_diverging_vlims(self):
kws = self.default_kws.copy()
kws["vmin"] = -4
kws["vmax"] = 5
kws["center"] = 0
p = mat._HeatMapper(self.df_norm, **kws)
assert p.vmin == -4
assert p.vmax == 5
def test_array_with_nans(self):
x1 = self.rs.rand(10, 10)
nulls = np.zeros(10) * np.nan
x2 = np.c_[x1, nulls]
m1 = mat._HeatMapper(x1, **self.default_kws)
m2 = mat._HeatMapper(x2, **self.default_kws)
assert m1.vmin == m2.vmin
assert m1.vmax == m2.vmax
def test_mask(self):
df = pd.DataFrame(data={'a': [1, 1, 1],
'b': [2, np.nan, 2],
'c': [3, 3, np.nan]})
kws = self.default_kws.copy()
kws["mask"] = np.isnan(df.values)
m = mat._HeatMapper(df, **kws)
npt.assert_array_equal(np.isnan(m.plot_data.data),
m.plot_data.mask)
def test_custom_cmap(self):
kws = self.default_kws.copy()
kws["cmap"] = "BuGn"
p = mat._HeatMapper(self.df_unif, **kws)
assert p.cmap == mpl.cm.BuGn
def test_centered_vlims(self):
kws = self.default_kws.copy()
kws["center"] = .5
p = mat._HeatMapper(self.df_unif, **kws)
assert p.vmin == self.df_unif.values.min()
assert p.vmax == self.df_unif.values.max()
def test_default_colors(self):
vals = np.linspace(.2, 1, 9)
cmap = mpl.cm.binary
ax = mat.heatmap([vals], cmap=cmap)
fc = ax.collections[0].get_facecolors()
cvals = np.linspace(0, 1, 9)
npt.assert_array_almost_equal(fc, cmap(cvals), 2)
def test_custom_vlim_colors(self):
vals = np.linspace(.2, 1, 9)
cmap = mpl.cm.binary
ax = mat.heatmap([vals], vmin=0, cmap=cmap)
fc = ax.collections[0].get_facecolors()
npt.assert_array_almost_equal(fc, cmap(vals), 2)
def test_custom_center_colors(self):
vals = np.linspace(.2, 1, 9)
cmap = mpl.cm.binary
ax = mat.heatmap([vals], center=.5, cmap=cmap)
fc = ax.collections[0].get_facecolors()
npt.assert_array_almost_equal(fc, cmap(vals), 2)
def test_cmap_with_properties(self):
kws = self.default_kws.copy()
cmap = copy.copy(mpl.cm.get_cmap("BrBG"))
cmap.set_bad("red")
kws["cmap"] = cmap
hm = mat._HeatMapper(self.df_unif, **kws)
npt.assert_array_equal(
cmap(np.ma.masked_invalid([np.nan])),
hm.cmap(np.ma.masked_invalid([np.nan])))
kws["center"] = 0.5
hm = mat._HeatMapper(self.df_unif, **kws)
npt.assert_array_equal(
cmap(np.ma.masked_invalid([np.nan])),
hm.cmap(np.ma.masked_invalid([np.nan])))
kws = self.default_kws.copy()
cmap = copy.copy(mpl.cm.get_cmap("BrBG"))
cmap.set_under("red")
kws["cmap"] = cmap
hm = mat._HeatMapper(self.df_unif, **kws)
npt.assert_array_equal(cmap(-np.inf), hm.cmap(-np.inf))
kws["center"] = .5
hm = mat._HeatMapper(self.df_unif, **kws)
npt.assert_array_equal(cmap(-np.inf), hm.cmap(-np.inf))
kws = self.default_kws.copy()
cmap = copy.copy(mpl.cm.get_cmap("BrBG"))
cmap.set_over("red")
kws["cmap"] = cmap
hm = mat._HeatMapper(self.df_unif, **kws)
npt.assert_array_equal(cmap(-np.inf), hm.cmap(-np.inf))
kws["center"] = .5
hm = mat._HeatMapper(self.df_unif, **kws)
npt.assert_array_equal(cmap(np.inf), hm.cmap(np.inf))
def test_tickabels_off(self):
kws = self.default_kws.copy()
kws['xticklabels'] = False
kws['yticklabels'] = False
p = mat._HeatMapper(self.df_norm, **kws)
assert p.xticklabels == []
assert p.yticklabels == []
def test_custom_ticklabels(self):
kws = self.default_kws.copy()
xticklabels = list('iheartheatmaps'[:self.df_norm.shape[1]])
yticklabels = list('heatmapsarecool'[:self.df_norm.shape[0]])
kws['xticklabels'] = xticklabels
kws['yticklabels'] = yticklabels
p = mat._HeatMapper(self.df_norm, **kws)
assert p.xticklabels == xticklabels
assert p.yticklabels == yticklabels
def test_custom_ticklabel_interval(self):
kws = self.default_kws.copy()
xstep, ystep = 2, 3
kws['xticklabels'] = xstep
kws['yticklabels'] = ystep
p = mat._HeatMapper(self.df_norm, **kws)
nx, ny = self.df_norm.T.shape
npt.assert_array_equal(p.xticks, np.arange(0, nx, xstep) + .5)
npt.assert_array_equal(p.yticks, np.arange(0, ny, ystep) + .5)
npt.assert_array_equal(p.xticklabels,
self.df_norm.columns[0:nx:xstep])
npt.assert_array_equal(p.yticklabels,
self.df_norm.index[0:ny:ystep])
def test_heatmap_annotation(self):
ax = mat.heatmap(self.df_norm, annot=True, fmt=".1f",
annot_kws={"fontsize": 14})
for val, text in zip(self.x_norm.flat, ax.texts):
assert text.get_text() == "{:.1f}".format(val)
assert text.get_fontsize() == 14
def test_heatmap_annotation_overwrite_kws(self):
annot_kws = dict(color="0.3", va="bottom", ha="left")
ax = mat.heatmap(self.df_norm, annot=True, fmt=".1f",
annot_kws=annot_kws)
for text in ax.texts:
assert text.get_color() == "0.3"
assert text.get_ha() == "left"
assert text.get_va() == "bottom"
def test_heatmap_annotation_with_mask(self):
df = pd.DataFrame(data={'a': [1, 1, 1],
'b': [2, np.nan, 2],
'c': [3, 3, np.nan]})
mask = np.isnan(df.values)
df_masked = np.ma.masked_where(mask, df)
ax = mat.heatmap(df, annot=True, fmt='.1f', mask=mask)
assert len(df_masked.compressed()) == len(ax.texts)
for val, text in zip(df_masked.compressed(), ax.texts):
assert "{:.1f}".format(val) == text.get_text()
def test_heatmap_annotation_mesh_colors(self):
ax = mat.heatmap(self.df_norm, annot=True)
mesh = ax.collections[0]
assert len(mesh.get_facecolors()) == self.df_norm.values.size
plt.close("all")
def test_heatmap_annotation_other_data(self):
annot_data = self.df_norm + 10
ax = mat.heatmap(self.df_norm, annot=annot_data, fmt=".1f",
annot_kws={"fontsize": 14})
for val, text in zip(annot_data.values.flat, ax.texts):
assert text.get_text() == "{:.1f}".format(val)
assert text.get_fontsize() == 14
def test_heatmap_annotation_with_limited_ticklabels(self):
ax = mat.heatmap(self.df_norm, fmt=".2f", annot=True,
xticklabels=False, yticklabels=False)
for val, text in zip(self.x_norm.flat, ax.texts):
assert text.get_text() == "{:.2f}".format(val)
def test_heatmap_cbar(self):
f = plt.figure()
mat.heatmap(self.df_norm)
assert len(f.axes) == 2
plt.close(f)
f = plt.figure()
mat.heatmap(self.df_norm, cbar=False)
assert len(f.axes) == 1
plt.close(f)
f, (ax1, ax2) = plt.subplots(2)
mat.heatmap(self.df_norm, ax=ax1, cbar_ax=ax2)
assert len(f.axes) == 2
plt.close(f)
@pytest.mark.xfail(mpl.__version__ == "3.1.1",
reason="matplotlib 3.1.1 bug")
def test_heatmap_axes(self):
ax = mat.heatmap(self.df_norm)
xtl = [int(l.get_text()) for l in ax.get_xticklabels()]
assert xtl == list(self.df_norm.columns)
ytl = [l.get_text() for l in ax.get_yticklabels()]
assert ytl == list(self.df_norm.index)
assert ax.get_xlabel() == ""
assert ax.get_ylabel() == "letters"
assert ax.get_xlim() == (0, 8)
assert ax.get_ylim() == (4, 0)
def test_heatmap_ticklabel_rotation(self):
f, ax = plt.subplots(figsize=(2, 2))
mat.heatmap(self.df_norm, xticklabels=1, yticklabels=1, ax=ax)
for t in ax.get_xticklabels():
assert t.get_rotation() == 0
for t in ax.get_yticklabels():
assert t.get_rotation() == 90
plt.close(f)
df = self.df_norm.copy()
df.columns = [str(c) * 10 for c in df.columns]
df.index = [i * 10 for i in df.index]
f, ax = plt.subplots(figsize=(2, 2))
mat.heatmap(df, xticklabels=1, yticklabels=1, ax=ax)
for t in ax.get_xticklabels():
assert t.get_rotation() == 90
for t in ax.get_yticklabels():
assert t.get_rotation() == 0
plt.close(f)
def test_heatmap_inner_lines(self):
c = (0, 0, 1, 1)
ax = mat.heatmap(self.df_norm, linewidths=2, linecolor=c)
mesh = ax.collections[0]
assert mesh.get_linewidths()[0] == 2
assert tuple(mesh.get_edgecolor()[0]) == c
def test_square_aspect(self):
ax = mat.heatmap(self.df_norm, square=True)
obs_aspect = ax.get_aspect()
# mpl>3.3 returns 1 for setting "equal" aspect
# so test for the two possible equal outcomes
assert obs_aspect == "equal" or obs_aspect == 1
def test_mask_validation(self):
mask = mat._matrix_mask(self.df_norm, None)
assert mask.shape == self.df_norm.shape
assert mask.values.sum() == 0
with pytest.raises(ValueError):
bad_array_mask = self.rs.randn(3, 6) > 0
mat._matrix_mask(self.df_norm, bad_array_mask)
with pytest.raises(ValueError):
bad_df_mask = pd.DataFrame(self.rs.randn(4, 8) > 0)
mat._matrix_mask(self.df_norm, bad_df_mask)
def test_missing_data_mask(self):
data = pd.DataFrame(np.arange(4, dtype=float).reshape(2, 2))
data.loc[0, 0] = np.nan
mask = mat._matrix_mask(data, None)
npt.assert_array_equal(mask, [[True, False], [False, False]])
mask_in = np.array([[False, True], [False, False]])
mask_out = mat._matrix_mask(data, mask_in)
npt.assert_array_equal(mask_out, [[True, True], [False, False]])
def test_cbar_ticks(self):
f, (ax1, ax2) = plt.subplots(2)
mat.heatmap(self.df_norm, ax=ax1, cbar_ax=ax2,
cbar_kws=dict(drawedges=True))
assert len(ax2.collections) == 2
@pytest.mark.skipif(_no_scipy, reason="Test requires scipy")
class TestDendrogram:
rs = np.random.RandomState(sum(map(ord, "dendrogram")))
default_kws = dict(linkage=None, metric='euclidean', method='single',
axis=1, label=True, rotate=False)
x_norm = rs.randn(4, 8) + np.arange(8)
x_norm = (x_norm.T + np.arange(4)).T
letters = pd.Series(["A", "B", "C", "D", "E", "F", "G", "H"],
name="letters")
df_norm = pd.DataFrame(x_norm, columns=letters)
if not _no_scipy:
if _no_fastcluster:
x_norm_distances = distance.pdist(x_norm.T, metric='euclidean')
x_norm_linkage = hierarchy.linkage(x_norm_distances, method='single')
else:
x_norm_linkage = fastcluster.linkage_vector(x_norm.T,
metric='euclidean',
method='single')
x_norm_dendrogram = hierarchy.dendrogram(x_norm_linkage, no_plot=True,
color_threshold=-np.inf)
x_norm_leaves = x_norm_dendrogram['leaves']
df_norm_leaves = np.asarray(df_norm.columns[x_norm_leaves])
def test_ndarray_input(self):
p = mat._DendrogramPlotter(self.x_norm, **self.default_kws)
npt.assert_array_equal(p.array.T, self.x_norm)
pdt.assert_frame_equal(p.data.T, | pd.DataFrame(self.x_norm) | pandas.DataFrame |
import warnings
warnings.simplefilter("ignore", category=FutureWarning)
from pmaf.biome.essentials._metakit import EssentialFeatureMetabase
from pmaf.biome.essentials._base import EssentialBackboneBase
from pmaf.internal._constants import (
AVAIL_TAXONOMY_NOTATIONS,
jRegexGG,
jRegexQIIME,
BIOM_TAXONOMY_NAMES,
VALID_RANKS,
)
from pmaf.internal._shared import (
generate_lineages_from_taxa,
get_rank_upto,
indentify_taxon_notation,
validate_ranks,
extract_valid_ranks,
cols2ranks,
)
from collections import defaultdict
from os import path
import pandas as pd
import numpy as np
import biom
from typing import Union, Sequence, Tuple, Any, Optional
from pmaf.internal._typing import AnyGenericIdentifier, Mapper
class RepTaxonomy(EssentialBackboneBase, EssentialFeatureMetabase):
"""An `essential` class for handling taxonomy data."""
def __init__(
self,
taxonomy: Union[pd.DataFrame, pd.Series, str],
taxonomy_columns: Union[str, int, Sequence[Union[int, str]]] = None,
**kwargs: Any
) -> None:
"""Constructor for :class:`.RepTaxonomy`
Parameters
----------
taxonomy
Data containing feature taxonomy
taxonomy_columns
Column(s) containing taxonomy data
kwargs
Passed to :func:`~pandas.read_csv` or :mod:`biome` loader.
"""
tmp_metadata = kwargs.pop("metadata", {})
self.__avail_ranks = []
self.__internal_taxonomy = None
if isinstance(taxonomy, pd.DataFrame):
if taxonomy.shape[0] > 0:
if taxonomy.shape[1] > 1:
if validate_ranks(list(taxonomy.columns.values), VALID_RANKS):
tmp_taxonomy = taxonomy
else:
                        raise ValueError(
                            "Provided `taxonomy` DataFrame has invalid ranks."
)
else:
tmp_taxonomy = taxonomy.iloc[:, 0]
else:
                raise ValueError("Provided `taxonomy` DataFrame is invalid.")
elif isinstance(taxonomy, pd.Series):
if taxonomy.shape[0] > 0:
tmp_taxonomy = taxonomy
else:
raise ValueError("Provided `taxonomy` Series is invalid.")
elif isinstance(taxonomy, str):
if path.isfile(taxonomy):
file_extension = path.splitext(taxonomy)[-1].lower()
if file_extension in [".csv", ".tsv"]:
if taxonomy_columns is None:
tmp_taxonomy = pd.read_csv(
taxonomy,
sep=kwargs.pop("sep", ","),
header=kwargs.pop("header", "infer"),
index_col=kwargs.pop("index_col", None),
)
else:
if isinstance(taxonomy_columns, int):
tmp_taxonomy = pd.read_csv(
taxonomy,
sep=kwargs.pop("sep", ","),
header=kwargs.pop("header", "infer"),
index_col=kwargs.pop("index_col", None),
).iloc[:, taxonomy_columns]
else:
tmp_taxonomy = pd.read_csv(
taxonomy,
sep=kwargs.pop("sep", ","),
header=kwargs.pop("header", "infer"),
index_col=kwargs.pop("index_col", None),
).loc[:, taxonomy_columns]
elif file_extension in [".biom", ".biome"]:
tmp_taxonomy, new_metadata = self.__load_biom(taxonomy, **kwargs)
tmp_metadata.update({"biom": new_metadata})
else:
raise NotImplementedError("File type is not supported.")
else:
raise FileNotFoundError("Provided `taxonomy` file path is invalid.")
else:
raise TypeError("Provided `taxonomy` has invalid type.")
self.__init_internal_taxonomy(tmp_taxonomy, **kwargs)
super().__init__(metadata=tmp_metadata, **kwargs)
@classmethod
def from_csv(
cls,
filepath: str,
taxonomy_columns: Union[str, int, Sequence[Union[int, str]]] = None,
**kwargs: Any
) -> "RepTaxonomy":
"""Factory method to construct a :class:`.RepTaxonomy` from CSV file.
Parameters
----------
filepath
Path to .csv File
taxonomy_columns
Column(s) containing taxonomy data
kwargs
Passed to the constructor.
Returns
-------
        Instance of :class:`.RepTaxonomy`
"""
if taxonomy_columns is None:
tmp_taxonomy = pd.read_csv(filepath, **kwargs)
else:
if isinstance(taxonomy_columns, int):
tmp_taxonomy = pd.read_csv(filepath, **kwargs).iloc[:, taxonomy_columns]
else:
tmp_taxonomy = | pd.read_csv(filepath, **kwargs) | pandas.read_csv |
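# Hypothetical usage sketch of the factory above (the file name, separator and column
# name are placeholders, not part of pmaf):
#   tax = RepTaxonomy.from_csv("taxonomy.tsv", taxonomy_columns="Taxon", sep="\t", index_col=0)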
import os
import sys
import warnings
sys.path.append(os.path.abspath('../'))
import numpy as np
from tqdm import tqdm
from imageio import mimwrite
from skimage import img_as_float, img_as_uint
from skimage.io import imread, imsave
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from natsort import natsorted
from scipy.interpolate import interp1d
from matplotlib.ticker import LogLocator, NullFormatter
import matplotlib.patches as patches
from cytomata.plot import plot_cell_img, plot_bkg_profile, plot_uy
from cytomata.process import preprocess_img, segment_object, segment_clusters, process_u_csv
from cytomata.utils import list_img_files, custom_styles, custom_palette
def process_fluo_timelapse(img_dir, save_dir, u_csv=None,
t_unit='s', ulabel='BL', sb_microns=11, cmax=None,
segmt=False, segmt_dots=False, segmt_mask=None, segmt_factor=1,
remove_small=None, fill_holes=None, clear_border=None, adj_bright=False):
"""Analyze fluorescence timelapse images and generate figures."""
if cmax is None:
cmax = np.max([np.percentile(img_as_float(imread(imgf)), 99.9) for imgf in list_img_files(img_dir)])
n_imgs = len(list_img_files(img_dir))
t = [float(os.path.splitext(os.path.basename(imgf))[0]) for imgf in list_img_files(img_dir)]
y = []
tu = []
u = []
t_ann_img = []
imgs = []
    if u_csv is not None and os.path.isfile(u_csv) and os.path.exists(u_csv):
tu, u, t_ann_img = process_u_csv(t, u_csv, save_dir)
factor = segmt_factor
for i, imgf in enumerate(tqdm(list_img_files(img_dir))):
fname = os.path.splitext(os.path.basename(imgf))[0]
fname = str(round(float(fname), 2))
img, raw, bkg, den = preprocess_img(imgf)
plot_bkg_profile(fname, save_dir, raw, bkg)
thr = None
yi = np.mean(img)
if segmt:
if os.path.isfile(segmt_mask) and os.path.exists(segmt_mask):
seg_bound = img_as_float(imread(segmt_mask)) > 0
if adj_bright:
a_reg = img[img > 0]
if os.path.isfile(segmt_mask) and os.path.exists(segmt_mask):
a_reg = seg_bound*img
a_reg = a_reg[a_reg > 0]
if i == 0:
kval = np.mean(a_reg)
segmt_factor = (np.mean(a_reg)/kval) + factor - 1
if segmt_dots:
thr = segment_clusters(den, factor=segmt_factor, rs=remove_small)
else:
thr = segment_object(den, factor=segmt_factor,
rs=remove_small, fh=fill_holes, cb=clear_border)
if os.path.isfile(segmt_mask) and os.path.exists(segmt_mask):
thr *= seg_bound
yi = np.mean(img[thr])
if np.isnan(yi):
yi = 0
y.append(yi)
sig_ann = round(float(fname), 1) in t_ann_img
cell_img = plot_cell_img(den, thr, fname, save_dir,
cmax, sig_ann, t_unit=t_unit, sb_microns=sb_microns)
imgs.append(cell_img)
prog = (i+1)/n_imgs * 100
plot_uy(t, y, tu, u, save_dir, t_unit=t_unit, ulabel=ulabel)
data = np.column_stack((t, y))
np.savetxt(os.path.join(save_dir, 'y.csv'),
data, delimiter=',', header='t,y', comments='')
with warnings.catch_warnings():
warnings.simplefilter('ignore')
mimwrite(os.path.join(save_dir, 'cell.gif'), imgs, fps=len(imgs)//10)
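# Hypothetical call sketch for the pipeline above (paths and tuning values are
# placeholders, not project defaults):
#   process_fluo_timelapse('data/imgs', 'results/run1', u_csv='data/u.csv',
#                          segmt=True, segmt_mask='data/mask.tif',
#                          segmt_factor=1.2, remove_small=64)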
def combine_uy(root_dir, fold_change=True, plot_u=True):
with plt.style.context(('seaborn-whitegrid', custom_styles)):
if plot_u:
fig, (ax0, ax) = plt.subplots(2, 1, sharex=True,
figsize=(16, 10), gridspec_kw={'height_ratios': [1, 8]})
else:
fig, ax = plt.subplots(figsize=(10,8))
combined_t = pd.DataFrame()
combined_y = pd.DataFrame()
combined_tu = pd.DataFrame()
combined_u = pd.DataFrame()
for i, data_dir in enumerate(natsorted([x[1] for x in os.walk(root_dir)][0])):
y_csv = os.path.join(root_dir, data_dir, 'y.csv')
y_data = pd.read_csv(y_csv)
t = y_data['t'].values
y = y_data['y'].values
yf = interp1d(t, y, fill_value='extrapolate')
t = np.arange(round(t[0]), round(t[-1]) + 1, 1)
t = pd.Series(t, index=t, name=i)
combined_t = pd.concat([combined_t, t], axis=1)
y = pd.Series([yf(ti) for ti in t], index=t, name=i)
if fold_change:
y = y/np.mean(y[:5])
combined_y = pd.concat([combined_y, y], axis=1)
ax.plot(y, color='#1976D2', alpha=1, linewidth=1)
u_csv = os.path.join(root_dir, data_dir, 'u.csv')
if plot_u:
u_data = pd.read_csv(u_csv)
tu = u_data['t'].values
tu = pd.Series(tu, index=tu, name=i)
u = | pd.Series(u_data['u'].values, index=tu, name=i) | pandas.Series |
"""
Core classes and functions of the pybps package
"""
# Common imports
import os
import sys
import re
import sqlite3
from copy import deepcopy
from multiprocessing import Pool, cpu_count, freeze_support
from time import time, sleep
from random import uniform
from shutil import copy, copytree
from string import Template
# Third-party imports
import pandas as pd
from pandas.io import sql
# Custom imports
from pybps import util
import pybps.preprocess.trnsys as trnsys_pre
import pybps.preprocess.daysim as daysim_pre
import pybps.postprocess.trnsys as trnsys_post
import pybps.postprocess.daysim as daysim_post
# Handle Python 2/3 compatibility
from six.moves import configparser
import six
if six.PY2:
ConfigParser = configparser.SafeConfigParser
else:
ConfigParser = configparser.ConfigParser
def run_job(job):
"""Prepare, Preprocess, Run and Close a BPSJob
This function is called by the multiprocessing.pool.map method
This function can be overridden by giving a new function to the
self.runjob_func variable. However, the argument should always be "job"
and the function should include methods from the BPSJob class.
"""
print("Running simulation job %s ..." % job.jobID)
job.prepare()
job.preprocess()
job.run()
job.close()
return job.runsumdict
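# The docstring above notes that the runner can be swapped out via `runjob_func`; a
# minimal sketch of such an override (the quiet variant is illustrative, not part of pybps):
def run_job_quiet(job):
    """Variant of run_job that skips the progress print."""
    job.prepare()
    job.preprocess()
    job.run()
    job.close()
    return job.runsumdict
# A project would opt in with: my_project.runjob_func = run_job_quiet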
def sort_key_dfcolnames(x):
"""Sort key function for list of pandas DataFrame column names.
Used to put 'JobID' column first in pandas DataFrame"""
if x == 'JobID':
return (0, x)
elif x == 'ModelFile':
return (1, x)
elif x == 'SampleFile':
return (2, x)
else:
return (3, x)
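# Example of the ordering this key produces on typical jobs-DataFrame columns
# (the parameter name 'Param_A' is made up):
_example_cols = sorted(['Param_A', 'SampleFile', 'JobID', 'ModelFile'],
                       key=sort_key_dfcolnames)
# -> ['JobID', 'ModelFile', 'SampleFile', 'Param_A']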
class BPSProject(object):
"""Class that holds all information and methods to manage parametric
building performance simulation projects"""
def __init__(self, path=None, validCheck=True, seriesID='random', startJobID=1):
"""Initialization of BPSProject Class
Args:
path: relative or absolute path to simulation project directory.
If not defined here, the "set_projpath" method should be used.
validCheck: if True (default), checks validity of inputs
seriesID: by default, seriesID is defined automatically (random)
However, the user can force the seriesID using this arg.
startJobID: by default, the start ID for jobs is 1, but this can
be overridden by giving any start number to this arg.
"""
# Create a unique id to identify current serie of job runs
if seriesID == 'random':
self.seriesID = util.random_str(8)
else:
self.seriesID = seriesID
print("\nBatch Series ID: %s" % self.seriesID)
# Set the start index number for jobs (by default: 1)
self.startJobID = startJobID
# A flag to enable or disable validity checking
self.valid_check = validCheck
# Simulation tool to be used
self.simtool = None
# Config info for detected simulation tool
self.config = {}
# Variable that holds the name of the 'run_jobs' function to be used
self.runjob_func = run_job
# True if project is a simulation batch, False otherwise
self._batch = False
# Simulation run time
self.simtime = 0
# Relative path to model file to be used in current run
self.model_relpath = None
# List of jobs to be run
self.jobs = []
# List of dicts containing run summaries for all jobs
self.runsummary = []
# Absolute path to base directory for jobs
self.jobsdir_abspath = None
# Absolute path to jobs results directory
self.resultsdir_abspath = None
# Name of results database
self.db_name = 'SimResults.db'
# Name of jobs csv/pkl file
self.jobs_fname = 'SimJobs'
# Name of results csv/pkl file
self.results_fname = 'SimResults'
# Name of run summary csv/pkl file
self.runsum_fname = 'RunSummary'
# List of relative paths to template simulation files
self.temp_relpaths = []
# List of parameters found in template files
self.temp_params = []
# Relative path to csv file containing list of jobs to be run
self.samp_relpath = None
# List of parameters found in sample file
self.samp_params = []
# Raw sample extracted from csv file. It's a list of dicts with
# each dict holding all parameter for a particular job
self.sample = []
# Pandas DataFrame for jobs list
self.jobs_df = None
# Pandas DataFrame for simulation results
self.results_df = None
# Pandas DataFrame for run summary
self.runsum_df = None
if path is not None:
# Absolute path to simulation project directory
self.abspath = os.path.abspath(path)
print("\nBPS project directory: " + self.abspath)
# Launch method to detect sim tool and store related config info
self.check()
else:
self.abspath = []
def set_projpath(self, path):
"""Define path to simulation project directory
Args:
path: relative or absolute path to simulation project directory.
"""
self.abspath = os.path.abspath(path)
print("\nBPS project directory: " + self.abspath)
self.check()
def get_sample(self, src='samplefile', seriesID=None):
"""Get sample from external source (csv file or sqlite database)
Args:
src: external source that contains sample, either
- "samplefile" (in CSV format)
- "database" (PyBPS-generated SQlite format, NOT IMPLEMENTED!)
seriesID: when getting sample from database, allows to specify the
seriesID of the sample (database can contain multiple samples)
"""
# Empty any previously created jobs list
self.sample = []
if src == 'samplefile':
# Get information needed to find jobs file in folder
samp_sstr = self.config['samplefile_searchstring']
samp_abspathlist = util.get_file_paths([samp_sstr], self.abspath)
# Check if there is no more than 1 sample file in directory
if len(samp_abspathlist) > 0:
samp_relpathlist = [os.path.relpath(fname, self.abspath)
for fname in samp_abspathlist]
if len(samp_relpathlist) > 1:
print('\nVarious sample files found in directory' +
'\nPlease select sample to be used in current run:')
for i, path in enumerate(samp_relpathlist):
print("(%d) %s" % (i+1, os.path.splitext(path)[0]))
                    select = int(input("Sample ID number: "))
self.samp_relpath = samp_relpathlist[select - 1]
print("You selected %s" % self.samp_relpath)
else:
self.samp_relpath = samp_relpathlist[0]
# Build list of dicts with parameter values for all job runs
samp_abspath = os.path.join(self.abspath, self.samp_relpath)
sample_data = pd.read_csv(samp_abspath)
self.sample = list(sample_data.transpose().to_dict().values())
# Add model and sample file names as parameters
for s in self.sample:
s['ModelFile'] = self.model_relpath
s['SampleFile'] = self.samp_relpath
else:
sys.stderr.write("Could not find any sample file in " +
"project directory\nPlease put a \'" + samp_sstr +
"\' file in directory and re-run 'get_sample' method\n")
elif src == 'database' and not seriesID:
print("\nPlease provide a seriesID to retrieve parameter list" +
" from database")
def get_parameterlist(self, src):
"""Returns list of parameters found in sample or template files.
Parameter list is returned sorted after eliminating duplicates.
Args:
src: file to be searched, either "sample" or "tempfile"
Returns:
Sorted list of parameters
"""
if src == 'sample':
self.samp_params = sorted(self.sample[0].keys())
elif src == 'tempfile':
pattern = re.compile(r'%(.*?)%')
self.temp_params = []
for temp_relpath in self.temp_relpaths:
# Open jobs file
temp_abspath = os.path.join(self.abspath, temp_relpath)
with open(temp_abspath, 'rU') as tmp_f:
# Read the entire file and store it in a temporary variable
temp = tmp_f.read()
# Build a list of all paramaters found in file
# Parameters are identified as strings surrounded by '%'
self.temp_params.extend(pattern.findall(temp))
# Remove duplicates, then sort list
self.temp_params = list(set(self.temp_params))
self.temp_params.sort()
else:
print("Unrecognized argument.")
def add_jobs(self):
"""Add simulation jobs to BPSProject
Simulation jobs are created and added to BPSProject only when this
function is called. This makes it possible to transform the sample
loaded from an external source (done automatically when BPSProject
class is initialized if a sample file is found in project directory)
prior to creating and adding jobs to the BPSProject. This can come in
handy is the variables in your sample differ from the parameters you
need for your model.
Args:
No args
Returns:
Warning messages if sample parameters don't match parameters found
in template files
"""
# Create main directory for simulation jobs if it doesn't exists
self.jobsdir_abspath = os.path.join(self.abspath, '../_pybps_simulations')
util.tmp_dir('create', self.jobsdir_abspath)
# Create directory to store jobs results in main project directory
# if it doesn't already exists
self.resultsdir_abspath = os.path.join(self.abspath, '../_pybps_results')
util.tmp_dir('create', self.resultsdir_abspath)
# Remove any previously created job
self.jobs = []
# Then, add jobs
if self._batch:
njob = len(self.sample)
# Get list of all parameters found in template files
self.get_parameterlist('tempfile')
self.get_parameterlist('sample')
if self.valid_check == True:
# Check if template files and jobs file contain the same list
# of parameters. Raise an error if not
if set(self.temp_params).issubset(self.samp_params):
for jobID in range(self.startJobID, self.startJobID + njob):
self.jobs.append(BPSJob(self, jobID))
print("\n%d jobs added to BPSProject instance" % njob)
else:
print("\nMismatch between template and sample file" +
" parameters!\nNo jobs added to BPSproject instance")
else:
                for jobID in range(self.startJobID, self.startJobID + njob):
self.jobs.append(BPSJob(self, jobID))
print("\n%d jobs added to BPSProject instance" % njob)
else:
print("\nBPS project not a batch run. Jobs can't be added")
def check(self):
"""Check for simulation files in project directory
Checks whether the necessary files are found in the indicated project
directory. Based on file extensions, simulation tool to be used is
detected.
Args:
No args
Returns:
Warning messages if some important files are missing that make it
impossible to run the parametric simulation project
"""
# Get information from config file
conf = ConfigParser()
conf_file = os.path.join(os.path.abspath(os.path.dirname(__file__)),
'config.ini')
for file in os.listdir(self.abspath):
if file.endswith('config.ini'):
conf_file = os.path.join(self.abspath, file)
print('Custom ' + file + ' config file will ' +
'be used instead of default config.ini')
conf.read(conf_file)
sections = conf.sections()
# Detect simulation tool used for current simulation job and check if
# basic simulation input files are there
found = 0 # Variable to store whether a simulation project was found
for section in sections:
# Get information needed to find model files in folder
model_ext = conf.get(section, 'ModelFile_Extensions')
model_ext = model_ext.split(',')
tmp_sstr = conf.get(section, 'TemplateFile_SearchString')
# Check if we can find a model file for the selected simtool
model_abspathlist = util.get_file_paths(model_ext, self.abspath)
if model_abspathlist:
model_relpathlist = [os.path.relpath(fname, self.abspath)
for fname in model_abspathlist]
# If another simtool was already detected, raise an error
if found > 0:
sys.stderr.write("\nInput files for different BPS " +
"tools found\nNo more than 1 kind of simulation " +
"file allowed in a same folder")
sys.exit(1)
# If not, store the name of detected simulation tool
self.simtool = section
print(self.simtool + " simulation project found in directory")
# Strip search string from model file names
for i, relpath in enumerate(model_relpathlist):
match = re.search(r'(.*)' + tmp_sstr + r'(.*)',
os.path.basename(relpath))
if match:
relpath = os.path.join(os.path.dirname(
relpath), match.group(1) + match.group(2))
model_relpathlist[i] = relpath
# If more than 1 model file found, ask user to select 1 model
if len(model_relpathlist) > 1:
if (len(model_relpathlist) == 2 and
model_relpathlist[0] == model_relpathlist[1]):
self.model_relpath = model_relpathlist[0]
else:
print('\nVarious model files found in directory' +
'\nPlease select model to be used in current run:')
print("(%d) %s" % (0, 'all models'))
for i, path in enumerate(model_relpathlist):
print("(%d) %s" % (i+1, os.path.splitext(
os.path.basename(path))[0]))
                        select = int(input("Model ID number: "))
if select == 0:
self.model_relpath = model_relpathlist
print('You selected all models')
else:
self.model_relpath = model_relpathlist[select - 1]
print("You selected %s" % self.model_relpath)
else:
self.model_relpath = model_relpathlist[0]
found += 1
if found == 0:
sys.stderr.write("\nNo BPS project found in the specified " +
"folder\nPlease check the folder path is correct and " +
"simulation files are in given folder")
sys.exit(1)
# Once simulation tool has been detected, store config info in 'config'
items = conf.items(self.simtool)
for (name, value) in items:
self.config[name] = value
# Once we have found a simulation project and stored config info,
# let's see if we can find a template file for this project
tmpfile_sstr = self.config['templatefile_searchstring']
temp_abspathlist = util.get_file_paths([tmpfile_sstr], self.abspath)
if temp_abspathlist:
self._batch = True
self.temp_relpaths = [os.path.relpath(f_name, self.abspath)
for f_name in temp_abspathlist]
# If template(s) found, check directory for sample file
self.get_sample()
# Identify project as batch run if user selected to run all models
elif len(self.model_relpath) > 1:
print("All model files will be run in batch mode when 'run' " +
"method is called.")
self._batch = True
# Add model and sample files relative paths as parameters
for i,(m,s) in enumerate(zip(self.model_relpath,self.samp_relpath)):
self.sample.append({})
self.sample[i]['ModelFile'] = m
self.sample[i]['SampleFile'] = s
# If no template file was found, give message to user
else:
print("No template found. BPS project identified as single run")
def run(self, ncore=-1, stopwatch=False, run_mode='silent', debug=False):
"""Run simulation jobs
Args:
ncore: number of local cores/threads to be used at a time
For ncore>=2, jobs will run in parallel.
By default (ncore=-1), the max number of local cores is used
stopwatch: flag to activate a stopwatch that monitors job run time
run_mode: for simulation tool that have this kind of command line
flag, allows to run tools in silent or continuous mode
For example, 'silent' runs TRNSYS with '/h' flag and
'nostop' runs TRNSYS with '/n' flag
debug: by default, simulation tool's standard output is captured
and therefore does not appear on screens. if debug is set to
'True' any output text return by the simuation tool is printed
(useful for debuggingsimulation model)
Returns:
Info message for current simulation job run
"""
#Create executable path for selected simulation tool
if self.simtool == 'TRNSYS':
executable_abspath = self.config['trnexe_path']
silent_flag = '/h'
nostop_flag = '/n'
elif self.simtool == 'DAYSIM':
executable_abspath = self.config['exe_path']
silent_flag = ''
nostop_flag = ''
# If simulation project is identified as single run, directly
# call simulation tool to run simulation
if self._batch == False:
# Build absolute path to model file
model_abspath = os.path.join(self.abspath, self.model_relpath)
# Run the simulation, by default in silent mode
if run_mode == 'silent': flag = silent_flag
elif run_mode == 'nostop': flag = nostop_flag
elif run_mode == 'normal': flag = None
cmd = [executable_abspath, model_abspath, flag]
# Measure simulation run time
start_time = time()
# Launch command
if debug == False:
util.run_cmd(cmd)
else:
util.run_cmd(cmd, debug=True)
# Save simulation time
self.simtime = round(time() - start_time, 3)
# If simulation project corresponds to a batch run, run jobs
# in parallel
else:
# Check first if there are some jobs defined
if self.jobs:
print('\nStarting batch run ...')
# Start timer if stopwatch requested by user
if stopwatch == True:
start_time = time()
# Create multiprocessing pool for parallel subprocess run
if ncore <= 0:
pool = Pool(None)
print(str(cpu_count()) +
' core(s) used in current run (max local cores)\n')
else:
pool = Pool(ncore)
print(str(ncore) + ' core(s) used in current run\n')
# This method automatically assigns processes to available
# cores and the entire operation stops when all values from
# jobs from the jobs list have been evaluated.
# A callback function is used to retrieve run summary from job
# and store it in runsummary list
r = pool.map_async(self.runjob_func, self.jobs, chunksize=1,
callback=self.runsummary.extend)
r.wait()
pool.close()
pool.join()
# Stop timer if stopwatch requested by user
if stopwatch == True:
self.simtime = time()-start_time
print('\nSimulation batch runtime: {:.2f} seconds'.format(self.simtime))
# Print an error message if no jobs were found
else:
print("\nNo simulation jobs found" +
"\n\nYou should first add simulation jobs to your BPSProject" +
"with the 'add_job' or " +
"\n'add_jobs' methods prior to calling the 'run' method")
def jobs2df(self):
"""Create pandas DataFrame from sample"""
# Build a 'pandas' DataFrame with all jobs parameters
jobdict_list = []
jobsdf_index = []
for job in self.jobs:
jobsdf_index.append(job.seriesID + '_' + job.jobID)
jobdict_list.append(job.jobdict)
colnames = sorted(jobdict_list[0].keys())
self.jobs_df = pd.DataFrame(jobdict_list, columns=colnames, index=jobsdf_index)
def runsum2df(self):
"""Create pandas DataFrame from run summary"""
# Build a 'pandas' DataFrame with run summaries for all jobs
colnames = ['JobID','Message','Warnings','Errors','SimulTime(sec)']
self.runsum_df = pd.DataFrame(self.runsummary, columns=colnames)
def results2df(self):
"""Create pandas DataFrame from simulation results"""
# Get extensions of results files
results_ext = self.config['resultfile_extensions']
results_ext = results_ext.split(',')
# Get list of paths to results files
results_abspathlist = util.get_file_paths(results_ext,
self.resultsdir_abspath)
# Go through all results files from all simulated jobs
df_exists = False
for results_abspath in results_abspathlist:
# Get Series/Job IDs
match = re.search(r'([A-Z0-9]{8})_[0-9]{5}', results_abspath)
if match:
# Only parse results within sub-folders pertaining to
# current batch run identified by seriesID
if match.group(1) == self.seriesID:
# Build a 'pandas' dataframe with results from all jobs
if self.simtool == 'TRNSYS':
dict_list = trnsys_post.parse_type46(
results_abspath)
elif self.simtool == 'DAYSIM':
if (os.path.splitext(os.path.basename(
results_abspath))[1] == '.htm'):
dict_list = daysim_post.parse_el_lighting(
results_abspath)
if dict_list:
for dict in dict_list:
dict['JobID'] = match.group()
colnames = dict_list[0].keys()
#colnames.sort(key = sort_key_dfcolnames)
if not df_exists:
self.results_df = pd.DataFrame(dict_list,
columns=colnames)
df_exists = True
else:
df = pd.DataFrame(dict_list, columns=colnames)
self.results_df = self.results_df.append(df,
ignore_index=True)
else:
print("No results dataframe created")
def save2db(self, items='all'):
"""Save project jobs/results to sql database
Args:
items: 'jobs','results' and 'runsummary' respectively save jobs,
results or run summary to the database; 'all' saves everything
"""
db_abspath = os.path.join(self.resultsdir_abspath, self.db_name)
cnx = sqlite3.connect(db_abspath)
if items == 'all' or items == 'jobs':
self.jobs_df.to_sql(name='Jobs', con=cnx, if_exists='append')
if items == 'all' or items == 'results':
self.results_df.to_sql(name='Results', con=cnx, if_exists='append')
if items == 'all' or items == 'runsummary':
self.runsum_df.to_sql(name='RunSummary', con=cnx, if_exists='append')
cnx.close()
def save2csv(self, items='all'):
"""Save project jobs/results to csv
Args:
items: 'jobs','results' and 'runsummary' respectively save jobs,
results or run summary to the csv file; 'all' saves everything
"""
jobscsv_abspath = os.path.join(self.resultsdir_abspath,
self.jobs_fname + '.csv')
resultscsv_abspath = os.path.join(self.resultsdir_abspath,
self.results_fname + '.csv')
runsumcsv_abspath = os.path.join(self.resultsdir_abspath,
self.runsum_fname + '.csv')
if items == 'all' or items == 'jobs':
self.jobs_df.to_csv(jobscsv_abspath)
if items == 'all' or items == 'results':
self.results_df.to_csv(resultscsv_abspath)
if items == 'all' or items == 'runsummary':
self.runsum_df.to_csv(runsumcsv_abspath)
def save2pkl(self, items='all'):
"""Save project jobs/results to pickled dataframe
Args:
items: 'jobs','results' and 'runsummary' respectively save jobs,
results or run summary to the pickled file; 'all' saves everything
"""
jobspkl_abspath = os.path.join(self.resultsdir_abspath,
self.jobs_fname + '.pkl')
resultspkl_abspath = os.path.join(self.resultsdir_abspath,
self.results_fname + '.pkl')
runsumpkl_abspath = os.path.join(self.resultsdir_abspath,
self.runsum_fname + '.pkl')
if items == 'all' or items == 'jobs':
self.jobs_df.to_pickle(jobspkl_abspath)
if items == 'all' or items == 'results':
self.results_df.to_pickle(resultspkl_abspath)
if items == 'all' or items == 'runsummary':
self.runsum_df.to_pickle(runsumpkl_abspath)
def getfromdb_jobs(self, seriesID=None, db_name="SimResults.db"):
"""Get jobs from database
NOT IMPLEMENTED YET! DO NOT USE!
"""
self.seriesID = seriesID
self.db_abspath = os.path.join(self.resultsdir_abspath, db_name)
cnx = sqlite3.connect(self.db_abspath)
if self.seriesID:
sql_query = (r"SELECT * FROM Parameters WHERE _JOB_ID LIKE '%s%%'"
% seriesID)
else:
sql_query = r"SELECT * FROM Parameters"
parameters_df = sql.read_frame(sql_query, cnx)
cnx.close()
return parameters_df
def getfromdb_results(self, seriesID=None, month=None, db_name="SimResults.db"):
"""Get results from database
NOT IMPLEMENTED YET! DO NOT USE!
"""
self.db_abspath = os.path.join(self.resultsdir_abspath, db_name)
cnx = sqlite3.connect(self.db_abspath)
if seriesID and month:
sql_query = (r"SELECT * FROM Results WHERE (_JOB_ID LIKE '%s%%')" +
" AND (Month LIKE '%s')" % (seriesID, month))
elif seriesID:
sql_query = (r"SELECT * FROM Results WHERE _JOB_ID LIKE '%s%%'"
% seriesID)
else:
sql_query = r"SELECT * FROM Results"
results_df = | sql.read_frame(sql_query, cnx) | pandas.io.sql.read_frame |
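# Typical end-to-end sketch for the class above (the project path is a placeholder and
# running it requires the corresponding simulation tool to be installed):
#   bpsproj = BPSProject('examples/trnsys_project')
#   bpsproj.add_jobs()
#   bpsproj.run(ncore=2, stopwatch=True)
#   bpsproj.jobs2df(); bpsproj.runsum2df(); bpsproj.results2df()
#   bpsproj.save2csv(items='all')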
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
def sigmoid(x):
return 1 / (1 + np.exp(-0.005 * x))
def sigmoid_derivative(x):
return 0.005 * x * (1 - x)
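# Hedged sanity check (not part of the original script): sigmoid_derivative()
# expects the *activated* output a = sigmoid(z), since for
# sigmoid(z) = 1 / (1 + exp(-0.005 * z)) the chain rule gives
# da/dz = 0.005 * a * (1 - a).
if __name__ == "__main__":
    _z = 100.0
    _eps = 1e-6
    # central-difference estimate of d sigmoid / dz at _z
    _numeric = (sigmoid(_z + _eps) - sigmoid(_z - _eps)) / (2 * _eps)
    assert abs(sigmoid_derivative(sigmoid(_z)) - _numeric) < 1e-9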
def read_and_divide_into_train_and_test(csv_file):
# Reading csv file here
df = pd.read_csv(csv_file)
# Dropping unnecessary column
df.drop(['Code_number'], axis=1, inplace=True)
# Replacing missing values in the Bare Nuclei column with mean of rest of the values
df['Bare_Nuclei'] = | pd.to_numeric(df['Bare_Nuclei'], errors='coerce') | pandas.to_numeric |
#v1.0
#v0.9 - All research graph via menu & mouse click
#v0.8 - Candlestick graphs
#v0.7 - Base version with all graphs and bug fixes
#v0.6
import pandas as pd
from pandas import DataFrame
from alpha_vantage.timeseries import TimeSeries
from alpha_vantage.techindicators import TechIndicators
class PrepareTestData():
def __init__(self, argFolder=None, argOutputSize='compact'):
super().__init__()
#argFolder='./scriptdata'
self.folder = argFolder + '/'
self.outputsize = argOutputSize.lower()
def loadDaily(self, argScript):
try:
if(self.outputsize == 'compact'):
filename=self.folder + 'daily_compact_'+argScript+'.csv'
else:
filename=self.folder + 'daily_full_'+argScript+'.csv'
csvdf = pd.read_csv(filename)
csvdf=csvdf.rename(columns={'open':'1. open', 'high':'2. high', 'low':'3. low', 'close':'4. close', 'volume': '5. volume'})
convert_type={'1. open':float, '2. high':float, '3. low':float, '4. close':float, '5. volume':float}
csvdf = csvdf.astype(convert_type)
csvdf.set_index('timestamp', inplace=True)
csvdf.index = pd.to_datetime(csvdf.index)
csvdf.index.names = ['date']
except Exception as e:
csvdf = DataFrame()
return csvdf
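    # Hedged usage sketch (the ticker symbol is illustrative; the folder
    # './scriptdata' only appears as a comment in __init__ above):
    #
    #   ptd = PrepareTestData(argFolder='./scriptdata', argOutputSize='compact')
    #   daily_df = ptd.loadDaily('MSFT')  # reads ./scriptdata/daily_compact_MSFT.csv
    #
    # The CSV is expected to carry Alpha Vantage-style columns
    # (timestamp, open, high, low, close, volume); loadDaily() renames them to
    # '1. open' ... '5. volume' and returns a frame indexed by date.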
def loadIntra(self, argScript):
try:
if(self.outputsize == 'compact'):
filename=self.folder + 'intraday_5min_compact_'+argScript+'.csv'
else:
filename=self.folder + 'intraday_5min_full_'+argScript+'.csv'
csvdf = pd.read_csv(filename)
csvdf=csvdf.rename(columns={'open':'1. open', 'high':'2. high', 'low':'3. low', 'close':'4. close', 'volume': '5. volume'})
convert_type={'1. open':float, '2. high':float, '3. low':float, '4. close':float, '5. volume':float}
csvdf = csvdf.astype(convert_type)
csvdf.set_index('timestamp', inplace=True)
csvdf.index = pd.to_datetime(csvdf.index)
csvdf.index.names = ['date']
except Exception as e:
csvdf = DataFrame()
return csvdf
def loadSMA(self, argScript='', argPeriod=20):
try:
#if(argPeriod == 0):
# csvdf = pd.read_csv(self.folder + 'SMA_'+argScript+'.csv')
#else:
csvdf = pd.read_csv(self.folder + 'SMA_'+str(argPeriod)+ '_'+argScript+'.csv')
convert_type={'SMA':float}
csvdf = csvdf.astype(convert_type)
csvdf.set_index('time', inplace=True)
csvdf.index = pd.to_datetime(csvdf.index)
csvdf.index.names = ['date']
#ti = TechIndicators('XXXX', output_format='pandas')
#padf, pameta = ti.get_sma(argScript)
except Exception as e:
csvdf = DataFrame()
return csvdf
def loadEMA(self, argScript):
try:
csvdf = pd.read_csv(self.folder + 'EMA_'+argScript+'.csv')
convert_type={'EMA':float}
csvdf = csvdf.astype(convert_type)
csvdf.set_index('time', inplace=True)
csvdf.index = | pd.to_datetime(csvdf.index) | pandas.to_datetime |
"""
In the memento task, the behavioral responses of participants were written to
log files.
However, different participants played different versions of the task, and
different versions of the task saved a different amount of variables as a
Matlab struct into the log file.
This file contains information on the variables and their indexes per subject.
Indexing is done according to Python, i.e., zero-based.
"""
import logging
from pymento_meg.config import subjectmapping
from scipy.io import loadmat
from pathlib import Path
import pandas as pd
logging.basicConfig(format='%(levelname)s:%(message)s', level=logging.INFO)
def get_behavioral_data(subject, behav_dir, fieldname, variable=None):
"""
Read in behavioral data and return the values of one variable.
:param subject:
:param behav_dir: Path to the directory that contains subject-specific
log directories (e.g., data/DMS_MEMENTO/Data_Behav/Data_Behav_Memento)
    :param fieldname: Fieldname the variable is stored under. Can be
        "probmagrew", "single_onset", "disptimes", or "onset"
:param variable: str, variable name that should be retrieved. If None is
specified, it will get all variables of this fieldname
:return:
"""
key = f"memento_{subject}"
logging.info(f"Reading in experiment log files of {key} for {fieldname}...")
# get the information about the subject's behavioral data out of the subject
# mapping, but make sure it is actually there first
assert key in subjectmapping.keys()
subinfo = subjectmapping[key]
# based on the information in subinfo, load the file
fname = subinfo["logfilename"]
path = Path(behav_dir) / fname
res = loadmat(path)
# get the subject ID out of the behavioral data struct. It is buried quite
# deep, and typically doesn't have a leading zero
subID = str(res["mementores"]["subID"][0][0][0][0])
assert subID in subject
# first, retrieve all possible variables given the fieldname
var = subinfo[fieldname]
if variable:
# make sure the required variable is inside this list
assert variable in var
# get the index from the required variable. This is necessary to index
# the struct in the right place. Only fieldnames seem to be indexable
# by name, not their variables
idx = var.index(variable)
# for all relevant fieldnames, it takes two [0] indices to get to an
# unnested matrix of all variables
wanted_var = res["mementores"][fieldname][0][0][idx]
return wanted_var
else:
return res["mementores"][fieldname][0][0]
def write_to_df(participant, behav_dir, bids_dir, write_out=False):
"""
Write logfile data to a dataframe to get rid of the convoluted matlab
structure.
All variables should exist 510 times.
    :param participant: str, subject identifier in the form of "001"
"""
# read the data in as separate dataframes
# Onset times are timestamps! View with datetime
# first, get matlab data
onsets = get_behavioral_data(
subject=participant, behav_dir=behav_dir, fieldname="onsets"
)
disps = get_behavioral_data(
subject=participant, behav_dir=behav_dir, fieldname="disptimes"
)
probs = get_behavioral_data(
subject=participant, behav_dir=behav_dir, fieldname="probmagrew"
)
# we need to transpose the dataframe to get variables as columns and
# trials as rows
df_onsets = pd.DataFrame(onsets).transpose()
df_disps = pd.DataFrame(disps).transpose()
df_probs = pd.DataFrame(probs).transpose()
# set header:
df_onsets.columns = subjectmapping[f"memento_{participant}"]["onsets"]
df_disps.columns = subjectmapping[f"memento_{participant}"]["disptimes"]
df_probs.columns = subjectmapping[f"memento_{participant}"]["probmagrew"]
# assert that all series are monotonically increasing in onsets, but skip
# Series with NaNs:
assert all(
[
df_onsets[i].is_monotonic
for i in df_onsets.columns
if not df_onsets[i].isna().values.any()
]
)
assert all([d.shape[0] == 510 for d in [df_disps, df_onsets, df_probs]])
# concatenate the dataframes to one
df = | pd.concat([df_disps, df_onsets, df_probs], axis=1) | pandas.concat |
import os
import pickle
import argparse
import pandas as pd
from gensim.models import (Word2Vec, KeyedVectors)
from gensim.models.fasttext import FastText
from util.params import Params
"""
Possibly useful resources:
https://radimrehurek.com/gensim/scripts/glove2word2vec.html
https://rare-technologies.com/word2vec-tutorial/
https://codesachin.wordpress.com/2015/10/09/generating-a-word2vec-model-from-a-block-of-text-using-gensim-python/
"""
def __train__(sentences, model=None, EmbeddingModel=Word2Vec):
"""Sentence input should be in format:
[['first', 'sentence'], ['second', 'sentence'], ..., ['last', 'sentence']]
"""
# initialize model
if model is None:
# New model
model = EmbeddingModel(None, size=100, window=5, min_count=5, workers=4, iter=20)
model.build_vocab(sentences)
else:
model.build_vocab(sentences, update=True)
# Train
model.train(sentences, total_examples=model.corpus_count, epochs=model.iter)
return model
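# Hedged usage sketch -- the token lists are made up for illustration:
#
#   sentences = [['first', 'sentence'], ['second', 'sentence']]
#   model = __train__(sentences)                           # fresh Word2Vec model
#   model = __train__(sentences, model=model)              # incremental vocab update + train
#   model = __train__(sentences, EmbeddingModel=FastText)  # FastText variant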
def train(training_path="dumps/v1", save_to="dumps/v1/train_word2vec.model", word2vec=True):
model = None
# Load sentences by batch
for i in range(1,11):
print("Loading batch {}".format(i))
with open(os.path.join(training_path, "train_{}.pkl".format(i)), "rb") as f:
train_set = pickle.load(f)
print("Done")
print("Training {} on batch {}".format("Word2Vec" if word2vec else "FastText", i))
EmbeddingModel = Word2Vec if word2vec else FastText
df_train = | pd.DataFrame(train_set) | pandas.DataFrame |
# -*- coding: utf-8 -*-
"""Simply downloads email attachments.
Uses this handy package: https://pypi.org/project/imap-tools/
"""
import io
from os.path import join
import os
from datetime import datetime, timedelta
import pandas as pd
import numpy as np
from imap_tools import MailBox, A, AND
def get_from_email(start_date, end_date, mail_server,
account, sender, password):
"""
Get raw data from email account.
Args:
start_date: datetime.datetime
pull data from email received from the start date
end_date: datetime.datetime
pull data from email received on/before the end date
mail_server: str
account: str
email account to receive new data
sender: str
email account of the sender
password: str
            password of the email account
output:
df: pd.DataFrame
"""
time_flag = None
df = pd.DataFrame(columns=['SofiaSerNum', 'TestDate', 'Facility', 'City',
'State', 'Zip', 'PatientAge', 'Result1', 'Result2',
'OverallResult', 'County', 'FacilityType', 'Assay',
'SCO1', 'SCO2', 'CLN', 'CSN', 'InstrType',
'StorageDate', 'ResultId', 'SarsTestNumber'])
with MailBox(mail_server).login(account, password, 'INBOX') as mailbox:
for search_date in [start_date + timedelta(days=x)
for x in range((end_date - start_date).days + 1)]:
for message in mailbox.fetch(A(AND(date=search_date.date(), from_=sender))):
for att in message.attachments:
name = att.filename
# Only consider covid tests
if "Sars" not in name:
continue
print("Pulling data received on %s"%search_date.date())
toread = io.BytesIO()
toread.write(att.payload)
toread.seek(0) # reset the pointer
newdf = pd.read_excel(toread) # now read to dataframe
df = df.append(newdf)
time_flag = search_date
return df, time_flag
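# Hedged usage sketch (server, account, sender and password are placeholders,
# not values used by this module):
#
#   df, time_flag = get_from_email(
#       start_date=datetime(2020, 7, 1),
#       end_date=datetime(2020, 7, 7),
#       mail_server="imap.example.com",
#       account="[email protected]",
#       sender="[email protected]",
#       password="********",
#   )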
def fix_zipcode(df):
"""Fix zipcode that is 9 digit instead of 5 digit."""
zipcode5 = []
fixnum = 0
for zipcode in df['Zip'].values:
if isinstance(zipcode, str) and '-' in zipcode:
zipcode5.append(int(zipcode.split('-')[0]))
fixnum += 1
else:
zipcode = int(float(zipcode))
zipcode5.append(zipcode)
df['zip'] = zipcode5
# print('Fixing %.2f %% of the data' % (fixnum * 100 / len(zipcode5)))
return df
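# Illustration of the repair fix_zipcode() performs (values are made up):
#   '94103-1234'  ->  94103   (ZIP+4 string truncated to its 5-digit prefix)
#   94103.0       ->  94103   (float-typed zip cast back to int)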
def fix_date(df):
"""
Remove invalid dates and select correct test date to use.
Quidel Covid Test are labeled with Test Date and Storage Date. In principle,
the TestDate should reflect when the test was performed and the StorageDate
when the test was logged in the MyVirena cloud storage device. We expect
that the test date should precede the storage date by several days. However,
in the actual data the test date can be far earlier than the storage date
and the test date can also occur after the storage date.
- For most of the cases, use test date as the timestamp
- Remove tests with a storage date which is earlier than the test date
    - If the storage date is more than 90 days later than the test date, the
      storage date will be adopted as the timestamp instead
"""
df.insert(2, "timestamp", df["TestDate"])
mask = df["TestDate"] <= df["StorageDate"]
print("Removing %.2f%% of unusual data" % ((len(df) - np.sum(mask)) * 100 / len(df)))
df = df[mask]
mask = df["StorageDate"] - df["TestDate"] > | pd.Timedelta(days=90) | pandas.Timedelta |
from __future__ import print_function
from datetime import datetime, timedelta
import numpy as np
import pandas as pd
from pandas import (Series, Index, Int64Index, Timestamp, Period,
DatetimeIndex, PeriodIndex, TimedeltaIndex,
Timedelta, timedelta_range, date_range, Float64Index,
_np_version_under1p10)
import pandas.tslib as tslib
import pandas.tseries.period as period
import pandas.util.testing as tm
from pandas.tests.test_base import Ops
class TestDatetimeIndexOps(Ops):
tz = [None, 'UTC', 'Asia/Tokyo', 'US/Eastern', 'dateutil/Asia/Singapore',
'dateutil/US/Pacific']
def setUp(self):
super(TestDatetimeIndexOps, self).setUp()
mask = lambda x: (isinstance(x, DatetimeIndex) or
isinstance(x, PeriodIndex))
self.is_valid_objs = [o for o in self.objs if mask(o)]
self.not_valid_objs = [o for o in self.objs if not mask(o)]
def test_ops_properties(self):
self.check_ops_properties(
['year', 'month', 'day', 'hour', 'minute', 'second', 'weekofyear',
'week', 'dayofweek', 'dayofyear', 'quarter'])
self.check_ops_properties(['date', 'time', 'microsecond', 'nanosecond',
'is_month_start', 'is_month_end',
'is_quarter_start',
'is_quarter_end', 'is_year_start',
'is_year_end', 'weekday_name'],
lambda x: isinstance(x, DatetimeIndex))
def test_ops_properties_basic(self):
# sanity check that the behavior didn't change
# GH7206
for op in ['year', 'day', 'second', 'weekday']:
self.assertRaises(TypeError, lambda x: getattr(self.dt_series, op))
# attribute access should still work!
s = Series(dict(year=2000, month=1, day=10))
self.assertEqual(s.year, 2000)
self.assertEqual(s.month, 1)
self.assertEqual(s.day, 10)
self.assertRaises(AttributeError, lambda: s.weekday)
def test_asobject_tolist(self):
idx = pd.date_range(start='2013-01-01', periods=4, freq='M',
name='idx')
expected_list = [Timestamp('2013-01-31'),
Timestamp('2013-02-28'),
Timestamp('2013-03-31'),
Timestamp('2013-04-30')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
idx = pd.date_range(start='2013-01-01', periods=4, freq='M',
name='idx', tz='Asia/Tokyo')
expected_list = [Timestamp('2013-01-31', tz='Asia/Tokyo'),
Timestamp('2013-02-28', tz='Asia/Tokyo'),
Timestamp('2013-03-31', tz='Asia/Tokyo'),
Timestamp('2013-04-30', tz='Asia/Tokyo')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
idx = DatetimeIndex([datetime(2013, 1, 1), datetime(2013, 1, 2),
pd.NaT, datetime(2013, 1, 4)], name='idx')
expected_list = [Timestamp('2013-01-01'),
Timestamp('2013-01-02'), pd.NaT,
Timestamp('2013-01-04')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
def test_minmax(self):
for tz in self.tz:
# monotonic
idx1 = pd.DatetimeIndex(['2011-01-01', '2011-01-02',
'2011-01-03'], tz=tz)
self.assertTrue(idx1.is_monotonic)
# non-monotonic
idx2 = pd.DatetimeIndex(['2011-01-01', pd.NaT, '2011-01-03',
'2011-01-02', pd.NaT], tz=tz)
self.assertFalse(idx2.is_monotonic)
for idx in [idx1, idx2]:
self.assertEqual(idx.min(), Timestamp('2011-01-01', tz=tz))
self.assertEqual(idx.max(), Timestamp('2011-01-03', tz=tz))
self.assertEqual(idx.argmin(), 0)
self.assertEqual(idx.argmax(), 2)
for op in ['min', 'max']:
# Return NaT
obj = DatetimeIndex([])
self.assertTrue(pd.isnull(getattr(obj, op)()))
obj = DatetimeIndex([pd.NaT])
self.assertTrue(pd.isnull(getattr(obj, op)()))
obj = DatetimeIndex([pd.NaT, pd.NaT, pd.NaT])
self.assertTrue(pd.isnull(getattr(obj, op)()))
def test_numpy_minmax(self):
dr = pd.date_range(start='2016-01-15', end='2016-01-20')
self.assertEqual(np.min(dr),
Timestamp('2016-01-15 00:00:00', freq='D'))
self.assertEqual(np.max(dr),
Timestamp('2016-01-20 00:00:00', freq='D'))
errmsg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, errmsg, np.min, dr, out=0)
tm.assertRaisesRegexp(ValueError, errmsg, np.max, dr, out=0)
self.assertEqual(np.argmin(dr), 0)
self.assertEqual(np.argmax(dr), 5)
if not _np_version_under1p10:
errmsg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, errmsg, np.argmin, dr, out=0)
tm.assertRaisesRegexp(ValueError, errmsg, np.argmax, dr, out=0)
def test_round(self):
for tz in self.tz:
rng = pd.date_range(start='2016-01-01', periods=5,
freq='30Min', tz=tz)
elt = rng[1]
expected_rng = DatetimeIndex([
Timestamp('2016-01-01 00:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 00:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 01:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 02:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 02:00:00', tz=tz, freq='30T'),
])
expected_elt = expected_rng[1]
tm.assert_index_equal(rng.round(freq='H'), expected_rng)
self.assertEqual(elt.round(freq='H'), expected_elt)
msg = pd.tseries.frequencies._INVALID_FREQ_ERROR
with tm.assertRaisesRegexp(ValueError, msg):
rng.round(freq='foo')
with tm.assertRaisesRegexp(ValueError, msg):
elt.round(freq='foo')
msg = "<MonthEnd> is a non-fixed frequency"
tm.assertRaisesRegexp(ValueError, msg, rng.round, freq='M')
tm.assertRaisesRegexp(ValueError, msg, elt.round, freq='M')
def test_repeat_range(self):
rng = date_range('1/1/2000', '1/1/2001')
result = rng.repeat(5)
self.assertIsNone(result.freq)
self.assertEqual(len(result), 5 * len(rng))
for tz in self.tz:
index = pd.date_range('2001-01-01', periods=2, freq='D', tz=tz)
exp = pd.DatetimeIndex(['2001-01-01', '2001-01-01',
'2001-01-02', '2001-01-02'], tz=tz)
for res in [index.repeat(2), np.repeat(index, 2)]:
tm.assert_index_equal(res, exp)
self.assertIsNone(res.freq)
index = pd.date_range('2001-01-01', periods=2, freq='2D', tz=tz)
exp = pd.DatetimeIndex(['2001-01-01', '2001-01-01',
'2001-01-03', '2001-01-03'], tz=tz)
for res in [index.repeat(2), np.repeat(index, 2)]:
tm.assert_index_equal(res, exp)
self.assertIsNone(res.freq)
index = pd.DatetimeIndex(['2001-01-01', 'NaT', '2003-01-01'],
tz=tz)
exp = pd.DatetimeIndex(['2001-01-01', '2001-01-01', '2001-01-01',
'NaT', 'NaT', 'NaT',
'2003-01-01', '2003-01-01', '2003-01-01'],
tz=tz)
for res in [index.repeat(3), np.repeat(index, 3)]:
tm.assert_index_equal(res, exp)
self.assertIsNone(res.freq)
def test_repeat(self):
reps = 2
msg = "the 'axis' parameter is not supported"
for tz in self.tz:
rng = pd.date_range(start='2016-01-01', periods=2,
freq='30Min', tz=tz)
expected_rng = DatetimeIndex([
Timestamp('2016-01-01 00:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 00:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 00:30:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 00:30:00', tz=tz, freq='30T'),
])
res = rng.repeat(reps)
tm.assert_index_equal(res, expected_rng)
self.assertIsNone(res.freq)
tm.assert_index_equal(np.repeat(rng, reps), expected_rng)
tm.assertRaisesRegexp(ValueError, msg, np.repeat,
rng, reps, axis=1)
def test_representation(self):
idx = []
idx.append(DatetimeIndex([], freq='D'))
idx.append(DatetimeIndex(['2011-01-01'], freq='D'))
idx.append(DatetimeIndex(['2011-01-01', '2011-01-02'], freq='D'))
idx.append(DatetimeIndex(
['2011-01-01', '2011-01-02', '2011-01-03'], freq='D'))
idx.append(DatetimeIndex(
['2011-01-01 09:00', '2011-01-01 10:00', '2011-01-01 11:00'
], freq='H', tz='Asia/Tokyo'))
idx.append(DatetimeIndex(
['2011-01-01 09:00', '2011-01-01 10:00', pd.NaT], tz='US/Eastern'))
idx.append(DatetimeIndex(
['2011-01-01 09:00', '2011-01-01 10:00', pd.NaT], tz='UTC'))
exp = []
exp.append("""DatetimeIndex([], dtype='datetime64[ns]', freq='D')""")
exp.append("DatetimeIndex(['2011-01-01'], dtype='datetime64[ns]', "
"freq='D')")
exp.append("DatetimeIndex(['2011-01-01', '2011-01-02'], "
"dtype='datetime64[ns]', freq='D')")
exp.append("DatetimeIndex(['2011-01-01', '2011-01-02', '2011-01-03'], "
"dtype='datetime64[ns]', freq='D')")
exp.append("DatetimeIndex(['2011-01-01 09:00:00+09:00', "
"'2011-01-01 10:00:00+09:00', '2011-01-01 11:00:00+09:00']"
", dtype='datetime64[ns, Asia/Tokyo]', freq='H')")
exp.append("DatetimeIndex(['2011-01-01 09:00:00-05:00', "
"'2011-01-01 10:00:00-05:00', 'NaT'], "
"dtype='datetime64[ns, US/Eastern]', freq=None)")
exp.append("DatetimeIndex(['2011-01-01 09:00:00+00:00', "
"'2011-01-01 10:00:00+00:00', 'NaT'], "
"dtype='datetime64[ns, UTC]', freq=None)""")
with pd.option_context('display.width', 300):
for indx, expected in zip(idx, exp):
for func in ['__repr__', '__unicode__', '__str__']:
result = getattr(indx, func)()
self.assertEqual(result, expected)
def test_representation_to_series(self):
idx1 = DatetimeIndex([], freq='D')
idx2 = DatetimeIndex(['2011-01-01'], freq='D')
idx3 = DatetimeIndex(['2011-01-01', '2011-01-02'], freq='D')
idx4 = DatetimeIndex(
['2011-01-01', '2011-01-02', '2011-01-03'], freq='D')
idx5 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00',
'2011-01-01 11:00'], freq='H', tz='Asia/Tokyo')
idx6 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00', pd.NaT],
tz='US/Eastern')
idx7 = DatetimeIndex(['2011-01-01 09:00', '2011-01-02 10:15'])
exp1 = """Series([], dtype: datetime64[ns])"""
exp2 = """0 2011-01-01
dtype: datetime64[ns]"""
exp3 = """0 2011-01-01
1 2011-01-02
dtype: datetime64[ns]"""
exp4 = """0 2011-01-01
1 2011-01-02
2 2011-01-03
dtype: datetime64[ns]"""
exp5 = """0 2011-01-01 09:00:00+09:00
1 2011-01-01 10:00:00+09:00
2 2011-01-01 11:00:00+09:00
dtype: datetime64[ns, Asia/Tokyo]"""
exp6 = """0 2011-01-01 09:00:00-05:00
1 2011-01-01 10:00:00-05:00
2 NaT
dtype: datetime64[ns, US/Eastern]"""
exp7 = """0 2011-01-01 09:00:00
1 2011-01-02 10:15:00
dtype: datetime64[ns]"""
with pd.option_context('display.width', 300):
for idx, expected in zip([idx1, idx2, idx3, idx4,
idx5, idx6, idx7],
[exp1, exp2, exp3, exp4,
exp5, exp6, exp7]):
result = repr(Series(idx))
self.assertEqual(result, expected)
def test_summary(self):
# GH9116
idx1 = DatetimeIndex([], freq='D')
idx2 = DatetimeIndex(['2011-01-01'], freq='D')
idx3 = DatetimeIndex(['2011-01-01', '2011-01-02'], freq='D')
idx4 = DatetimeIndex(
['2011-01-01', '2011-01-02', '2011-01-03'], freq='D')
idx5 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00',
'2011-01-01 11:00'],
freq='H', tz='Asia/Tokyo')
idx6 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00', pd.NaT],
tz='US/Eastern')
exp1 = """DatetimeIndex: 0 entries
Freq: D"""
exp2 = """DatetimeIndex: 1 entries, 2011-01-01 to 2011-01-01
Freq: D"""
exp3 = """DatetimeIndex: 2 entries, 2011-01-01 to 2011-01-02
Freq: D"""
exp4 = """DatetimeIndex: 3 entries, 2011-01-01 to 2011-01-03
Freq: D"""
exp5 = ("DatetimeIndex: 3 entries, 2011-01-01 09:00:00+09:00 "
"to 2011-01-01 11:00:00+09:00\n"
"Freq: H")
exp6 = """DatetimeIndex: 3 entries, 2011-01-01 09:00:00-05:00 to NaT"""
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5, idx6],
[exp1, exp2, exp3, exp4, exp5, exp6]):
result = idx.summary()
self.assertEqual(result, expected)
def test_resolution(self):
for freq, expected in zip(['A', 'Q', 'M', 'D', 'H', 'T',
'S', 'L', 'U'],
['day', 'day', 'day', 'day', 'hour',
'minute', 'second', 'millisecond',
'microsecond']):
for tz in self.tz:
idx = pd.date_range(start='2013-04-01', periods=30, freq=freq,
tz=tz)
self.assertEqual(idx.resolution, expected)
def test_union(self):
for tz in self.tz:
# union
rng1 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other1 = pd.date_range('1/6/2000', freq='D', periods=5, tz=tz)
expected1 = pd.date_range('1/1/2000', freq='D', periods=10, tz=tz)
rng2 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other2 = pd.date_range('1/4/2000', freq='D', periods=5, tz=tz)
expected2 = pd.date_range('1/1/2000', freq='D', periods=8, tz=tz)
rng3 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other3 = pd.DatetimeIndex([], tz=tz)
expected3 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
for rng, other, expected in [(rng1, other1, expected1),
(rng2, other2, expected2),
(rng3, other3, expected3)]:
result_union = rng.union(other)
tm.assert_index_equal(result_union, expected)
def test_add_iadd(self):
for tz in self.tz:
# offset
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)]
for delta in offsets:
rng = pd.date_range('2000-01-01', '2000-02-01', tz=tz)
result = rng + delta
expected = pd.date_range('2000-01-01 02:00',
'2000-02-01 02:00', tz=tz)
tm.assert_index_equal(result, expected)
rng += delta
tm.assert_index_equal(rng, expected)
# int
rng = pd.date_range('2000-01-01 09:00', freq='H', periods=10,
tz=tz)
result = rng + 1
expected = pd.date_range('2000-01-01 10:00', freq='H', periods=10,
tz=tz)
tm.assert_index_equal(result, expected)
rng += 1
tm.assert_index_equal(rng, expected)
idx = DatetimeIndex(['2011-01-01', '2011-01-02'])
msg = "cannot add a datelike to a DatetimeIndex"
with tm.assertRaisesRegexp(TypeError, msg):
idx + Timestamp('2011-01-01')
with tm.assertRaisesRegexp(TypeError, msg):
Timestamp('2011-01-01') + idx
def test_add_dti_dti(self):
# previously performed setop (deprecated in 0.16.0), now raises
# TypeError (GH14164)
dti = date_range('20130101', periods=3)
dti_tz = date_range('20130101', periods=3).tz_localize('US/Eastern')
with tm.assertRaises(TypeError):
dti + dti
with tm.assertRaises(TypeError):
dti_tz + dti_tz
with tm.assertRaises(TypeError):
dti_tz + dti
with tm.assertRaises(TypeError):
dti + dti_tz
def test_difference(self):
for tz in self.tz:
# diff
rng1 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other1 = pd.date_range('1/6/2000', freq='D', periods=5, tz=tz)
expected1 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
rng2 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other2 = pd.date_range('1/4/2000', freq='D', periods=5, tz=tz)
expected2 = pd.date_range('1/1/2000', freq='D', periods=3, tz=tz)
rng3 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other3 = pd.DatetimeIndex([], tz=tz)
expected3 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
for rng, other, expected in [(rng1, other1, expected1),
(rng2, other2, expected2),
(rng3, other3, expected3)]:
result_diff = rng.difference(other)
tm.assert_index_equal(result_diff, expected)
def test_sub_isub(self):
for tz in self.tz:
# offset
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)]
for delta in offsets:
rng = pd.date_range('2000-01-01', '2000-02-01', tz=tz)
expected = pd.date_range('1999-12-31 22:00',
'2000-01-31 22:00', tz=tz)
result = rng - delta
tm.assert_index_equal(result, expected)
rng -= delta
tm.assert_index_equal(rng, expected)
# int
rng = pd.date_range('2000-01-01 09:00', freq='H', periods=10,
tz=tz)
result = rng - 1
expected = pd.date_range('2000-01-01 08:00', freq='H', periods=10,
tz=tz)
tm.assert_index_equal(result, expected)
rng -= 1
tm.assert_index_equal(rng, expected)
def test_sub_dti_dti(self):
# previously performed setop (deprecated in 0.16.0), now changed to
# return subtraction -> TimeDeltaIndex (GH ...)
dti = date_range('20130101', periods=3)
dti_tz = date_range('20130101', periods=3).tz_localize('US/Eastern')
dti_tz2 = date_range('20130101', periods=3).tz_localize('UTC')
expected = TimedeltaIndex([0, 0, 0])
result = dti - dti
tm.assert_index_equal(result, expected)
result = dti_tz - dti_tz
tm.assert_index_equal(result, expected)
with tm.assertRaises(TypeError):
dti_tz - dti
with tm.assertRaises(TypeError):
dti - dti_tz
with tm.assertRaises(TypeError):
dti_tz - dti_tz2
# isub
dti -= dti
tm.assert_index_equal(dti, expected)
# different length raises ValueError
dti1 = date_range('20130101', periods=3)
dti2 = date_range('20130101', periods=4)
with tm.assertRaises(ValueError):
dti1 - dti2
# NaN propagation
dti1 = DatetimeIndex(['2012-01-01', np.nan, '2012-01-03'])
dti2 = DatetimeIndex(['2012-01-02', '2012-01-03', np.nan])
expected = TimedeltaIndex(['1 days', np.nan, np.nan])
result = dti2 - dti1
tm.assert_index_equal(result, expected)
def test_sub_period(self):
# GH 13078
# not supported, check TypeError
p = pd.Period('2011-01-01', freq='D')
for freq in [None, 'D']:
idx = pd.DatetimeIndex(['2011-01-01', '2011-01-02'], freq=freq)
with tm.assertRaises(TypeError):
idx - p
with tm.assertRaises(TypeError):
p - idx
def test_comp_nat(self):
left = pd.DatetimeIndex([pd.Timestamp('2011-01-01'), pd.NaT,
pd.Timestamp('2011-01-03')])
right = pd.DatetimeIndex([pd.NaT, pd.NaT, pd.Timestamp('2011-01-03')])
for l, r in [(left, right), (left.asobject, right.asobject)]:
result = l == r
expected = np.array([False, False, True])
tm.assert_numpy_array_equal(result, expected)
result = l != r
expected = np.array([True, True, False])
tm.assert_numpy_array_equal(result, expected)
expected = np.array([False, False, False])
tm.assert_numpy_array_equal(l == pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT == r, expected)
expected = np.array([True, True, True])
tm.assert_numpy_array_equal(l != pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT != l, expected)
expected = np.array([False, False, False])
tm.assert_numpy_array_equal(l < pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT > l, expected)
def test_value_counts_unique(self):
# GH 7735
for tz in self.tz:
idx = pd.date_range('2011-01-01 09:00', freq='H', periods=10)
# create repeated values, 'n'th element is repeated by n+1 times
idx = DatetimeIndex(np.repeat(idx.values, range(1, len(idx) + 1)),
tz=tz)
exp_idx = pd.date_range('2011-01-01 18:00', freq='-1H', periods=10,
tz=tz)
expected = Series(range(10, 0, -1), index=exp_idx, dtype='int64')
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(), expected)
expected = pd.date_range('2011-01-01 09:00', freq='H', periods=10,
tz=tz)
tm.assert_index_equal(idx.unique(), expected)
idx = DatetimeIndex(['2013-01-01 09:00', '2013-01-01 09:00',
'2013-01-01 09:00', '2013-01-01 08:00',
'2013-01-01 08:00', pd.NaT], tz=tz)
exp_idx = DatetimeIndex(['2013-01-01 09:00', '2013-01-01 08:00'],
tz=tz)
expected = Series([3, 2], index=exp_idx)
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(), expected)
exp_idx = DatetimeIndex(['2013-01-01 09:00', '2013-01-01 08:00',
pd.NaT], tz=tz)
expected = Series([3, 2, 1], index=exp_idx)
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(dropna=False),
expected)
tm.assert_index_equal(idx.unique(), exp_idx)
def test_nonunique_contains(self):
# GH 9512
for idx in map(DatetimeIndex,
([0, 1, 0], [0, 0, -1], [0, -1, -1],
['2015', '2015', '2016'], ['2015', '2015', '2014'])):
tm.assertIn(idx[0], idx)
def test_order(self):
# with freq
idx1 = DatetimeIndex(['2011-01-01', '2011-01-02',
'2011-01-03'], freq='D', name='idx')
idx2 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00',
'2011-01-01 11:00'], freq='H',
tz='Asia/Tokyo', name='tzidx')
for idx in [idx1, idx2]:
ordered = idx.sort_values()
self.assert_index_equal(ordered, idx)
self.assertEqual(ordered.freq, idx.freq)
ordered = idx.sort_values(ascending=False)
expected = idx[::-1]
self.assert_index_equal(ordered, expected)
self.assertEqual(ordered.freq, expected.freq)
self.assertEqual(ordered.freq.n, -1)
ordered, indexer = idx.sort_values(return_indexer=True)
self.assert_index_equal(ordered, idx)
self.assert_numpy_array_equal(indexer,
np.array([0, 1, 2]),
check_dtype=False)
self.assertEqual(ordered.freq, idx.freq)
ordered, indexer = idx.sort_values(return_indexer=True,
ascending=False)
expected = idx[::-1]
self.assert_index_equal(ordered, expected)
self.assert_numpy_array_equal(indexer,
np.array([2, 1, 0]),
check_dtype=False)
self.assertEqual(ordered.freq, expected.freq)
self.assertEqual(ordered.freq.n, -1)
# without freq
for tz in self.tz:
idx1 = DatetimeIndex(['2011-01-01', '2011-01-03', '2011-01-05',
'2011-01-02', '2011-01-01'],
tz=tz, name='idx1')
exp1 = DatetimeIndex(['2011-01-01', '2011-01-01', '2011-01-02',
'2011-01-03', '2011-01-05'],
tz=tz, name='idx1')
idx2 = DatetimeIndex(['2011-01-01', '2011-01-03', '2011-01-05',
'2011-01-02', '2011-01-01'],
tz=tz, name='idx2')
exp2 = DatetimeIndex(['2011-01-01', '2011-01-01', '2011-01-02',
'2011-01-03', '2011-01-05'],
tz=tz, name='idx2')
idx3 = DatetimeIndex([pd.NaT, '2011-01-03', '2011-01-05',
'2011-01-02', pd.NaT], tz=tz, name='idx3')
exp3 = DatetimeIndex([pd.NaT, pd.NaT, '2011-01-02', '2011-01-03',
'2011-01-05'], tz=tz, name='idx3')
for idx, expected in [(idx1, exp1), (idx2, exp2), (idx3, exp3)]:
ordered = idx.sort_values()
self.assert_index_equal(ordered, expected)
self.assertIsNone(ordered.freq)
ordered = idx.sort_values(ascending=False)
self.assert_index_equal(ordered, expected[::-1])
self.assertIsNone(ordered.freq)
ordered, indexer = idx.sort_values(return_indexer=True)
self.assert_index_equal(ordered, expected)
exp = np.array([0, 4, 3, 1, 2])
self.assert_numpy_array_equal(indexer, exp, check_dtype=False)
self.assertIsNone(ordered.freq)
ordered, indexer = idx.sort_values(return_indexer=True,
ascending=False)
self.assert_index_equal(ordered, expected[::-1])
exp = np.array([2, 1, 3, 4, 0])
self.assert_numpy_array_equal(indexer, exp, check_dtype=False)
self.assertIsNone(ordered.freq)
def test_getitem(self):
idx1 = pd.date_range('2011-01-01', '2011-01-31', freq='D', name='idx')
idx2 = pd.date_range('2011-01-01', '2011-01-31', freq='D',
tz='Asia/Tokyo', name='idx')
for idx in [idx1, idx2]:
result = idx[0]
self.assertEqual(result, Timestamp('2011-01-01', tz=idx.tz))
result = idx[0:5]
expected = pd.date_range('2011-01-01', '2011-01-05', freq='D',
tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx[0:10:2]
expected = pd.date_range('2011-01-01', '2011-01-09', freq='2D',
tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx[-20:-5:3]
expected = pd.date_range('2011-01-12', '2011-01-24', freq='3D',
tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx[4::-1]
expected = DatetimeIndex(['2011-01-05', '2011-01-04', '2011-01-03',
'2011-01-02', '2011-01-01'],
freq='-1D', tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
def test_drop_duplicates_metadata(self):
# GH 10115
idx = pd.date_range('2011-01-01', '2011-01-31', freq='D', name='idx')
result = idx.drop_duplicates()
self.assert_index_equal(idx, result)
self.assertEqual(idx.freq, result.freq)
idx_dup = idx.append(idx)
self.assertIsNone(idx_dup.freq) # freq is reset
result = idx_dup.drop_duplicates()
self.assert_index_equal(idx, result)
self.assertIsNone(result.freq)
def test_drop_duplicates(self):
# to check Index/Series compat
base = pd.date_range('2011-01-01', '2011-01-31', freq='D', name='idx')
idx = base.append(base[:5])
res = idx.drop_duplicates()
tm.assert_index_equal(res, base)
res = Series(idx).drop_duplicates()
tm.assert_series_equal(res, Series(base))
res = idx.drop_duplicates(keep='last')
exp = base[5:].append(base[:5])
tm.assert_index_equal(res, exp)
res = Series(idx).drop_duplicates(keep='last')
tm.assert_series_equal(res, Series(exp, index=np.arange(5, 36)))
res = idx.drop_duplicates(keep=False)
tm.assert_index_equal(res, base[5:])
res = Series(idx).drop_duplicates(keep=False)
tm.assert_series_equal(res, Series(base[5:], index=np.arange(5, 31)))
def test_take(self):
# GH 10295
idx1 = pd.date_range('2011-01-01', '2011-01-31', freq='D', name='idx')
idx2 = pd.date_range('2011-01-01', '2011-01-31', freq='D',
tz='Asia/Tokyo', name='idx')
for idx in [idx1, idx2]:
result = idx.take([0])
self.assertEqual(result, Timestamp('2011-01-01', tz=idx.tz))
result = idx.take([0, 1, 2])
expected = pd.date_range('2011-01-01', '2011-01-03', freq='D',
tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx.take([0, 2, 4])
expected = pd.date_range('2011-01-01', '2011-01-05', freq='2D',
tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx.take([7, 4, 1])
expected = pd.date_range('2011-01-08', '2011-01-02', freq='-3D',
tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx.take([3, 2, 5])
expected = DatetimeIndex(['2011-01-04', '2011-01-03',
'2011-01-06'],
freq=None, tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertIsNone(result.freq)
result = idx.take([-3, 2, 5])
expected = DatetimeIndex(['2011-01-29', '2011-01-03',
'2011-01-06'],
freq=None, tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertIsNone(result.freq)
def test_take_invalid_kwargs(self):
idx = pd.date_range('2011-01-01', '2011-01-31', freq='D', name='idx')
indices = [1, 6, 5, 9, 10, 13, 15, 3]
msg = r"take\(\) got an unexpected keyword argument 'foo'"
tm.assertRaisesRegexp(TypeError, msg, idx.take,
indices, foo=2)
msg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, msg, idx.take,
indices, out=indices)
msg = "the 'mode' parameter is not supported"
tm.assertRaisesRegexp(ValueError, msg, idx.take,
indices, mode='clip')
def test_infer_freq(self):
# GH 11018
for freq in ['A', '2A', '-2A', 'Q', '-1Q', 'M', '-1M', 'D', '3D',
'-3D', 'W', '-1W', 'H', '2H', '-2H', 'T', '2T', 'S',
'-3S']:
idx = pd.date_range('2011-01-01 09:00:00', freq=freq, periods=10)
result = pd.DatetimeIndex(idx.asi8, freq='infer')
tm.assert_index_equal(idx, result)
self.assertEqual(result.freq, freq)
def test_nat_new(self):
idx = pd.date_range('2011-01-01', freq='D', periods=5, name='x')
result = idx._nat_new()
exp = pd.DatetimeIndex([pd.NaT] * 5, name='x')
tm.assert_index_equal(result, exp)
result = idx._nat_new(box=False)
exp = np.array([tslib.iNaT] * 5, dtype=np.int64)
tm.assert_numpy_array_equal(result, exp)
def test_shift(self):
# GH 9903
for tz in self.tz:
idx = pd.DatetimeIndex([], name='xxx', tz=tz)
tm.assert_index_equal(idx.shift(0, freq='H'), idx)
tm.assert_index_equal(idx.shift(3, freq='H'), idx)
            idx = pd.DatetimeIndex(['2011-01-01 10:00', '2011-01-01 11:00',
                                    '2011-01-01 12:00'], name='xxx', tz=tz)
tm.assert_index_equal(idx.shift(0, freq='H'), idx)
            exp = pd.DatetimeIndex(['2011-01-01 13:00', '2011-01-01 14:00',
                                    '2011-01-01 15:00'], name='xxx', tz=tz)
tm.assert_index_equal(idx.shift(3, freq='H'), exp)
            exp = pd.DatetimeIndex(['2011-01-01 07:00', '2011-01-01 08:00',
                                    '2011-01-01 09:00'], name='xxx', tz=tz)
tm.assert_index_equal(idx.shift(-3, freq='H'), exp)
def test_nat(self):
self.assertIs(pd.DatetimeIndex._na_value, pd.NaT)
self.assertIs(pd.DatetimeIndex([])._na_value, pd.NaT)
for tz in [None, 'US/Eastern', 'UTC']:
idx = pd.DatetimeIndex(['2011-01-01', '2011-01-02'], tz=tz)
self.assertTrue(idx._can_hold_na)
tm.assert_numpy_array_equal(idx._isnan, np.array([False, False]))
self.assertFalse(idx.hasnans)
tm.assert_numpy_array_equal(idx._nan_idxs,
np.array([], dtype=np.intp))
idx = pd.DatetimeIndex(['2011-01-01', 'NaT'], tz=tz)
self.assertTrue(idx._can_hold_na)
tm.assert_numpy_array_equal(idx._isnan, np.array([False, True]))
self.assertTrue(idx.hasnans)
tm.assert_numpy_array_equal(idx._nan_idxs,
np.array([1], dtype=np.intp))
def test_equals(self):
# GH 13107
for tz in [None, 'UTC', 'US/Eastern', 'Asia/Tokyo']:
idx = pd.DatetimeIndex(['2011-01-01', '2011-01-02', 'NaT'])
self.assertTrue(idx.equals(idx))
self.assertTrue(idx.equals(idx.copy()))
self.assertTrue(idx.equals(idx.asobject))
self.assertTrue(idx.asobject.equals(idx))
self.assertTrue(idx.asobject.equals(idx.asobject))
self.assertFalse(idx.equals(list(idx)))
self.assertFalse(idx.equals(pd.Series(idx)))
idx2 = pd.DatetimeIndex(['2011-01-01', '2011-01-02', 'NaT'],
tz='US/Pacific')
self.assertFalse(idx.equals(idx2))
self.assertFalse(idx.equals(idx2.copy()))
self.assertFalse(idx.equals(idx2.asobject))
self.assertFalse(idx.asobject.equals(idx2))
self.assertFalse(idx.equals(list(idx2)))
self.assertFalse(idx.equals(pd.Series(idx2)))
# same internal, different tz
idx3 = pd.DatetimeIndex._simple_new(idx.asi8, tz='US/Pacific')
tm.assert_numpy_array_equal(idx.asi8, idx3.asi8)
self.assertFalse(idx.equals(idx3))
self.assertFalse(idx.equals(idx3.copy()))
self.assertFalse(idx.equals(idx3.asobject))
self.assertFalse(idx.asobject.equals(idx3))
self.assertFalse(idx.equals(list(idx3)))
self.assertFalse(idx.equals(pd.Series(idx3)))
class TestTimedeltaIndexOps(Ops):
def setUp(self):
super(TestTimedeltaIndexOps, self).setUp()
mask = lambda x: isinstance(x, TimedeltaIndex)
self.is_valid_objs = [o for o in self.objs if mask(o)]
self.not_valid_objs = []
def test_ops_properties(self):
self.check_ops_properties(['days', 'hours', 'minutes', 'seconds',
'milliseconds'])
self.check_ops_properties(['microseconds', 'nanoseconds'])
def test_asobject_tolist(self):
idx = timedelta_range(start='1 days', periods=4, freq='D', name='idx')
expected_list = [Timedelta('1 days'), Timedelta('2 days'),
Timedelta('3 days'), Timedelta('4 days')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
idx = TimedeltaIndex([timedelta(days=1), timedelta(days=2), pd.NaT,
timedelta(days=4)], name='idx')
expected_list = [Timedelta('1 days'), Timedelta('2 days'), pd.NaT,
Timedelta('4 days')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
def test_minmax(self):
# monotonic
idx1 = TimedeltaIndex(['1 days', '2 days', '3 days'])
self.assertTrue(idx1.is_monotonic)
# non-monotonic
idx2 = TimedeltaIndex(['1 days', np.nan, '3 days', 'NaT'])
self.assertFalse(idx2.is_monotonic)
for idx in [idx1, idx2]:
self.assertEqual(idx.min(), Timedelta('1 days')),
self.assertEqual(idx.max(), Timedelta('3 days')),
self.assertEqual(idx.argmin(), 0)
self.assertEqual(idx.argmax(), 2)
for op in ['min', 'max']:
# Return NaT
obj = TimedeltaIndex([])
self.assertTrue(pd.isnull(getattr(obj, op)()))
obj = TimedeltaIndex([pd.NaT])
self.assertTrue(pd.isnull(getattr(obj, op)()))
obj = TimedeltaIndex([pd.NaT, pd.NaT, pd.NaT])
self.assertTrue(pd.isnull(getattr(obj, op)()))
def test_numpy_minmax(self):
dr = pd.date_range(start='2016-01-15', end='2016-01-20')
td = TimedeltaIndex(np.asarray(dr))
self.assertEqual(np.min(td), Timedelta('16815 days'))
self.assertEqual(np.max(td), Timedelta('16820 days'))
errmsg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, errmsg, np.min, td, out=0)
tm.assertRaisesRegexp(ValueError, errmsg, np.max, td, out=0)
self.assertEqual(np.argmin(td), 0)
self.assertEqual(np.argmax(td), 5)
if not _np_version_under1p10:
errmsg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, errmsg, np.argmin, td, out=0)
tm.assertRaisesRegexp(ValueError, errmsg, np.argmax, td, out=0)
def test_round(self):
td = pd.timedelta_range(start='16801 days', periods=5, freq='30Min')
elt = td[1]
expected_rng = TimedeltaIndex([
Timedelta('16801 days 00:00:00'),
Timedelta('16801 days 00:00:00'),
Timedelta('16801 days 01:00:00'),
Timedelta('16801 days 02:00:00'),
Timedelta('16801 days 02:00:00'),
])
expected_elt = expected_rng[1]
tm.assert_index_equal(td.round(freq='H'), expected_rng)
self.assertEqual(elt.round(freq='H'), expected_elt)
msg = pd.tseries.frequencies._INVALID_FREQ_ERROR
with self.assertRaisesRegexp(ValueError, msg):
td.round(freq='foo')
with tm.assertRaisesRegexp(ValueError, msg):
elt.round(freq='foo')
msg = "<MonthEnd> is a non-fixed frequency"
tm.assertRaisesRegexp(ValueError, msg, td.round, freq='M')
tm.assertRaisesRegexp(ValueError, msg, elt.round, freq='M')
def test_representation(self):
idx1 = TimedeltaIndex([], freq='D')
idx2 = TimedeltaIndex(['1 days'], freq='D')
idx3 = TimedeltaIndex(['1 days', '2 days'], freq='D')
idx4 = TimedeltaIndex(['1 days', '2 days', '3 days'], freq='D')
idx5 = TimedeltaIndex(['1 days 00:00:01', '2 days', '3 days'])
exp1 = """TimedeltaIndex([], dtype='timedelta64[ns]', freq='D')"""
exp2 = ("TimedeltaIndex(['1 days'], dtype='timedelta64[ns]', "
"freq='D')")
exp3 = ("TimedeltaIndex(['1 days', '2 days'], "
"dtype='timedelta64[ns]', freq='D')")
exp4 = ("TimedeltaIndex(['1 days', '2 days', '3 days'], "
"dtype='timedelta64[ns]', freq='D')")
exp5 = ("TimedeltaIndex(['1 days 00:00:01', '2 days 00:00:00', "
"'3 days 00:00:00'], dtype='timedelta64[ns]', freq=None)")
with pd.option_context('display.width', 300):
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5],
[exp1, exp2, exp3, exp4, exp5]):
for func in ['__repr__', '__unicode__', '__str__']:
result = getattr(idx, func)()
self.assertEqual(result, expected)
def test_representation_to_series(self):
idx1 = TimedeltaIndex([], freq='D')
idx2 = TimedeltaIndex(['1 days'], freq='D')
idx3 = TimedeltaIndex(['1 days', '2 days'], freq='D')
idx4 = TimedeltaIndex(['1 days', '2 days', '3 days'], freq='D')
idx5 = TimedeltaIndex(['1 days 00:00:01', '2 days', '3 days'])
exp1 = """Series([], dtype: timedelta64[ns])"""
exp2 = """0 1 days
dtype: timedelta64[ns]"""
exp3 = """0 1 days
1 2 days
dtype: timedelta64[ns]"""
exp4 = """0 1 days
1 2 days
2 3 days
dtype: timedelta64[ns]"""
exp5 = """0 1 days 00:00:01
1 2 days 00:00:00
2 3 days 00:00:00
dtype: timedelta64[ns]"""
with pd.option_context('display.width', 300):
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5],
[exp1, exp2, exp3, exp4, exp5]):
result = repr(pd.Series(idx))
self.assertEqual(result, expected)
def test_summary(self):
# GH9116
idx1 = TimedeltaIndex([], freq='D')
idx2 = TimedeltaIndex(['1 days'], freq='D')
idx3 = TimedeltaIndex(['1 days', '2 days'], freq='D')
idx4 = TimedeltaIndex(['1 days', '2 days', '3 days'], freq='D')
idx5 = TimedeltaIndex(['1 days 00:00:01', '2 days', '3 days'])
exp1 = """TimedeltaIndex: 0 entries
Freq: D"""
exp2 = """TimedeltaIndex: 1 entries, 1 days to 1 days
Freq: D"""
exp3 = """TimedeltaIndex: 2 entries, 1 days to 2 days
Freq: D"""
exp4 = """TimedeltaIndex: 3 entries, 1 days to 3 days
Freq: D"""
exp5 = ("TimedeltaIndex: 3 entries, 1 days 00:00:01 to 3 days "
"00:00:00")
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5],
[exp1, exp2, exp3, exp4, exp5]):
result = idx.summary()
self.assertEqual(result, expected)
def test_add_iadd(self):
# only test adding/sub offsets as + is now numeric
# offset
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)]
for delta in offsets:
rng = timedelta_range('1 days', '10 days')
result = rng + delta
expected = timedelta_range('1 days 02:00:00', '10 days 02:00:00',
freq='D')
tm.assert_index_equal(result, expected)
rng += delta
tm.assert_index_equal(rng, expected)
# int
rng = timedelta_range('1 days 09:00:00', freq='H', periods=10)
result = rng + 1
expected = timedelta_range('1 days 10:00:00', freq='H', periods=10)
tm.assert_index_equal(result, expected)
rng += 1
tm.assert_index_equal(rng, expected)
def test_sub_isub(self):
# only test adding/sub offsets as - is now numeric
# offset
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)]
for delta in offsets:
rng = timedelta_range('1 days', '10 days')
result = rng - delta
expected = timedelta_range('0 days 22:00:00', '9 days 22:00:00')
tm.assert_index_equal(result, expected)
rng -= delta
tm.assert_index_equal(rng, expected)
# int
rng = timedelta_range('1 days 09:00:00', freq='H', periods=10)
result = rng - 1
expected = timedelta_range('1 days 08:00:00', freq='H', periods=10)
tm.assert_index_equal(result, expected)
rng -= 1
tm.assert_index_equal(rng, expected)
idx = TimedeltaIndex(['1 day', '2 day'])
msg = "cannot subtract a datelike from a TimedeltaIndex"
with tm.assertRaisesRegexp(TypeError, msg):
idx - Timestamp('2011-01-01')
result = Timestamp('2011-01-01') + idx
expected = DatetimeIndex(['2011-01-02', '2011-01-03'])
tm.assert_index_equal(result, expected)
def test_ops_compat(self):
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)]
rng = timedelta_range('1 days', '10 days', name='foo')
# multiply
for offset in offsets:
self.assertRaises(TypeError, lambda: rng * offset)
# divide
expected = Int64Index((np.arange(10) + 1) * 12, name='foo')
for offset in offsets:
result = rng / offset
tm.assert_index_equal(result, expected, exact=False)
# divide with nats
rng = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
expected = Float64Index([12, np.nan, 24], name='foo')
for offset in offsets:
result = rng / offset
tm.assert_index_equal(result, expected)
        # don't allow division by NaT (maybe we could allow it in the future)
self.assertRaises(TypeError, lambda: rng / pd.NaT)
def test_subtraction_ops(self):
# with datetimes/timedelta and tdi/dti
tdi = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
dti = date_range('20130101', periods=3, name='bar')
td = Timedelta('1 days')
dt = Timestamp('20130101')
self.assertRaises(TypeError, lambda: tdi - dt)
self.assertRaises(TypeError, lambda: tdi - dti)
self.assertRaises(TypeError, lambda: td - dt)
self.assertRaises(TypeError, lambda: td - dti)
result = dt - dti
expected = TimedeltaIndex(['0 days', '-1 days', '-2 days'], name='bar')
tm.assert_index_equal(result, expected)
result = dti - dt
expected = TimedeltaIndex(['0 days', '1 days', '2 days'], name='bar')
tm.assert_index_equal(result, expected)
result = tdi - td
expected = TimedeltaIndex(['0 days', pd.NaT, '1 days'], name='foo')
tm.assert_index_equal(result, expected, check_names=False)
result = td - tdi
expected = TimedeltaIndex(['0 days', pd.NaT, '-1 days'], name='foo')
tm.assert_index_equal(result, expected, check_names=False)
result = dti - td
expected = DatetimeIndex(
['20121231', '20130101', '20130102'], name='bar')
tm.assert_index_equal(result, expected, check_names=False)
result = dt - tdi
expected = DatetimeIndex(['20121231', pd.NaT, '20121230'], name='foo')
tm.assert_index_equal(result, expected)
def test_subtraction_ops_with_tz(self):
# check that dt/dti subtraction ops with tz are validated
dti = date_range('20130101', periods=3)
ts = Timestamp('20130101')
dt = ts.to_pydatetime()
dti_tz = date_range('20130101', periods=3).tz_localize('US/Eastern')
ts_tz = Timestamp('20130101').tz_localize('US/Eastern')
ts_tz2 = Timestamp('20130101').tz_localize('CET')
dt_tz = ts_tz.to_pydatetime()
td = Timedelta('1 days')
def _check(result, expected):
self.assertEqual(result, expected)
self.assertIsInstance(result, Timedelta)
# scalars
result = ts - ts
expected = Timedelta('0 days')
_check(result, expected)
result = dt_tz - ts_tz
expected = Timedelta('0 days')
_check(result, expected)
result = ts_tz - dt_tz
expected = Timedelta('0 days')
_check(result, expected)
# tz mismatches
self.assertRaises(TypeError, lambda: dt_tz - ts)
self.assertRaises(TypeError, lambda: dt_tz - dt)
self.assertRaises(TypeError, lambda: dt_tz - ts_tz2)
self.assertRaises(TypeError, lambda: dt - dt_tz)
self.assertRaises(TypeError, lambda: ts - dt_tz)
self.assertRaises(TypeError, lambda: ts_tz2 - ts)
self.assertRaises(TypeError, lambda: ts_tz2 - dt)
self.assertRaises(TypeError, lambda: ts_tz - ts_tz2)
# with dti
self.assertRaises(TypeError, lambda: dti - ts_tz)
self.assertRaises(TypeError, lambda: dti_tz - ts)
self.assertRaises(TypeError, lambda: dti_tz - ts_tz2)
result = dti_tz - dt_tz
expected = TimedeltaIndex(['0 days', '1 days', '2 days'])
tm.assert_index_equal(result, expected)
result = dt_tz - dti_tz
expected = TimedeltaIndex(['0 days', '-1 days', '-2 days'])
tm.assert_index_equal(result, expected)
result = dti_tz - ts_tz
expected = TimedeltaIndex(['0 days', '1 days', '2 days'])
tm.assert_index_equal(result, expected)
result = ts_tz - dti_tz
expected = TimedeltaIndex(['0 days', '-1 days', '-2 days'])
tm.assert_index_equal(result, expected)
result = td - td
expected = Timedelta('0 days')
_check(result, expected)
result = dti_tz - td
expected = DatetimeIndex(
['20121231', '20130101', '20130102'], tz='US/Eastern')
tm.assert_index_equal(result, expected)
def test_dti_tdi_numeric_ops(self):
# These are normally union/diff set-like ops
tdi = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
dti = date_range('20130101', periods=3, name='bar')
# TODO(wesm): unused?
# td = Timedelta('1 days')
# dt = Timestamp('20130101')
result = tdi - tdi
expected = TimedeltaIndex(['0 days', pd.NaT, '0 days'], name='foo')
tm.assert_index_equal(result, expected)
result = tdi + tdi
expected = TimedeltaIndex(['2 days', pd.NaT, '4 days'], name='foo')
tm.assert_index_equal(result, expected)
result = dti - tdi # name will be reset
expected = DatetimeIndex(['20121231', pd.NaT, '20130101'])
tm.assert_index_equal(result, expected)
def test_sub_period(self):
# GH 13078
# not supported, check TypeError
p = pd.Period('2011-01-01', freq='D')
for freq in [None, 'H']:
idx = pd.TimedeltaIndex(['1 hours', '2 hours'], freq=freq)
with tm.assertRaises(TypeError):
idx - p
with tm.assertRaises(TypeError):
p - idx
def test_addition_ops(self):
# with datetimes/timedelta and tdi/dti
tdi = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
dti = date_range('20130101', periods=3, name='bar')
td = Timedelta('1 days')
dt = Timestamp('20130101')
result = tdi + dt
expected = DatetimeIndex(['20130102', pd.NaT, '20130103'], name='foo')
tm.assert_index_equal(result, expected)
result = dt + tdi
expected = DatetimeIndex(['20130102', pd.NaT, '20130103'], name='foo')
tm.assert_index_equal(result, expected)
result = td + tdi
expected = TimedeltaIndex(['2 days', pd.NaT, '3 days'], name='foo')
tm.assert_index_equal(result, expected)
result = tdi + td
expected = TimedeltaIndex(['2 days', pd.NaT, '3 days'], name='foo')
tm.assert_index_equal(result, expected)
# unequal length
self.assertRaises(ValueError, lambda: tdi + dti[0:1])
self.assertRaises(ValueError, lambda: tdi[0:1] + dti)
# random indexes
self.assertRaises(TypeError, lambda: tdi + Int64Index([1, 2, 3]))
# this is a union!
# self.assertRaises(TypeError, lambda : Int64Index([1,2,3]) + tdi)
result = tdi + dti # name will be reset
expected = DatetimeIndex(['20130102', pd.NaT, '20130105'])
tm.assert_index_equal(result, expected)
result = dti + tdi # name will be reset
expected = DatetimeIndex(['20130102', pd.NaT, '20130105'])
tm.assert_index_equal(result, expected)
result = dt + td
expected = Timestamp('20130102')
self.assertEqual(result, expected)
result = td + dt
expected = Timestamp('20130102')
self.assertEqual(result, expected)
def test_comp_nat(self):
left = pd.TimedeltaIndex([pd.Timedelta('1 days'), pd.NaT,
pd.Timedelta('3 days')])
right = pd.TimedeltaIndex([pd.NaT, pd.NaT, pd.Timedelta('3 days')])
for l, r in [(left, right), (left.asobject, right.asobject)]:
result = l == r
expected = np.array([False, False, True])
tm.assert_numpy_array_equal(result, expected)
result = l != r
expected = np.array([True, True, False])
tm.assert_numpy_array_equal(result, expected)
expected = np.array([False, False, False])
tm.assert_numpy_array_equal(l == pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT == r, expected)
expected = np.array([True, True, True])
tm.assert_numpy_array_equal(l != pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT != l, expected)
expected = np.array([False, False, False])
tm.assert_numpy_array_equal(l < pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT > l, expected)
def test_value_counts_unique(self):
# GH 7735
idx = timedelta_range('1 days 09:00:00', freq='H', periods=10)
# create repeated values, 'n'th element is repeated by n+1 times
idx = TimedeltaIndex(np.repeat(idx.values, range(1, len(idx) + 1)))
exp_idx = timedelta_range('1 days 18:00:00', freq='-1H', periods=10)
expected = Series(range(10, 0, -1), index=exp_idx, dtype='int64')
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(), expected)
expected = timedelta_range('1 days 09:00:00', freq='H', periods=10)
tm.assert_index_equal(idx.unique(), expected)
idx = TimedeltaIndex(['1 days 09:00:00', '1 days 09:00:00',
'1 days 09:00:00', '1 days 08:00:00',
'1 days 08:00:00', pd.NaT])
exp_idx = TimedeltaIndex(['1 days 09:00:00', '1 days 08:00:00'])
expected = Series([3, 2], index=exp_idx)
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(), expected)
exp_idx = TimedeltaIndex(['1 days 09:00:00', '1 days 08:00:00',
pd.NaT])
expected = Series([3, 2, 1], index=exp_idx)
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(dropna=False), expected)
tm.assert_index_equal(idx.unique(), exp_idx)
def test_nonunique_contains(self):
# GH 9512
for idx in map(TimedeltaIndex, ([0, 1, 0], [0, 0, -1], [0, -1, -1],
['00:01:00', '00:01:00', '00:02:00'],
['00:01:00', '00:01:00', '00:00:01'])):
tm.assertIn(idx[0], idx)
def test_unknown_attribute(self):
# GH 9680
tdi = pd.timedelta_range(start=0, periods=10, freq='1s')
ts = pd.Series(np.random.normal(size=10), index=tdi)
self.assertNotIn('foo', ts.__dict__.keys())
self.assertRaises(AttributeError, lambda: ts.foo)
def test_order(self):
# GH 10295
idx1 = TimedeltaIndex(['1 day', '2 day', '3 day'], freq='D',
name='idx')
idx2 = TimedeltaIndex(
['1 hour', '2 hour', '3 hour'], freq='H', name='idx')
for idx in [idx1, idx2]:
ordered = idx.sort_values()
self.assert_index_equal(ordered, idx)
self.assertEqual(ordered.freq, idx.freq)
ordered = idx.sort_values(ascending=False)
expected = idx[::-1]
self.assert_index_equal(ordered, expected)
self.assertEqual(ordered.freq, expected.freq)
self.assertEqual(ordered.freq.n, -1)
ordered, indexer = idx.sort_values(return_indexer=True)
self.assert_index_equal(ordered, idx)
self.assert_numpy_array_equal(indexer,
np.array([0, 1, 2]),
check_dtype=False)
self.assertEqual(ordered.freq, idx.freq)
ordered, indexer = idx.sort_values(return_indexer=True,
ascending=False)
self.assert_index_equal(ordered, idx[::-1])
self.assertEqual(ordered.freq, expected.freq)
self.assertEqual(ordered.freq.n, -1)
idx1 = TimedeltaIndex(['1 hour', '3 hour', '5 hour',
'2 hour ', '1 hour'], name='idx1')
exp1 = TimedeltaIndex(['1 hour', '1 hour', '2 hour',
'3 hour', '5 hour'], name='idx1')
idx2 = TimedeltaIndex(['1 day', '3 day', '5 day',
'2 day', '1 day'], name='idx2')
# TODO(wesm): unused?
# exp2 = TimedeltaIndex(['1 day', '1 day', '2 day',
# '3 day', '5 day'], name='idx2')
# idx3 = TimedeltaIndex([pd.NaT, '3 minute', '5 minute',
# '2 minute', pd.NaT], name='idx3')
# exp3 = TimedeltaIndex([pd.NaT, pd.NaT, '2 minute', '3 minute',
# '5 minute'], name='idx3')
for idx, expected in [(idx1, exp1), (idx1, exp1), (idx1, exp1)]:
ordered = idx.sort_values()
self.assert_index_equal(ordered, expected)
self.assertIsNone(ordered.freq)
ordered = idx.sort_values(ascending=False)
self.assert_index_equal(ordered, expected[::-1])
self.assertIsNone(ordered.freq)
ordered, indexer = idx.sort_values(return_indexer=True)
self.assert_index_equal(ordered, expected)
exp = np.array([0, 4, 3, 1, 2])
self.assert_numpy_array_equal(indexer, exp, check_dtype=False)
self.assertIsNone(ordered.freq)
ordered, indexer = idx.sort_values(return_indexer=True,
ascending=False)
self.assert_index_equal(ordered, expected[::-1])
exp = np.array([2, 1, 3, 4, 0])
self.assert_numpy_array_equal(indexer, exp, check_dtype=False)
self.assertIsNone(ordered.freq)
def test_getitem(self):
idx1 = pd.timedelta_range('1 day', '31 day', freq='D', name='idx')
for idx in [idx1]:
result = idx[0]
self.assertEqual(result, pd.Timedelta('1 day'))
result = idx[0:5]
expected = pd.timedelta_range('1 day', '5 day', freq='D',
name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx[0:10:2]
expected = pd.timedelta_range('1 day', '9 day', freq='2D',
name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx[-20:-5:3]
expected = pd.timedelta_range('12 day', '24 day', freq='3D',
name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx[4::-1]
expected = TimedeltaIndex(['5 day', '4 day', '3 day',
'2 day', '1 day'],
freq='-1D', name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
def test_drop_duplicates_metadata(self):
# GH 10115
idx = pd.timedelta_range('1 day', '31 day', freq='D', name='idx')
result = idx.drop_duplicates()
self.assert_index_equal(idx, result)
self.assertEqual(idx.freq, result.freq)
idx_dup = idx.append(idx)
self.assertIsNone(idx_dup.freq) # freq is reset
result = idx_dup.drop_duplicates()
self.assert_index_equal(idx, result)
self.assertIsNone(result.freq)
def test_drop_duplicates(self):
# to check Index/Series compat
base = pd.timedelta_range('1 day', '31 day', freq='D', name='idx')
idx = base.append(base[:5])
res = idx.drop_duplicates()
tm.assert_index_equal(res, base)
res = Series(idx).drop_duplicates()
tm.assert_series_equal(res, Series(base))
res = idx.drop_duplicates(keep='last')
exp = base[5:].append(base[:5])
tm.assert_index_equal(res, exp)
res = Series(idx).drop_duplicates(keep='last')
tm.assert_series_equal(res, Series(exp, index=np.arange(5, 36)))
res = idx.drop_duplicates(keep=False)
tm.assert_index_equal(res, base[5:])
res = Series(idx).drop_duplicates(keep=False)
tm.assert_series_equal(res, Series(base[5:], index=np.arange(5, 31)))
def test_take(self):
# GH 10295
idx1 = pd.timedelta_range('1 day', '31 day', freq='D', name='idx')
for idx in [idx1]:
result = idx.take([0])
self.assertEqual(result, pd.Timedelta('1 day'))
result = idx.take([-1])
self.assertEqual(result, pd.Timedelta('31 day'))
result = idx.take([0, 1, 2])
expected = pd.timedelta_range('1 day', '3 day', freq='D',
name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx.take([0, 2, 4])
expected = pd.timedelta_range('1 day', '5 day', freq='2D',
name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx.take([7, 4, 1])
expected = pd.timedelta_range('8 day', '2 day', freq='-3D',
name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx.take([3, 2, 5])
expected = TimedeltaIndex(['4 day', '3 day', '6 day'], name='idx')
self.assert_index_equal(result, expected)
self.assertIsNone(result.freq)
result = idx.take([-3, 2, 5])
expected = TimedeltaIndex(['29 day', '3 day', '6 day'], name='idx')
self.assert_index_equal(result, expected)
self.assertIsNone(result.freq)
def test_take_invalid_kwargs(self):
idx = pd.timedelta_range('1 day', '31 day', freq='D', name='idx')
indices = [1, 6, 5, 9, 10, 13, 15, 3]
msg = r"take\(\) got an unexpected keyword argument 'foo'"
tm.assertRaisesRegexp(TypeError, msg, idx.take,
indices, foo=2)
msg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, msg, idx.take,
indices, out=indices)
msg = "the 'mode' parameter is not supported"
tm.assertRaisesRegexp(ValueError, msg, idx.take,
indices, mode='clip')
def test_infer_freq(self):
# GH 11018
for freq in ['D', '3D', '-3D', 'H', '2H', '-2H', 'T', '2T', 'S', '-3S'
]:
idx = pd.timedelta_range('1', freq=freq, periods=10)
result = pd.TimedeltaIndex(idx.asi8, freq='infer')
tm.assert_index_equal(idx, result)
self.assertEqual(result.freq, freq)
def test_nat_new(self):
idx = pd.timedelta_range('1', freq='D', periods=5, name='x')
result = idx._nat_new()
exp = pd.TimedeltaIndex([pd.NaT] * 5, name='x')
tm.assert_index_equal(result, exp)
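        # With box=False the raw int64 values are returned; NaT is tslib.iNaT.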
result = idx._nat_new(box=False)
exp = np.array([tslib.iNaT] * 5, dtype=np.int64)
tm.assert_numpy_array_equal(result, exp)
def test_shift(self):
# GH 9903
idx = pd.TimedeltaIndex([], name='xxx')
tm.assert_index_equal(idx.shift(0, freq='H'), idx)
tm.assert_index_equal(idx.shift(3, freq='H'), idx)
idx = pd.TimedeltaIndex(['5 hours', '6 hours', '9 hours'], name='xxx')
tm.assert_index_equal(idx.shift(0, freq='H'), idx)
exp = pd.TimedeltaIndex(['8 hours', '9 hours', '12 hours'], name='xxx')
tm.assert_index_equal(idx.shift(3, freq='H'), exp)
exp = pd.TimedeltaIndex(['2 hours', '3 hours', '6 hours'], name='xxx')
tm.assert_index_equal(idx.shift(-3, freq='H'), exp)
tm.assert_index_equal(idx.shift(0, freq='T'), idx)
exp = pd.TimedeltaIndex(['05:03:00', '06:03:00', '9:03:00'],
name='xxx')
tm.assert_index_equal(idx.shift(3, freq='T'), exp)
exp = pd.TimedeltaIndex(['04:57:00', '05:57:00', '8:57:00'],
name='xxx')
tm.assert_index_equal(idx.shift(-3, freq='T'), exp)
def test_repeat(self):
index = pd.timedelta_range('1 days', periods=2, freq='D')
exp = pd.TimedeltaIndex(['1 days', '1 days', '2 days', '2 days'])
for res in [index.repeat(2), np.repeat(index, 2)]:
tm.assert_index_equal(res, exp)
self.assertIsNone(res.freq)
index = TimedeltaIndex(['1 days', 'NaT', '3 days'])
exp = TimedeltaIndex(['1 days', '1 days', '1 days',
'NaT', 'NaT', 'NaT',
'3 days', '3 days', '3 days'])
for res in [index.repeat(3), np.repeat(index, 3)]:
tm.assert_index_equal(res, exp)
self.assertIsNone(res.freq)
def test_nat(self):
self.assertIs(pd.TimedeltaIndex._na_value, pd.NaT)
self.assertIs(pd.TimedeltaIndex([])._na_value, pd.NaT)
idx = pd.TimedeltaIndex(['1 days', '2 days'])
self.assertTrue(idx._can_hold_na)
tm.assert_numpy_array_equal(idx._isnan, np.array([False, False]))
self.assertFalse(idx.hasnans)
tm.assert_numpy_array_equal(idx._nan_idxs,
np.array([], dtype=np.intp))
idx = pd.TimedeltaIndex(['1 days', 'NaT'])
self.assertTrue(idx._can_hold_na)
tm.assert_numpy_array_equal(idx._isnan, np.array([False, True]))
self.assertTrue(idx.hasnans)
tm.assert_numpy_array_equal(idx._nan_idxs,
np.array([1], dtype=np.intp))
def test_equals(self):
# GH 13107
idx = pd.TimedeltaIndex(['1 days', '2 days', 'NaT'])
self.assertTrue(idx.equals(idx))
self.assertTrue(idx.equals(idx.copy()))
self.assertTrue(idx.equals(idx.asobject))
self.assertTrue(idx.asobject.equals(idx))
self.assertTrue(idx.asobject.equals(idx.asobject))
self.assertFalse(idx.equals(list(idx)))
self.assertFalse(idx.equals(pd.Series(idx)))
idx2 = pd.TimedeltaIndex(['2 days', '1 days', 'NaT'])
self.assertFalse(idx.equals(idx2))
self.assertFalse(idx.equals(idx2.copy()))
self.assertFalse(idx.equals(idx2.asobject))
self.assertFalse(idx.asobject.equals(idx2))
self.assertFalse(idx.asobject.equals(idx2.asobject))
self.assertFalse(idx.equals(list(idx2)))
self.assertFalse(idx.equals(pd.Series(idx2)))
class TestPeriodIndexOps(Ops):
def setUp(self):
super(TestPeriodIndexOps, self).setUp()
mask = lambda x: (isinstance(x, DatetimeIndex) or
isinstance(x, PeriodIndex))
self.is_valid_objs = [o for o in self.objs if mask(o)]
self.not_valid_objs = [o for o in self.objs if not mask(o)]
def test_ops_properties(self):
self.check_ops_properties(
['year', 'month', 'day', 'hour', 'minute', 'second', 'weekofyear',
'week', 'dayofweek', 'dayofyear', 'quarter'])
self.check_ops_properties(['qyear'],
lambda x: isinstance(x, PeriodIndex))
def test_asobject_tolist(self):
idx = pd.period_range(start='2013-01-01', periods=4, freq='M',
name='idx')
expected_list = [pd.Period('2013-01-31', freq='M'),
pd.Period('2013-02-28', freq='M'),
pd.Period('2013-03-31', freq='M'),
pd.Period('2013-04-30', freq='M')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
idx = PeriodIndex(['2013-01-01', '2013-01-02', 'NaT',
'2013-01-04'], freq='D', name='idx')
expected_list = [pd.Period('2013-01-01', freq='D'),
pd.Period('2013-01-02', freq='D'),
pd.Period('NaT', freq='D'),
pd.Period('2013-01-04', freq='D')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
tm.assert_index_equal(result, expected)
for i in [0, 1, 3]:
self.assertEqual(result[i], expected[i])
self.assertIs(result[2], pd.NaT)
self.assertEqual(result.name, expected.name)
result_list = idx.tolist()
for i in [0, 1, 3]:
self.assertEqual(result_list[i], expected_list[i])
self.assertIs(result_list[2], pd.NaT)
def test_minmax(self):
# monotonic
idx1 = pd.PeriodIndex([pd.NaT, '2011-01-01', '2011-01-02',
'2011-01-03'], freq='D')
self.assertTrue(idx1.is_monotonic)
# non-monotonic
idx2 = pd.PeriodIndex(['2011-01-01', pd.NaT, '2011-01-03',
'2011-01-02', pd.NaT], freq='D')
self.assertFalse(idx2.is_monotonic)
for idx in [idx1, idx2]:
self.assertEqual(idx.min(), pd.Period('2011-01-01', freq='D'))
self.assertEqual(idx.max(), pd.Period('2011-01-03', freq='D'))
self.assertEqual(idx1.argmin(), 1)
self.assertEqual(idx2.argmin(), 0)
self.assertEqual(idx1.argmax(), 3)
self.assertEqual(idx2.argmax(), 2)
for op in ['min', 'max']:
# Return NaT
obj = PeriodIndex([], freq='M')
result = getattr(obj, op)()
self.assertIs(result, tslib.NaT)
obj = PeriodIndex([pd.NaT], freq='M')
result = getattr(obj, op)()
self.assertIs(result, tslib.NaT)
obj = PeriodIndex([pd.NaT, pd.NaT, pd.NaT], freq='M')
result = getattr(obj, op)()
self.assertIs(result, tslib.NaT)
def test_numpy_minmax(self):
pr = pd.period_range(start='2016-01-15', end='2016-01-20')
self.assertEqual(np.min(pr), Period('2016-01-15', freq='D'))
self.assertEqual(np.max(pr), Period('2016-01-20', freq='D'))
errmsg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, errmsg, np.min, pr, out=0)
tm.assertRaisesRegexp(ValueError, errmsg, np.max, pr, out=0)
self.assertEqual(np.argmin(pr), 0)
self.assertEqual(np.argmax(pr), 5)
if not _np_version_under1p10:
errmsg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, errmsg, np.argmin, pr, out=0)
tm.assertRaisesRegexp(ValueError, errmsg, np.argmax, pr, out=0)
def test_representation(self):
# GH 7601
idx1 = PeriodIndex([], freq='D')
idx2 = PeriodIndex(['2011-01-01'], freq='D')
idx3 = PeriodIndex(['2011-01-01', '2011-01-02'], freq='D')
idx4 = PeriodIndex(['2011-01-01', '2011-01-02', '2011-01-03'],
freq='D')
idx5 = PeriodIndex(['2011', '2012', '2013'], freq='A')
idx6 = PeriodIndex(['2011-01-01 09:00', '2012-02-01 10:00',
'NaT'], freq='H')
idx7 = pd.period_range('2013Q1', periods=1, freq="Q")
idx8 = pd.period_range('2013Q1', periods=2, freq="Q")
idx9 = pd.period_range('2013Q1', periods=3, freq="Q")
idx10 = PeriodIndex(['2011-01-01', '2011-02-01'], freq='3D')
exp1 = """PeriodIndex([], dtype='period[D]', freq='D')"""
exp2 = """PeriodIndex(['2011-01-01'], dtype='period[D]', freq='D')"""
exp3 = ("PeriodIndex(['2011-01-01', '2011-01-02'], dtype='period[D]', "
"freq='D')")
exp4 = ("PeriodIndex(['2011-01-01', '2011-01-02', '2011-01-03'], "
"dtype='period[D]', freq='D')")
exp5 = ("PeriodIndex(['2011', '2012', '2013'], dtype='period[A-DEC]', "
"freq='A-DEC')")
exp6 = ("PeriodIndex(['2011-01-01 09:00', '2012-02-01 10:00', 'NaT'], "
"dtype='period[H]', freq='H')")
exp7 = ("PeriodIndex(['2013Q1'], dtype='period[Q-DEC]', "
"freq='Q-DEC')")
exp8 = ("PeriodIndex(['2013Q1', '2013Q2'], dtype='period[Q-DEC]', "
"freq='Q-DEC')")
exp9 = ("PeriodIndex(['2013Q1', '2013Q2', '2013Q3'], "
"dtype='period[Q-DEC]', freq='Q-DEC')")
exp10 = ("PeriodIndex(['2011-01-01', '2011-02-01'], "
"dtype='period[3D]', freq='3D')")
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5,
idx6, idx7, idx8, idx9, idx10],
[exp1, exp2, exp3, exp4, exp5,
exp6, exp7, exp8, exp9, exp10]):
for func in ['__repr__', '__unicode__', '__str__']:
result = getattr(idx, func)()
self.assertEqual(result, expected)
def test_representation_to_series(self):
# GH 10971
idx1 = PeriodIndex([], freq='D')
idx2 = PeriodIndex(['2011-01-01'], freq='D')
idx3 = PeriodIndex(['2011-01-01', '2011-01-02'], freq='D')
idx4 = PeriodIndex(['2011-01-01', '2011-01-02',
'2011-01-03'], freq='D')
idx5 = PeriodIndex(['2011', '2012', '2013'], freq='A')
idx6 = PeriodIndex(['2011-01-01 09:00', '2012-02-01 10:00',
'NaT'], freq='H')
idx7 = pd.period_range('2013Q1', periods=1, freq="Q")
idx8 = pd.period_range('2013Q1', periods=2, freq="Q")
idx9 = pd.period_range('2013Q1', periods=3, freq="Q")
exp1 = """Series([], dtype: object)"""
exp2 = """0 2011-01-01
dtype: object"""
exp3 = """0 2011-01-01
1 2011-01-02
dtype: object"""
exp4 = """0 2011-01-01
1 2011-01-02
2 2011-01-03
dtype: object"""
exp5 = """0 2011
1 2012
2 2013
dtype: object"""
exp6 = """0 2011-01-01 09:00
1 2012-02-01 10:00
2 NaT
dtype: object"""
exp7 = """0 2013Q1
dtype: object"""
exp8 = """0 2013Q1
1 2013Q2
dtype: object"""
exp9 = """0 2013Q1
1 2013Q2
2 2013Q3
dtype: object"""
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5,
idx6, idx7, idx8, idx9],
[exp1, exp2, exp3, exp4, exp5,
exp6, exp7, exp8, exp9]):
result = repr(pd.Series(idx))
self.assertEqual(result, expected)
def test_summary(self):
# GH9116
idx1 = PeriodIndex([], freq='D')
idx2 = PeriodIndex(['2011-01-01'], freq='D')
idx3 = PeriodIndex(['2011-01-01', '2011-01-02'], freq='D')
idx4 = PeriodIndex(
['2011-01-01', '2011-01-02', '2011-01-03'], freq='D')
idx5 = PeriodIndex(['2011', '2012', '2013'], freq='A')
idx6 = PeriodIndex(
['2011-01-01 09:00', '2012-02-01 10:00', 'NaT'], freq='H')
idx7 = pd.period_range('2013Q1', periods=1, freq="Q")
idx8 = pd.period_range('2013Q1', periods=2, freq="Q")
idx9 = pd.period_range('2013Q1', periods=3, freq="Q")
exp1 = """PeriodIndex: 0 entries
Freq: D"""
exp2 = """PeriodIndex: 1 entries, 2011-01-01 to 2011-01-01
Freq: D"""
exp3 = """PeriodIndex: 2 entries, 2011-01-01 to 2011-01-02
Freq: D"""
exp4 = """PeriodIndex: 3 entries, 2011-01-01 to 2011-01-03
Freq: D"""
exp5 = """PeriodIndex: 3 entries, 2011 to 2013
Freq: A-DEC"""
exp6 = """PeriodIndex: 3 entries, 2011-01-01 09:00 to NaT
Freq: H"""
exp7 = """PeriodIndex: 1 entries, 2013Q1 to 2013Q1
Freq: Q-DEC"""
exp8 = """PeriodIndex: 2 entries, 2013Q1 to 2013Q2
Freq: Q-DEC"""
exp9 = """PeriodIndex: 3 entries, 2013Q1 to 2013Q3
Freq: Q-DEC"""
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5,
idx6, idx7, idx8, idx9],
[exp1, exp2, exp3, exp4, exp5,
exp6, exp7, exp8, exp9]):
result = idx.summary()
self.assertEqual(result, expected)
def test_resolution(self):
for freq, expected in zip(['A', 'Q', 'M', 'D', 'H',
'T', 'S', 'L', 'U'],
['day', 'day', 'day', 'day',
'hour', 'minute', 'second',
'millisecond', 'microsecond']):
idx = pd.period_range(start='2013-04-01', periods=30, freq=freq)
self.assertEqual(idx.resolution, expected)
def test_union(self):
# union
rng1 = pd.period_range('1/1/2000', freq='D', periods=5)
other1 = pd.period_range('1/6/2000', freq='D', periods=5)
expected1 = pd.period_range('1/1/2000', freq='D', periods=10)
rng2 = pd.period_range('1/1/2000', freq='D', periods=5)
other2 = pd.period_range('1/4/2000', freq='D', periods=5)
expected2 = pd.period_range('1/1/2000', freq='D', periods=8)
rng3 = pd.period_range('1/1/2000', freq='D', periods=5)
other3 = pd.PeriodIndex([], freq='D')
expected3 = pd.period_range('1/1/2000', freq='D', periods=5)
rng4 = pd.period_range('2000-01-01 09:00', freq='H', periods=5)
other4 = pd.period_range('2000-01-02 09:00', freq='H', periods=5)
expected4 = pd.PeriodIndex(['2000-01-01 09:00', '2000-01-01 10:00',
'2000-01-01 11:00', '2000-01-01 12:00',
'2000-01-01 13:00', '2000-01-02 09:00',
'2000-01-02 10:00', '2000-01-02 11:00',
'2000-01-02 12:00', '2000-01-02 13:00'],
freq='H')
rng5 = pd.PeriodIndex(['2000-01-01 09:01', '2000-01-01 09:03',
'2000-01-01 09:05'], freq='T')
        other5 = pd.PeriodIndex(['2000-01-01 09:01', '2000-01-01 09:05',
                                 '2000-01-01 09:08'],
freq='T')
expected5 = pd.PeriodIndex(['2000-01-01 09:01', '2000-01-01 09:03',
'2000-01-01 09:05', '2000-01-01 09:08'],
freq='T')
rng6 = pd.period_range('2000-01-01', freq='M', periods=7)
other6 = pd.period_range('2000-04-01', freq='M', periods=7)
expected6 = pd.period_range('2000-01-01', freq='M', periods=10)
rng7 = pd.period_range('2003-01-01', freq='A', periods=5)
other7 = pd.period_range('1998-01-01', freq='A', periods=8)
expected7 = pd.period_range('1998-01-01', freq='A', periods=10)
for rng, other, expected in [(rng1, other1, expected1),
(rng2, other2, expected2),
(rng3, other3, expected3), (rng4, other4,
expected4),
(rng5, other5, expected5), (rng6, other6,
expected6),
(rng7, other7, expected7)]:
result_union = rng.union(other)
tm.assert_index_equal(result_union, expected)
def test_add_iadd(self):
rng = pd.period_range('1/1/2000', freq='D', periods=5)
other = pd.period_range('1/6/2000', freq='D', periods=5)
# previously performed setop union, now raises TypeError (GH14164)
with tm.assertRaises(TypeError):
rng + other
with tm.assertRaises(TypeError):
rng += other
# offset
# DateOffset
rng = pd.period_range('2014', '2024', freq='A')
result = rng + pd.offsets.YearEnd(5)
expected = pd.period_range('2019', '2029', freq='A')
tm.assert_index_equal(result, expected)
rng += pd.offsets.YearEnd(5)
tm.assert_index_equal(rng, expected)
for o in [pd.offsets.YearBegin(2), pd.offsets.MonthBegin(1),
pd.offsets.Minute(), np.timedelta64(365, 'D'),
timedelta(365), Timedelta(days=365)]:
msg = ('Input has different freq(=.+)? '
'from PeriodIndex\\(freq=A-DEC\\)')
with tm.assertRaisesRegexp(period.IncompatibleFrequency, msg):
rng + o
rng = pd.period_range('2014-01', '2016-12', freq='M')
result = rng + pd.offsets.MonthEnd(5)
expected = pd.period_range('2014-06', '2017-05', freq='M')
tm.assert_index_equal(result, expected)
rng += pd.offsets.MonthEnd(5)
tm.assert_index_equal(rng, expected)
for o in [pd.offsets.YearBegin(2), pd.offsets.MonthBegin(1),
pd.offsets.Minute(), np.timedelta64(365, 'D'),
timedelta(365), Timedelta(days=365)]:
rng = pd.period_range('2014-01', '2016-12', freq='M')
msg = 'Input has different freq(=.+)? from PeriodIndex\\(freq=M\\)'
with tm.assertRaisesRegexp(period.IncompatibleFrequency, msg):
rng + o
# Tick
offsets = [pd.offsets.Day(3), timedelta(days=3),
np.timedelta64(3, 'D'), pd.offsets.Hour(72),
timedelta(minutes=60 * 24 * 3), np.timedelta64(72, 'h'),
Timedelta('72:00:00')]
for delta in offsets:
rng = pd.period_range('2014-05-01', '2014-05-15', freq='D')
result = rng + delta
expected = pd.period_range('2014-05-04', '2014-05-18', freq='D')
tm.assert_index_equal(result, expected)
rng += delta
tm.assert_index_equal(rng, expected)
for o in [pd.offsets.YearBegin(2), pd.offsets.MonthBegin(1),
pd.offsets.Minute(), np.timedelta64(4, 'h'),
timedelta(hours=23), Timedelta('23:00:00')]:
rng = pd.period_range('2014-05-01', '2014-05-15', freq='D')
msg = 'Input has different freq(=.+)? from PeriodIndex\\(freq=D\\)'
with tm.assertRaisesRegexp(period.IncompatibleFrequency, msg):
rng + o
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), pd.offsets.Minute(120),
timedelta(minutes=120), np.timedelta64(120, 'm'),
Timedelta(minutes=120)]
for delta in offsets:
rng = pd.period_range('2014-01-01 10:00', '2014-01-05 10:00',
freq='H')
result = rng + delta
expected = pd.period_range('2014-01-01 12:00', '2014-01-05 12:00',
freq='H')
tm.assert_index_equal(result, expected)
rng += delta
tm.assert_index_equal(rng, expected)
for delta in [pd.offsets.YearBegin(2), timedelta(minutes=30),
np.timedelta64(30, 's'), Timedelta(seconds=30)]:
rng = pd.period_range('2014-01-01 10:00', '2014-01-05 10:00',
freq='H')
msg = 'Input has different freq(=.+)? from PeriodIndex\\(freq=H\\)'
with tm.assertRaisesRegexp(period.IncompatibleFrequency, msg):
result = rng + delta
with tm.assertRaisesRegexp(period.IncompatibleFrequency, msg):
rng += delta
# int
rng = pd.period_range('2000-01-01 09:00', freq='H', periods=10)
result = rng + 1
expected = pd.period_range('2000-01-01 10:00', freq='H', periods=10)
tm.assert_index_equal(result, expected)
rng += 1
tm.assert_index_equal(rng, expected)
def test_difference(self):
# diff
rng1 = pd.period_range('1/1/2000', freq='D', periods=5)
other1 = pd.period_range('1/6/2000', freq='D', periods=5)
expected1 = pd.period_range('1/1/2000', freq='D', periods=5)
rng2 = pd.period_range('1/1/2000', freq='D', periods=5)
other2 = pd.period_range('1/4/2000', freq='D', periods=5)
expected2 = pd.period_range('1/1/2000', freq='D', periods=3)
rng3 = pd.period_range('1/1/2000', freq='D', periods=5)
other3 = pd.PeriodIndex([], freq='D')
expected3 = pd.period_range('1/1/2000', freq='D', periods=5)
rng4 = pd.period_range('2000-01-01 09:00', freq='H', periods=5)
other4 = pd.period_range('2000-01-02 09:00', freq='H', periods=5)
expected4 = rng4
rng5 = pd.PeriodIndex(['2000-01-01 09:01', '2000-01-01 09:03',
'2000-01-01 09:05'], freq='T')
other5 = pd.PeriodIndex(
['2000-01-01 09:01', '2000-01-01 09:05'], freq='T')
expected5 = pd.PeriodIndex(['2000-01-01 09:03'], freq='T')
rng6 = pd.period_range('2000-01-01', freq='M', periods=7)
other6 = pd.period_range('2000-04-01', freq='M', periods=7)
expected6 = pd.period_range('2000-01-01', freq='M', periods=3)
rng7 = pd.period_range('2003-01-01', freq='A', periods=5)
other7 = pd.period_range('1998-01-01', freq='A', periods=8)
expected7 = pd.period_range('2006-01-01', freq='A', periods=2)
for rng, other, expected in [(rng1, other1, expected1),
(rng2, other2, expected2),
(rng3, other3, expected3),
(rng4, other4, expected4),
(rng5, other5, expected5),
(rng6, other6, expected6),
(rng7, other7, expected7), ]:
result_union = rng.difference(other)
tm.assert_index_equal(result_union, expected)
def test_sub_isub(self):
# previously performed setop, now raises TypeError (GH14164)
# TODO needs to wait on #13077 for decision on result type
rng = pd.period_range('1/1/2000', freq='D', periods=5)
other = pd.period_range('1/6/2000', freq='D', periods=5)
with tm.assertRaises(TypeError):
rng - other
with tm.assertRaises(TypeError):
rng -= other
# offset
# DateOffset
rng = pd.period_range('2014', '2024', freq='A')
result = rng - pd.offsets.YearEnd(5)
expected = pd.period_range('2009', '2019', freq='A')
tm.assert_index_equal(result, expected)
rng -= pd.offsets.YearEnd(5)
tm.assert_index_equal(rng, expected)
for o in [pd.offsets.YearBegin(2), pd.offsets.MonthBegin(1),
pd.offsets.Minute(), np.timedelta64(365, 'D'),
timedelta(365)]:
rng = pd.period_range('2014', '2024', freq='A')
msg = ('Input has different freq(=.+)? '
'from PeriodIndex\\(freq=A-DEC\\)')
with tm.assertRaisesRegexp(period.IncompatibleFrequency, msg):
rng - o
rng = pd.period_range('2014-01', '2016-12', freq='M')
result = rng - pd.offsets.MonthEnd(5)
expected = pd.period_range('2013-08', '2016-07', freq='M')
tm.assert_index_equal(result, expected)
rng -= pd.offsets.MonthEnd(5)
tm.assert_index_equal(rng, expected)
for o in [pd.offsets.YearBegin(2), pd.offsets.MonthBegin(1),
pd.offsets.Minute(), np.timedelta64(365, 'D'),
timedelta(365)]:
rng = pd.period_range('2014-01', '2016-12', freq='M')
msg = 'Input has different freq(=.+)? from PeriodIndex\\(freq=M\\)'
with tm.assertRaisesRegexp(period.IncompatibleFrequency, msg):
rng - o
# Tick
offsets = [pd.offsets.Day(3), timedelta(days=3),
np.timedelta64(3, 'D'), pd.offsets.Hour(72),
timedelta(minutes=60 * 24 * 3), np.timedelta64(72, 'h')]
for delta in offsets:
rng = pd.period_range('2014-05-01', '2014-05-15', freq='D')
result = rng - delta
expected = pd.period_range('2014-04-28', '2014-05-12', freq='D')
tm.assert_index_equal(result, expected)
rng -= delta
tm.assert_index_equal(rng, expected)
for o in [pd.offsets.YearBegin(2), pd.offsets.MonthBegin(1),
pd.offsets.Minute(), np.timedelta64(4, 'h'),
timedelta(hours=23)]:
rng = pd.period_range('2014-05-01', '2014-05-15', freq='D')
msg = 'Input has different freq(=.+)? from PeriodIndex\\(freq=D\\)'
with tm.assertRaisesRegexp(period.IncompatibleFrequency, msg):
rng - o
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), pd.offsets.Minute(120),
timedelta(minutes=120), np.timedelta64(120, 'm')]
for delta in offsets:
rng = pd.period_range('2014-01-01 10:00', '2014-01-05 10:00',
freq='H')
result = rng - delta
expected = pd.period_range('2014-01-01 08:00', '2014-01-05 08:00',
freq='H')
tm.assert_index_equal(result, expected)
rng -= delta
tm.assert_index_equal(rng, expected)
for delta in [pd.offsets.YearBegin(2), timedelta(minutes=30),
np.timedelta64(30, 's')]:
rng = pd.period_range('2014-01-01 10:00', '2014-01-05 10:00',
freq='H')
msg = 'Input has different freq(=.+)? from PeriodIndex\\(freq=H\\)'
with tm.assertRaisesRegexp(period.IncompatibleFrequency, msg):
                result = rng - delta
            with tm.assertRaisesRegexp(period.IncompatibleFrequency, msg):
                rng -= delta
# int
rng = pd.period_range('2000-01-01 09:00', freq='H', periods=10)
result = rng - 1
expected = pd.period_range('2000-01-01 08:00', freq='H', periods=10)
tm.assert_index_equal(result, expected)
rng -= 1
tm.assert_index_equal(rng, expected)
def test_comp_nat(self):
left = pd.PeriodIndex([pd.Period('2011-01-01'), pd.NaT,
pd.Period('2011-01-03')])
right = pd.PeriodIndex([pd.NaT, pd.NaT, pd.Period('2011-01-03')])
for l, r in [(left, right), (left.asobject, right.asobject)]:
result = l == r
expected = np.array([False, False, True])
tm.assert_numpy_array_equal(result, expected)
result = l != r
expected = np.array([True, True, False])
tm.assert_numpy_array_equal(result, expected)
expected = np.array([False, False, False])
tm.assert_numpy_array_equal(l == pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT == r, expected)
expected = np.array([True, True, True])
tm.assert_numpy_array_equal(l != pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT != l, expected)
expected = np.array([False, False, False])
            tm.assert_numpy_array_equal(l < pd.NaT, expected)
            tm.assert_numpy_array_equal(pd.NaT > l, expected)
from functools import partial
import pandas as pd
import sqlalchemy as sa
from airflow.operators.python_operator import PythonOperator
from dataflow.dags import _PipelineDAG
from dataflow.operators.common import fetch_from_api_endpoint, fetch_from_hosted_csv
from dataflow.operators.covid19 import fetch_apple_mobility_data
from dataflow.operators.csv_inputs import fetch_mapped_hosted_csvs
from dataflow.operators.db_tables import query_database
from dataflow.utils import TableConfig
class OxfordCovid19GovernmentResponseTracker(_PipelineDAG):
source_url = 'https://oxcgrtportal.azurewebsites.net/api/CSVDownload'
allow_null_columns = True
use_utc_now_as_source_modified = True
table_config = TableConfig(
table_name='oxford_covid19_government_response_tracker',
field_mapping=[
('CountryName', sa.Column('country_name', sa.String)),
('CountryCode', sa.Column('country_code', sa.String)),
('RegionName', sa.Column('region_name', sa.String)),
('RegionCode', sa.Column('region_code', sa.String)),
('Date', sa.Column('date', sa.Numeric)),
('C1_School closing', sa.Column('c1_school_closing', sa.Numeric)),
('C1_Flag', sa.Column('c1_flag', sa.Numeric)),
('C1_Notes', sa.Column('c1_notes', sa.Text)),
('C2_Workplace closing', sa.Column('c2_workplace_closing', sa.Numeric)),
('C2_Flag', sa.Column('c2_flag', sa.Numeric)),
('C2_Notes', sa.Column('c2_notes', sa.Text)),
(
'C3_Cancel public events',
sa.Column('c3_cancel_public_events', sa.Numeric),
),
('C3_Flag', sa.Column('c3_flag', sa.Numeric)),
('C3_Notes', sa.Column('c3_notes', sa.Text)),
(
'C4_Restrictions on gatherings',
sa.Column('c4_restrictions_on_gatherings', sa.Numeric),
),
('C4_Flag', sa.Column('c4_flag', sa.Numeric)),
('C4_Notes', sa.Column('c4_notes', sa.Text)),
(
'C5_Close public transport',
sa.Column('c5_close_public_transport', sa.Numeric),
),
('C5_Flag', sa.Column('c5_flag', sa.Numeric)),
('C5_Notes', sa.Column('c5_notes', sa.Text)),
(
'C6_Stay at home requirements',
sa.Column('c6_stay_at_home_requirements', sa.Numeric),
),
('C6_Flag', sa.Column('c6_flag', sa.Numeric)),
('C6_Notes', sa.Column('c6_notes', sa.Text)),
(
'C7_Restrictions on internal movement',
sa.Column('c7_restrictions_on_internal_movement', sa.Numeric),
),
('C7_Flag', sa.Column('c7_flag', sa.Numeric)),
('C7_Notes', sa.Column('c7_notes', sa.Text)),
(
'C8_International travel controls',
sa.Column('c8_international_travel_controls', sa.Numeric),
),
('C8_Notes', sa.Column('c8_notes', sa.Text)),
('E1_Income support', sa.Column('e1_income_support', sa.Numeric)),
('E1_Flag', sa.Column('e1_flag', sa.Numeric)),
('E1_Notes', sa.Column('e1_notes', sa.Text)),
(
'E2_Debt/contract relief',
sa.Column('e2_debt_contract_relief', sa.Numeric),
),
('E2_Notes', sa.Column('e2_notes', sa.Text)),
('E3_Fiscal measures', sa.Column('e3_fiscal_measures', sa.Numeric)),
('E3_Notes', sa.Column('e3_notes', sa.Text)),
(
'E4_International support',
sa.Column('e4_international_support', sa.Numeric),
),
('E4_Notes', sa.Column('e4_notes', sa.Text)),
(
'H1_Public information campaigns',
sa.Column('h1_public_information_campaigns', sa.Numeric),
),
('H1_Flag', sa.Column('h1_flag', sa.Numeric)),
('H1_Notes', sa.Column('h1_notes', sa.Text)),
('H2_Testing policy', sa.Column('h2_testing_policy', sa.Numeric)),
('H2_Notes', sa.Column('h2_notes', sa.Text)),
('H3_Contact tracing', sa.Column('h3_contact_tracing', sa.Numeric)),
('H3_Notes', sa.Column('h3_notes', sa.Text)),
(
'H4_Emergency investment in healthcare',
sa.Column('h4_emergency_investment_in_healthcare', sa.Numeric),
),
('H4_Notes', sa.Column('h4_notes', sa.Text)),
(
'H5_Investment in vaccines',
sa.Column('h5_investment_in_vaccines', sa.Numeric),
),
('H5_Notes', sa.Column('h5_notes', sa.Text)),
('M1_Wildcard', sa.Column('m1_wildcard', sa.Numeric)),
('M1_Notes', sa.Column('m1_notes', sa.Text)),
('ConfirmedCases', sa.Column('confirmed_cases', sa.Numeric)),
('ConfirmedDeaths', sa.Column('confirmed_deaths', sa.Numeric)),
('StringencyIndex', sa.Column('stringency_index', sa.Float)),
(
'StringencyIndexForDisplay',
sa.Column('stringency_index_for_display', sa.Float),
),
('StringencyLegacyIndex', sa.Column('stringency_legacy_index', sa.Float)),
(
'StringencyLegacyIndexForDisplay',
sa.Column('stringency_legacy_index_for_display', sa.Float),
),
(
'GovernmentResponseIndex',
sa.Column('government_response_index', sa.Float),
),
(
'GovernmentResponseIndexForDisplay',
sa.Column('government_response_index_for_display', sa.Float),
),
('ContainmentHealthIndex', sa.Column('containment_health_index', sa.Float)),
(
'ContainmentHealthIndexForDisplay',
sa.Column('containment_health_index_for_display', sa.Float),
),
('EconomicSupportIndex', sa.Column('economic_support_index', sa.Float)),
(
'EconomicSupportIndexForDisplay',
sa.Column('economic_support_index_for_display', sa.Float),
),
],
)
def get_fetch_operator(self) -> PythonOperator:
return PythonOperator(
task_id='run-fetch',
python_callable=partial(fetch_from_hosted_csv, allow_empty_strings=False),
provide_context=True,
op_args=[
self.table_config.table_name, # pylint: disable=no-member
self.source_url,
],
retries=self.fetch_retries,
)
class CSSECovid19TimeSeriesGlobal(_PipelineDAG):
# Run after the daily update of data ~4am
schedule_interval = '0 7 * * *'
use_utc_now_as_source_modified = True
_endpoint = "https://github.com/CSSEGISandData/COVID-19/raw/master/csse_covid_19_data/csse_covid_19_time_series"
source_urls = {
"confirmed": f"{_endpoint}/time_series_covid19_confirmed_global.csv",
"recovered": f"{_endpoint}/time_series_covid19_recovered_global.csv",
"deaths": f"{_endpoint}/time_series_covid19_deaths_global.csv",
}
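    # The keys above ('confirmed', 'recovered', 'deaths') are presumably
    # surfaced as the 'type' column via the 'source_url_key' entry in the
    # field mapping below.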
table_config = TableConfig(
table_name="csse_covid19_time_series_global",
field_mapping=[
("Province/State", sa.Column("province_or_state", sa.String)),
("Country/Region", sa.Column("country_or_region", sa.String)),
("Lat", sa.Column("lat", sa.Numeric)),
("Long", sa.Column("long", sa.Numeric)),
("source_url_key", sa.Column("type", sa.String)),
("Date", sa.Column("date", sa.Date)),
("Value", sa.Column("value", sa.Numeric)),
],
)
def transform_dataframe(self, df: pd.DataFrame) -> pd.DataFrame:
df = df.melt(
id_vars=["Province/State", "Country/Region", "Lat", "Long"],
var_name="Date",
value_name="Value",
)
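        # The melt above reshapes the wide CSSE layout (one column per date)
        # into long format with 'Date' and 'Value' columns.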
df["Date"] = | pd.to_datetime(df['Date']) | pandas.to_datetime |
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for Metrics."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from meterstick import metrics
from meterstick import operations
import mock
import numpy as np
import pandas as pd
from pandas import testing
import unittest
class MetricTest(unittest.TestCase):
"""Tests general features of Metric."""
df = pd.DataFrame({'X': [0, 1, 2, 3], 'Y': [0, 1, 1, 2]})
def test_precompute(self):
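    # precompute is given the input DataFrame and the split_by columns;
    # compute then runs on the precomputed (and sliced) result.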
metric = metrics.Metric(
'foo',
precompute=lambda df, split_by: df[split_by],
compute=lambda x: x.sum().values[0])
output = metric.compute_on(self.df, 'Y')
expected = pd.DataFrame({'foo': [0, 2, 2]}, index=range(3))
expected.index.name = 'Y'
testing.assert_frame_equal(output, expected)
def test_compute(self):
metric = metrics.Metric('foo', compute=lambda x: x['X'].sum())
output = metric.compute_on(self.df)
expected = metrics.Sum('X', 'foo').compute_on(self.df)
testing.assert_frame_equal(output, expected)
def test_postcompute(self):
def postcompute(values, split_by):
del split_by
return values / values.sum()
output = metrics.Sum('X', postcompute=postcompute).compute_on(self.df, 'Y')
expected = operations.Distribution('Y',
metrics.Sum('X')).compute_on(self.df)
expected.columns = ['sum(X)']
testing.assert_frame_equal(output.astype(float), expected)
def test_compute_slices(self):
def _sum(df, split_by):
if split_by:
df = df.groupby(split_by)
return df['X'].sum()
metric = metrics.Metric('foo', compute_slices=_sum)
output = metric.compute_on(self.df)
expected = metrics.Sum('X', 'foo').compute_on(self.df)
testing.assert_frame_equal(output, expected)
def test_final_compute(self):
metric = metrics.Metric(
'foo', compute=lambda x: x, final_compute=lambda *_: 2)
output = metric.compute_on(None)
self.assertEqual(output, 2)
def test_pipeline_operator(self):
m = metrics.Count('X')
testing.assert_frame_equal(
m.compute_on(self.df), m | metrics.compute_on(self.df))
class SimpleMetricTest(unittest.TestCase):
df = pd.DataFrame({
'X': [1, 1, 1, 2, 2, 3, 4],
'Y': [3, 1, 1, 4, 4, 3, 5],
'grp': ['A'] * 3 + ['B'] * 4
})
def test_list_where(self):
metric = metrics.Mean('X', where=['grp == "A"'])
output = metric.compute_on(self.df, return_dataframe=False)
expected = self.df.query('grp == "A"')['X'].mean()
self.assertEqual(output, expected)
def test_single_list_where(self):
metric = metrics.Mean('X', where=['grp == "A"', 'Y < 2'])
output = metric.compute_on(self.df, return_dataframe=False)
expected = self.df.query('grp == "A" and Y < 2')['X'].mean()
self.assertEqual(output, expected)
def test_count_not_df(self):
metric = metrics.Count('X')
output = metric.compute_on(self.df, return_dataframe=False)
self.assertEqual(output, 7)
def test_count_split_by_not_df(self):
metric = metrics.Count('X')
output = metric.compute_on(self.df, 'grp', return_dataframe=False)
expected = self.df.groupby('grp')['X'].count()
expected.name = 'count(X)'
testing.assert_series_equal(output, expected)
def test_count_where(self):
metric = metrics.Count('X', where='grp == "A"')
output = metric.compute_on(self.df, return_dataframe=False)
self.assertEqual(output, 3)
def test_count_with_nan(self):
df = pd.DataFrame({'X': [1, 1, np.nan, 2, 2, 3, 4]})
metric = metrics.Count('X')
output = metric.compute_on(df, return_dataframe=False)
self.assertEqual(output, 6)
def test_count_unmelted(self):
metric = metrics.Count('X')
output = metric.compute_on(self.df)
expected = pd.DataFrame({'count(X)': [7]})
testing.assert_frame_equal(output, expected)
def test_count_melted(self):
metric = metrics.Count('X')
output = metric.compute_on(self.df, melted=True)
expected = pd.DataFrame({'Value': [7]}, index=['count(X)'])
expected.index.name = 'Metric'
testing.assert_frame_equal(output, expected)
def test_count_split_by_unmelted(self):
metric = metrics.Count('X')
output = metric.compute_on(self.df, 'grp')
expected = pd.DataFrame({'count(X)': [3, 4]}, index=['A', 'B'])
expected.index.name = 'grp'
testing.assert_frame_equal(output, expected)
def test_count_split_by_melted(self):
metric = metrics.Count('X')
output = metric.compute_on(self.df, 'grp', melted=True)
expected = pd.DataFrame({
'Value': [3, 4],
'grp': ['A', 'B']
},
index=['count(X)', 'count(X)'])
expected.index.name = 'Metric'
expected.set_index('grp', append=True, inplace=True)
testing.assert_frame_equal(output, expected)
def test_count_distinct(self):
df = pd.DataFrame({'X': [1, 1, np.nan, 2, 2, 3]})
metric = metrics.Count('X', distinct=True)
output = metric.compute_on(df, return_dataframe=False)
self.assertEqual(output, 3)
def test_sum_not_df(self):
metric = metrics.Sum('X')
output = metric.compute_on(self.df, return_dataframe=False)
self.assertEqual(output, 14)
def test_sum_split_by_not_df(self):
metric = metrics.Sum('X')
output = metric.compute_on(self.df, 'grp', return_dataframe=False)
expected = self.df.groupby('grp')['X'].sum()
expected.name = 'sum(X)'
testing.assert_series_equal(output, expected)
def test_sum_where(self):
metric = metrics.Sum('X', where='grp == "A"')
output = metric.compute_on(self.df, return_dataframe=False)
expected = self.df.query('grp == "A"')['X'].sum()
self.assertEqual(output, expected)
def test_sum_unmelted(self):
metric = metrics.Sum('X')
output = metric.compute_on(self.df)
expected = pd.DataFrame({'sum(X)': [14]})
testing.assert_frame_equal(output, expected)
def test_sum_melted(self):
metric = metrics.Sum('X')
output = metric.compute_on(self.df, melted=True)
expected = pd.DataFrame({'Value': [14]}, index=['sum(X)'])
expected.index.name = 'Metric'
testing.assert_frame_equal(output, expected)
def test_sum_split_by_unmelted(self):
metric = metrics.Sum('X')
output = metric.compute_on(self.df, 'grp')
expected = pd.DataFrame({'sum(X)': [3, 11]}, index=['A', 'B'])
expected.index.name = 'grp'
testing.assert_frame_equal(output, expected)
def test_sum_split_by_melted(self):
metric = metrics.Sum('X')
output = metric.compute_on(self.df, 'grp', melted=True)
expected = pd.DataFrame({
'Value': [3, 11],
'grp': ['A', 'B']
},
index=['sum(X)', 'sum(X)'])
expected.index.name = 'Metric'
expected.set_index('grp', append=True, inplace=True)
testing.assert_frame_equal(output, expected)
def test_dot_not_df(self):
metric = metrics.Dot('X', 'Y')
output = metric.compute_on(self.df, return_dataframe=False)
self.assertEqual(output, sum(self.df.X * self.df.Y))
def test_dot_split_by_not_df(self):
metric = metrics.Dot('X', 'Y')
output = metric.compute_on(self.df, 'grp', return_dataframe=False)
self.df['X * Y'] = self.df.X * self.df.Y
expected = self.df.groupby('grp')['X * Y'].sum()
expected.name = 'sum(X * Y)'
testing.assert_series_equal(output, expected)
def test_dot_where(self):
metric = metrics.Dot('X', 'Y', where='grp == "A"')
output = metric.compute_on(self.df, return_dataframe=False)
d = self.df.query('grp == "A"')
self.assertEqual(output, sum(d.X * d.Y))
def test_dot_unmelted(self):
metric = metrics.Dot('X', 'Y')
output = metric.compute_on(self.df)
expected = pd.DataFrame({'sum(X * Y)': [sum(self.df.X * self.df.Y)]})
testing.assert_frame_equal(output, expected)
def test_dot_normalized(self):
metric = metrics.Dot('X', 'Y', True)
output = metric.compute_on(self.df)
expected = pd.DataFrame({'mean(X * Y)': [(self.df.X * self.df.Y).mean()]})
testing.assert_frame_equal(output, expected)
def test_dot_melted(self):
metric = metrics.Dot('X', 'Y')
output = metric.compute_on(self.df, melted=True)
expected = pd.DataFrame({'Value': [sum(self.df.X * self.df.Y)]},
index=['sum(X * Y)'])
expected.index.name = 'Metric'
testing.assert_frame_equal(output, expected)
def test_dot_split_by_unmelted(self):
metric = metrics.Dot('X', 'Y')
output = metric.compute_on(self.df, 'grp')
expected = pd.DataFrame({'sum(X * Y)': [5, 45]}, index=['A', 'B'])
expected.index.name = 'grp'
testing.assert_frame_equal(output, expected)
def test_dot_split_by_melted(self):
metric = metrics.Dot('X', 'Y')
output = metric.compute_on(self.df, 'grp', melted=True)
expected = pd.DataFrame({
'Value': [5, 45],
'grp': ['A', 'B']
},
index=['sum(X * Y)', 'sum(X * Y)'])
expected.index.name = 'Metric'
expected.set_index('grp', append=True, inplace=True)
testing.assert_frame_equal(output, expected)
def test_mean_not_df(self):
metric = metrics.Mean('X')
output = metric.compute_on(self.df, return_dataframe=False)
self.assertEqual(output, 2)
def test_mean_split_by_not_df(self):
metric = metrics.Mean('X')
output = metric.compute_on(self.df, 'grp', return_dataframe=False)
expected = self.df.groupby('grp')['X'].mean()
expected.name = 'mean(X)'
testing.assert_series_equal(output, expected)
def test_mean_where(self):
metric = metrics.Mean('X', where='grp == "A"')
output = metric.compute_on(self.df, return_dataframe=False)
expected = self.df.query('grp == "A"')['X'].mean()
self.assertEqual(output, expected)
def test_mean_unmelted(self):
metric = metrics.Mean('X')
output = metric.compute_on(self.df)
expected = pd.DataFrame({'mean(X)': [2.]})
testing.assert_frame_equal(output, expected)
def test_mean_melted(self):
metric = metrics.Mean('X')
output = metric.compute_on(self.df, melted=True)
expected = pd.DataFrame({'Value': [2.]}, index=['mean(X)'])
expected.index.name = 'Metric'
testing.assert_frame_equal(output, expected)
def test_mean_split_by_unmelted(self):
metric = metrics.Mean('X')
output = metric.compute_on(self.df, 'grp')
expected = pd.DataFrame({'mean(X)': [1, 2.75]}, index=['A', 'B'])
expected.index.name = 'grp'
testing.assert_frame_equal(output, expected)
def test_mean_split_by_melted(self):
metric = metrics.Mean('X')
output = metric.compute_on(self.df, 'grp', melted=True)
expected = pd.DataFrame({
'Value': [1, 2.75],
'grp': ['A', 'B']
},
index=['mean(X)', 'mean(X)'])
expected.index.name = 'Metric'
expected.set_index('grp', append=True, inplace=True)
testing.assert_frame_equal(output, expected)
def test_max(self):
metric = metrics.Max('X')
output = metric.compute_on(self.df)
expected = pd.DataFrame({'max(X)': [4]})
testing.assert_frame_equal(output, expected)
def test_min(self):
metric = metrics.Min('X')
output = metric.compute_on(self.df)
expected = pd.DataFrame({'min(X)': [1]})
testing.assert_frame_equal(output, expected)
def test_weighted_mean_not_df(self):
df = pd.DataFrame({'X': [1, 2], 'Y': [3, 1]})
metric = metrics.Mean('X', 'Y')
output = metric.compute_on(df, return_dataframe=False)
self.assertEqual(output, 1.25)
def test_weighted_mean_split_by_not_df(self):
df = pd.DataFrame({
'X': [1, 2, 1, 3],
'Y': [3, 1, 0, 1],
'grp': ['A', 'A', 'B', 'B']
})
metric = metrics.Mean('X', 'Y')
output = metric.compute_on(df, 'grp', return_dataframe=False)
output.sort_index(level='grp', inplace=True) # For Py2
expected = pd.Series((1.25, 3.), index=['A', 'B'])
expected.index.name = 'grp'
expected.name = 'Y-weighted mean(X)'
testing.assert_series_equal(output, expected)
def test_weighted_mean_unmelted(self):
df = pd.DataFrame({'X': [1, 2], 'Y': [3, 1]})
metric = metrics.Mean('X', 'Y')
output = metric.compute_on(df)
expected = pd.DataFrame({'Y-weighted mean(X)': [1.25]})
testing.assert_frame_equal(output, expected)
def test_weighted_mean_melted(self):
df = pd.DataFrame({'X': [1, 2], 'Y': [3, 1]})
metric = metrics.Mean('X', 'Y')
output = metric.compute_on(df, melted=True)
expected = pd.DataFrame({'Value': [1.25]}, index=['Y-weighted mean(X)'])
expected.index.name = 'Metric'
testing.assert_frame_equal(output, expected)
def test_weighted_mean_split_by_unmelted(self):
df = pd.DataFrame({
'X': [1, 2, 1, 3],
'Y': [3, 1, 0, 1],
'grp': ['A', 'A', 'B', 'B']
})
metric = metrics.Mean('X', 'Y')
output = metric.compute_on(df, 'grp')
output.sort_index(level='grp', inplace=True) # For Py2
expected = pd.DataFrame({'Y-weighted mean(X)': [1.25, 3.]},
index=['A', 'B'])
expected.index.name = 'grp'
testing.assert_frame_equal(output, expected)
def test_weighted_mean_split_by_melted(self):
df = pd.DataFrame({
'X': [1, 2, 1, 3],
'Y': [3, 1, 0, 1],
'grp': ['A', 'A', 'B', 'B']
})
metric = metrics.Mean('X', 'Y')
output = metric.compute_on(df, 'grp', melted=True)
output.sort_index(level='grp', inplace=True) # For Py2
expected = pd.DataFrame({
'Value': [1.25, 3.],
'grp': ['A', 'B']
},
index=['Y-weighted mean(X)', 'Y-weighted mean(X)'])
expected.index.name = 'Metric'
expected.set_index('grp', append=True, inplace=True)
testing.assert_frame_equal(output, expected)
def test_quantile_raise(self):
with self.assertRaises(ValueError) as cm:
metrics.Quantile('X', 2)
self.assertEqual(str(cm.exception), 'quantiles must be in [0, 1].')
def test_quantile_multiple_quantiles_raise(self):
with self.assertRaises(ValueError) as cm:
metrics.Quantile('X', [0.1, 2])
self.assertEqual(str(cm.exception), 'quantiles must be in [0, 1].')
def test_quantile_not_df(self):
metric = metrics.Quantile('X', 0.5)
output = metric.compute_on(self.df, return_dataframe=False)
self.assertEqual(output, 2)
def test_quantile_where(self):
metric = metrics.Quantile('X', where='grp == "B"')
output = metric.compute_on(self.df, return_dataframe=False)
self.assertEqual(output, 2.5)
def test_quantile_interpolation(self):
metric = metrics.Quantile('X', 0.5, interpolation='lower')
output = metric.compute_on(
pd.DataFrame({'X': [1, 2]}), return_dataframe=False)
self.assertEqual(output, 1)
def test_quantile_split_by_not_df(self):
metric = metrics.Quantile('X', 0.5)
output = metric.compute_on(self.df, 'grp', return_dataframe=False)
expected = self.df.groupby('grp')['X'].quantile(0.5)
expected.name = 'quantile(X, 0.5)'
testing.assert_series_equal(output, expected)
def test_quantile_unmelted(self):
metric = metrics.Quantile('X', 0.5)
output = metric.compute_on(self.df)
expected = pd.DataFrame({'quantile(X, 0.5)': [2.]})
testing.assert_frame_equal(output, expected)
def test_quantile_melted(self):
metric = metrics.Quantile('X', 0.5)
output = metric.compute_on(self.df, melted=True)
expected = pd.DataFrame({'Value': [2.]}, index=['quantile(X, 0.5)'])
expected.index.name = 'Metric'
testing.assert_frame_equal(output, expected)
def test_quantile_split_by_unmelted(self):
metric = metrics.Quantile('X', 0.5)
output = metric.compute_on(self.df, 'grp')
expected = pd.DataFrame({'quantile(X, 0.5)': [1, 2.5]}, index=['A', 'B'])
expected.index.name = 'grp'
testing.assert_frame_equal(output, expected)
def test_quantile_split_by_melted(self):
metric = metrics.Quantile('X', 0.5)
output = metric.compute_on(self.df, 'grp', melted=True)
expected = pd.DataFrame({
'Value': [1, 2.5],
'grp': ['A', 'B']
},
index=['quantile(X, 0.5)'] * 2)
expected.index.name = 'Metric'
expected.set_index('grp', append=True, inplace=True)
testing.assert_frame_equal(output, expected)
def test_quantile_multiple_quantiles(self):
df = pd.DataFrame({'X': [0, 1]})
metric = metrics.MetricList(
[metrics.Quantile('X', [0.1, 0.5]),
metrics.Count('X')])
output = metric.compute_on(df)
expected = pd.DataFrame(
[[0.1, 0.5, 2]],
columns=['quantile(X, 0.1)', 'quantile(X, 0.5)', 'count(X)'])
testing.assert_frame_equal(output, expected)
def test_quantile_multiple_quantiles_melted(self):
df = pd.DataFrame({'X': [0, 1]})
metric = metrics.MetricList(
[metrics.Quantile('X', [0.1, 0.5]),
metrics.Count('X')])
output = metric.compute_on(df, melted=True)
expected = pd.DataFrame(
{'Value': [0.1, 0.5, 2]},
index=['quantile(X, 0.1)', 'quantile(X, 0.5)', 'count(X)'])
expected.index.name = 'Metric'
testing.assert_frame_equal(output, expected)
def test_weighted_quantile_not_df(self):
df = pd.DataFrame({'X': [1, 2], 'Y': [3, 1]})
metric = metrics.Quantile('X', weight='Y')
output = metric.compute_on(df, return_dataframe=False)
self.assertEqual(output, 1.25)
def test_weighted_quantile_df(self):
df = pd.DataFrame({'X': [1, 2], 'Y': [3, 1]})
metric = metrics.Quantile('X', weight='Y')
output = metric.compute_on(df)
expected = pd.DataFrame({'Y-weighted quantile(X, 0.5)': [1.25]})
testing.assert_frame_equal(output, expected)
def test_weighted_quantile_multiple_quantiles_split_by(self):
df = pd.DataFrame({
'X': [0, 1, 2, 1, 2, 3],
'Y': [1, 2, 2, 1, 1, 1],
'grp': ['B'] * 3 + ['A'] * 3
})
metric = metrics.MetricList(
[metrics.Quantile('X', [0.25, 0.5], weight='Y'),
metrics.Sum('X')])
output = metric.compute_on(df, 'grp')
output.sort_index(level='grp', inplace=True) # For Py2
expected = pd.DataFrame(
{
'Y-weighted quantile(X, 0.25)': [1.25, 0.5],
'Y-weighted quantile(X, 0.5)': [2., 1.25],
'sum(X)': [6, 3]
},
index=['A', 'B'])
expected.index.name = 'grp'
testing.assert_frame_equal(output, expected)
def test_weighted_quantile_multiple_quantiles_split_by_melted(self):
df = pd.DataFrame({
'X': [0, 1, 2, 1, 2, 3],
'Y': [1, 2, 2, 1, 1, 1],
'grp': ['B'] * 3 + ['A'] * 3
})
metric = metrics.MetricList(
[metrics.Quantile('X', [0.25, 0.5], weight='Y'),
metrics.Sum('X')])
output = metric.compute_on(df, 'grp', melted=True)
output.sort_index(level=['Metric', 'grp'], inplace=True) # For Py2
expected = pd.DataFrame({'Value': [1.25, 0.5, 2., 1.25, 6., 3.]},
index=pd.MultiIndex.from_product(
([
'Y-weighted quantile(X, 0.25)',
'Y-weighted quantile(X, 0.5)', 'sum(X)'
], ['A', 'B']),
names=['Metric', 'grp']))
testing.assert_frame_equal(output, expected)
def test_variance_not_df(self):
metric = metrics.Variance('X')
output = metric.compute_on(self.df, return_dataframe=False)
self.assertEqual(output, self.df.X.var())
def test_variance_biased(self):
metric = metrics.Variance('X', False)
output = metric.compute_on(self.df, return_dataframe=False)
self.assertEqual(output, self.df.X.var(ddof=0))
def test_variance_split_by_not_df(self):
metric = metrics.Variance('X')
output = metric.compute_on(self.df, 'grp', return_dataframe=False)
expected = self.df.groupby('grp')['X'].var()
expected.name = 'var(X)'
testing.assert_series_equal(output, expected)
def test_variance_where(self):
metric = metrics.Variance('X', where='grp == "B"')
output = metric.compute_on(self.df, return_dataframe=False)
expected = self.df.query('grp == "B"')['X'].var()
self.assertEqual(output, expected)
def test_variance_unmelted(self):
metric = metrics.Variance('X')
output = metric.compute_on(self.df)
expected = pd.DataFrame({'var(X)': [self.df.X.var()]})
testing.assert_frame_equal(output, expected)
def test_variance_melted(self):
metric = metrics.Variance('X')
output = metric.compute_on(self.df, melted=True)
expected = pd.DataFrame({'Value': [self.df.X.var()]}, index=['var(X)'])
expected.index.name = 'Metric'
testing.assert_frame_equal(output, expected)
def test_variance_split_by_unmelted(self):
metric = metrics.Variance('X')
output = metric.compute_on(self.df, 'grp')
expected = pd.DataFrame({'var(X)': self.df.groupby('grp')['X'].var()},
index=['A', 'B'])
expected.index.name = 'grp'
testing.assert_frame_equal(output, expected)
def test_variance_split_by_melted(self):
metric = metrics.Variance('X')
output = metric.compute_on(self.df, 'grp', melted=True)
expected = pd.DataFrame(
{
'Value': self.df.groupby('grp')['X'].var().values,
'grp': ['A', 'B']
},
index=['var(X)', 'var(X)'])
expected.index.name = 'Metric'
expected.set_index('grp', append=True, inplace=True)
testing.assert_frame_equal(output, expected)
def test_weighted_variance_not_df(self):
df = pd.DataFrame({'X': [0, 2], 'Y': [1, 3]})
metric = metrics.Variance('X', weight='Y')
output = metric.compute_on(df, return_dataframe=False)
self.assertEqual(output, 1)
def test_weighted_variance_not_df_biased(self):
df = pd.DataFrame({'X': [0, 2], 'Y': [1, 3]})
metric = metrics.Variance('X', False, 'Y')
output = metric.compute_on(df, return_dataframe=False)
self.assertEqual(output, 0.75)
def test_weighted_variance_split_by_not_df(self):
df = pd.DataFrame({
'X': [0, 2, 1, 3],
'Y': [1, 3, 1, 1],
'grp': ['B', 'B', 'A', 'A']
})
metric = metrics.Variance('X', weight='Y')
output = metric.compute_on(df, 'grp', return_dataframe=False)
output.sort_index(level='grp', inplace=True) # For Py2
expected = pd.Series((2., 1), index=['A', 'B'])
expected.index.name = 'grp'
expected.name = 'Y-weighted var(X)'
testing.assert_series_equal(output, expected)
def test_weighted_variance_unmelted(self):
df = pd.DataFrame({'X': [0, 2], 'Y': [1, 3]})
metric = metrics.Variance('X', weight='Y')
output = metric.compute_on(df)
expected = pd.DataFrame({'Y-weighted var(X)': [1.]})
testing.assert_frame_equal(output, expected)
def test_weighted_variance_melted(self):
df = pd.DataFrame({'X': [0, 2], 'Y': [1, 3]})
metric = metrics.Variance('X', weight='Y')
output = metric.compute_on(df, melted=True)
expected = pd.DataFrame({'Value': [1.]}, index=['Y-weighted var(X)'])
expected.index.name = 'Metric'
testing.assert_frame_equal(output, expected)
def test_weighted_variance_split_by_unmelted(self):
df = pd.DataFrame({
'X': [0, 2, 1, 3],
'Y': [1, 3, 1, 1],
'grp': ['B', 'B', 'A', 'A']
})
metric = metrics.Variance('X', weight='Y')
output = metric.compute_on(df, 'grp')
output.sort_index(level='grp', inplace=True) # For Py2
expected = pd.DataFrame({'Y-weighted var(X)': [2., 1]}, index=['A', 'B'])
expected.index.name = 'grp'
testing.assert_frame_equal(output, expected)
def test_weighted_variance_split_by_melted(self):
df = pd.DataFrame({
'X': [0, 2, 1, 3],
'Y': [1, 3, 1, 1],
'grp': ['B', 'B', 'A', 'A']
})
metric = metrics.Variance('X', weight='Y')
output = metric.compute_on(df, 'grp', melted=True)
output.sort_index(level='grp', inplace=True) # For Py2
expected = pd.DataFrame({
'Value': [2., 1],
'grp': ['A', 'B']
},
index=['Y-weighted var(X)', 'Y-weighted var(X)'])
expected.index.name = 'Metric'
expected.set_index('grp', append=True, inplace=True)
testing.assert_frame_equal(output, expected)
def test_standard_deviation_not_df(self):
metric = metrics.StandardDeviation('X')
output = metric.compute_on(self.df, return_dataframe=False)
self.assertEqual(output, self.df.X.std())
def test_standard_deviation_biased(self):
metric = metrics.StandardDeviation('X', False)
output = metric.compute_on(self.df, return_dataframe=False)
self.assertEqual(output, self.df.X.std(ddof=0))
def test_standard_deviation_split_by_not_df(self):
metric = metrics.StandardDeviation('X')
output = metric.compute_on(self.df, 'grp', return_dataframe=False)
expected = self.df.groupby('grp')['X'].std()
expected.name = 'sd(X)'
testing.assert_series_equal(output, expected)
def test_standard_deviation_where(self):
metric = metrics.StandardDeviation('X', where='grp == "B"')
output = metric.compute_on(self.df, return_dataframe=False)
expected = self.df.query('grp == "B"')['X'].std()
self.assertEqual(output, expected)
def test_standard_deviation_unmelted(self):
metric = metrics.StandardDeviation('X')
output = metric.compute_on(self.df)
expected = pd.DataFrame({'sd(X)': [self.df.X.std()]})
testing.assert_frame_equal(output, expected)
def test_standard_deviation_melted(self):
metric = metrics.StandardDeviation('X')
output = metric.compute_on(self.df, melted=True)
expected = pd.DataFrame({'Value': [self.df.X.std()]}, index=['sd(X)'])
expected.index.name = 'Metric'
testing.assert_frame_equal(output, expected)
def test_standard_deviation_split_by_unmelted(self):
metric = metrics.StandardDeviation('X')
output = metric.compute_on(self.df, 'grp')
expected = pd.DataFrame({'sd(X)': self.df.groupby('grp')['X'].std()},
index=['A', 'B'])
expected.index.name = 'grp'
testing.assert_frame_equal(output, expected)
def test_standard_deviation_split_by_melted(self):
metric = metrics.StandardDeviation('X')
output = metric.compute_on(self.df, 'grp', melted=True)
expected = pd.DataFrame(
{
'Value': self.df.groupby('grp')['X'].std().values,
'grp': ['A', 'B']
},
index=['sd(X)', 'sd(X)'])
expected.index.name = 'Metric'
expected.set_index('grp', append=True, inplace=True)
testing.assert_frame_equal(output, expected)
def test_weighted_standard_deviation_not_df(self):
df = pd.DataFrame({'X': [0, 2], 'Y': [1, 3]})
metric = metrics.StandardDeviation('X', weight='Y')
output = metric.compute_on(df, return_dataframe=False)
self.assertEqual(output, 1)
def test_weighted_standard_deviation_not_df_biased(self):
df = pd.DataFrame({'X': [0, 2], 'Y': [1, 3]})
metric = metrics.StandardDeviation('X', False, 'Y')
output = metric.compute_on(df, return_dataframe=False)
self.assertEqual(output, np.sqrt(0.75))
def test_weighted_standard_deviation_split_by_not_df(self):
df = pd.DataFrame({
'X': [0, 2, 1, 3],
'Y': [1, 3, 1, 1],
'grp': ['B', 'B', 'A', 'A']
})
metric = metrics.StandardDeviation('X', weight='Y')
output = metric.compute_on(df, 'grp', return_dataframe=False)
output.sort_index(level='grp', inplace=True) # For Py2
expected = pd.Series((np.sqrt(2), 1), index=['A', 'B'])
expected.index.name = 'grp'
expected.name = 'Y-weighted sd(X)'
testing.assert_series_equal(output, expected)
def test_weighted_standard_deviation_unmelted(self):
df = pd.DataFrame({'X': [0, 2], 'Y': [1, 3]})
metric = metrics.StandardDeviation('X', weight='Y')
output = metric.compute_on(df)
expected = pd.DataFrame({'Y-weighted sd(X)': [1.]})
testing.assert_frame_equal(output, expected)
def test_weighted_standard_deviation_melted(self):
df = pd.DataFrame({'X': [0, 2], 'Y': [1, 3]})
metric = metrics.StandardDeviation('X', weight='Y')
output = metric.compute_on(df, melted=True)
expected = pd.DataFrame({'Value': [1.]}, index=['Y-weighted sd(X)'])
expected.index.name = 'Metric'
testing.assert_frame_equal(output, expected)
def test_weighted_standard_deviation_split_by_unmelted(self):
df = pd.DataFrame({
'X': [0, 2, 1, 3],
'Y': [1, 3, 1, 1],
'grp': ['B', 'B', 'A', 'A']
})
metric = metrics.StandardDeviation('X', weight='Y')
output = metric.compute_on(df, 'grp')
output.sort_index(level='grp', inplace=True) # For Py2
expected = pd.DataFrame({'Y-weighted sd(X)': [np.sqrt(2), 1]},
index=['A', 'B'])
expected.index.name = 'grp'
testing.assert_frame_equal(output, expected)
def test_weighted_standard_deviation_split_by_melted(self):
df = pd.DataFrame({
'X': [0, 2, 1, 3],
'Y': [1, 3, 1, 1],
'grp': ['B', 'B', 'A', 'A']
})
metric = metrics.StandardDeviation('X', weight='Y')
output = metric.compute_on(df, 'grp', melted=True)
output.sort_index(level='grp', inplace=True) # For Py2
expected = pd.DataFrame({
'Value': [np.sqrt(2), 1],
'grp': ['A', 'B']
},
index=['Y-weighted sd(X)', 'Y-weighted sd(X)'])
expected.index.name = 'Metric'
expected.set_index('grp', append=True, inplace=True)
testing.assert_frame_equal(output, expected)
def test_cv_not_df(self):
metric = metrics.CV('X')
output = metric.compute_on(self.df, return_dataframe=False)
self.assertEqual(output, np.sqrt(1 / 3.))
def test_cv_biased(self):
metric = metrics.CV('X', False)
output = metric.compute_on(self.df, return_dataframe=False)
self.assertEqual(output, self.df.X.std(ddof=0) / np.mean(self.df.X))
def test_cv_split_by_not_df(self):
metric = metrics.CV('X')
output = metric.compute_on(self.df, 'grp', return_dataframe=False)
expected = self.df.groupby('grp')['X'].std() / [1, 2.75]
expected.name = 'cv(X)'
testing.assert_series_equal(output, expected)
def test_cv_where(self):
metric = metrics.CV('X', where='grp == "B"')
output = metric.compute_on(self.df, return_dataframe=False)
expected = self.df.query('grp == "B"')['X'].std() / 2.75
self.assertEqual(output, expected)
def test_cv_unmelted(self):
metric = metrics.CV('X')
output = metric.compute_on(self.df)
expected = pd.DataFrame({'cv(X)': [np.sqrt(1 / 3.)]})
testing.assert_frame_equal(output, expected)
def test_cv_melted(self):
metric = metrics.CV('X')
output = metric.compute_on(self.df, melted=True)
expected = pd.DataFrame({'Value': [np.sqrt(1 / 3.)]}, index=['cv(X)'])
expected.index.name = 'Metric'
testing.assert_frame_equal(output, expected)
def test_cv_split_by_unmelted(self):
metric = metrics.CV('X')
output = metric.compute_on(self.df, 'grp')
expected = pd.DataFrame({'cv(X)': [0, np.sqrt(1 / 8.25)]}, index=['A', 'B'])
expected.index.name = 'grp'
testing.assert_frame_equal(output, expected)
def test_cv_split_by_melted(self):
metric = metrics.CV('X')
output = metric.compute_on(self.df, 'grp', melted=True)
expected = pd.DataFrame(
data={
'Value': [0, np.sqrt(1 / 8.25)],
'grp': ['A', 'B']
},
index=['cv(X)', 'cv(X)'])
expected.index.name = 'Metric'
expected.set_index('grp', append=True, inplace=True)
testing.assert_frame_equal(output, expected)
def test_correlation(self):
metric = metrics.Correlation('X', 'Y')
output = metric.compute_on(self.df, return_dataframe=False)
self.assertEqual(output, np.corrcoef(self.df.X, self.df.Y)[0, 1])
self.assertEqual(output, self.df.X.corr(self.df.Y))
def test_weighted_correlation(self):
metric = metrics.Correlation('X', 'Y', weight='Y')
output = metric.compute_on(self.df)
cov = np.cov(self.df.X, self.df.Y, aweights=self.df.Y)
expected = pd.DataFrame(
{'Y-weighted corr(X, Y)': [cov[0, 1] / np.sqrt(cov[0, 0] * cov[1, 1])]})
testing.assert_frame_equal(output, expected)
def test_correlation_method(self):
metric = metrics.Correlation('X', 'Y', method='kendall')
output = metric.compute_on(self.df, return_dataframe=False)
self.assertEqual(output, self.df.X.corr(self.df.Y, method='kendall'))
def test_correlation_kwargs(self):
metric = metrics.Correlation('X', 'Y', min_periods=10)
output = metric.compute_on(self.df, return_dataframe=False)
self.assertTrue(pd.isnull(output))
def test_correlation_split_by_not_df(self):
metric = metrics.Correlation('X', 'Y')
output = metric.compute_on(self.df, 'grp', return_dataframe=False)
corr_a = metric.compute_on(
self.df[self.df.grp == 'A'], return_dataframe=False)
corr_b = metric.compute_on(
self.df[self.df.grp == 'B'], return_dataframe=False)
expected = pd.Series([corr_a, corr_b], index=['A', 'B'])
expected.index.name = 'grp'
expected.name = 'corr(X, Y)'
testing.assert_series_equal(output, expected)
def test_correlation_where(self):
metric = metrics.Correlation('X', 'Y', where='grp == "B"')
metric_no_filter = metrics.Correlation('X', 'Y')
output = metric.compute_on(self.df)
expected = metric_no_filter.compute_on(self.df[self.df.grp == 'B'])
testing.assert_frame_equal(output, expected)
def test_correlation_df(self):
metric = metrics.Correlation('X', 'Y')
output = metric.compute_on(self.df)
expected = pd.DataFrame({'corr(X, Y)': [self.df.X.corr(self.df.Y)]})
testing.assert_frame_equal(output, expected)
def test_correlation_split_by_df(self):
df = pd.DataFrame({
'X': [1, 1, 1, 2, 2, 2, 3, 4],
'Y': [3, 1, 1, 3, 4, 4, 3, 5],
'grp': ['A'] * 4 + ['B'] * 4
})
metric = metrics.Correlation('X', 'Y')
output = metric.compute_on(df, 'grp')
corr_a = metric.compute_on(df[df.grp == 'A'], return_dataframe=False)
corr_b = metric.compute_on(df[df.grp == 'B'], return_dataframe=False)
expected = pd.DataFrame({'corr(X, Y)': [corr_a, corr_b]}, index=['A', 'B'])
expected.index.name = 'grp'
testing.assert_frame_equal(output, expected)
def test_cov(self):
metric = metrics.Cov('X', 'Y')
output = metric.compute_on(self.df, return_dataframe=False)
self.assertEqual(output, np.cov(self.df.X, self.df.Y)[0, 1])
self.assertEqual(output, self.df.X.cov(self.df.Y))
def test_cov_bias(self):
metric = metrics.Cov('X', 'Y', bias=True)
output = metric.compute_on(self.df, return_dataframe=False)
expected = np.mean(
(self.df.X - self.df.X.mean()) * (self.df.Y - self.df.Y.mean()))
self.assertEqual(output, expected)
def test_cov_ddof(self):
metric = metrics.Cov('X', 'Y', ddof=0)
output = metric.compute_on(self.df, return_dataframe=False)
expected = np.mean(
(self.df.X - self.df.X.mean()) * (self.df.Y - self.df.Y.mean()))
self.assertEqual(output, expected)
def test_cov_kwargs(self):
metric = metrics.Cov('X', 'Y', fweights=self.df.Y)
output = metric.compute_on(self.df)
expected = np.cov(self.df.X, self.df.Y, fweights=self.df.Y)[0, 1]
expected = pd.DataFrame({'cov(X, Y)': [expected]})
testing.assert_frame_equal(output, expected)
def test_weighted_cov(self):
metric = metrics.Cov('X', 'Y', weight='Y')
output = metric.compute_on(self.df)
expected = np.cov(self.df.X, self.df.Y, aweights=self.df.Y)[0, 1]
expected = pd.DataFrame({'Y-weighted cov(X, Y)': [expected]})
testing.assert_frame_equal(output, expected)
def test_cov_split_by_not_df(self):
metric = metrics.Cov('X', 'Y')
output = metric.compute_on(self.df, 'grp', return_dataframe=False)
output.sort_index(level='grp', inplace=True) # For Py2
cov_a = metric.compute_on(
self.df[self.df.grp == 'A'], return_dataframe=False)
cov_b = metric.compute_on(
self.df[self.df.grp == 'B'], return_dataframe=False)
expected = pd.Series([cov_a, cov_b], index=['A', 'B'])
expected.index.name = 'grp'
expected.name = 'cov(X, Y)'
testing.assert_series_equal(output, expected)
def test_cov_where(self):
metric = metrics.Cov('X', 'Y', where='grp == "B"')
metric_no_filter = metrics.Cov('X', 'Y')
output = metric.compute_on(self.df)
expected = metric_no_filter.compute_on(self.df[self.df.grp == 'B'])
testing.assert_frame_equal(output, expected)
def test_cov_df(self):
metric = metrics.Cov('X', 'Y')
output = metric.compute_on(self.df)
expected = pd.DataFrame({'cov(X, Y)': [self.df.X.cov(self.df.Y)]})
testing.assert_frame_equal(output, expected)
def test_cov_split_by_df(self):
df = pd.DataFrame({
'X': [1, 1, 1, 2, 2, 2, 3, 4],
'Y': [3, 1, 1, 3, 4, 4, 3, 5],
'grp': ['A'] * 4 + ['B'] * 4
})
metric = metrics.Cov('X', 'Y')
output = metric.compute_on(df, 'grp')
output.sort_index(level='grp', inplace=True) # For Py2
cov_a = metric.compute_on(df[df.grp == 'A'], return_dataframe=False)
cov_b = metric.compute_on(df[df.grp == 'B'], return_dataframe=False)
expected = pd.DataFrame({'cov(X, Y)': [cov_a, cov_b]}, index=['A', 'B'])
expected.index.name = 'grp'
testing.assert_frame_equal(output, expected)
class CompositeMetric(unittest.TestCase):
"""Tests for composition of two metrics."""
df = pd.DataFrame({'X': [0, 1, 2, 3], 'Y': [0, 1, 1, 2]})
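# With this fixture, sum(X) = 6 and sum(Y) = 4; the expected values in the
# arithmetic tests below follow directly (6 - 4 = 2, 2 * 6 * 4 = 48,
# 6 / 6 / 4 = 0.25, 6 ** 4 = 1296, 6 ** 2 = 36, 4 ** 0.5 = 2, 2 ** 6 = 64).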
def test_add(self):
df = pd.DataFrame({'X': [1, 2, 3], 'Y': ['a', 'b', 'c']})
sumx = metrics.Sum('X')
metric = sumx + sumx
output = metric.compute_on(df, 'Y', return_dataframe=False)
expected = pd.Series([2, 4, 6], index=['a', 'b', 'c'])
expected.name = 'sum(X) + sum(X)'
expected.index.name = 'Y'
testing.assert_series_equal(output, expected)
def test_sub(self):
sumx = metrics.Sum('X')
sumy = metrics.Sum('Y')
metric = sumx - sumy
output = metric.compute_on(self.df, return_dataframe=False)
self.assertEqual(output, 2)
def test_mul(self):
metric = 2. * metrics.Sum('X') * metrics.Sum('Y')
output = metric.compute_on(self.df, return_dataframe=False)
self.assertEqual(output, 48)
self.assertEqual(metric.name, '2.0 * sum(X) * sum(Y)')
def test_div(self):
metric = 6. / metrics.Sum('X') / metrics.Sum('Y')
output = metric.compute_on(self.df, return_dataframe=False)
self.assertEqual(output, 0.25)
self.assertEqual(metric.name, '6.0 / sum(X) / sum(Y)')
def test_neg(self):
base = metrics.MetricList((metrics.Sum('X'), metrics.Sum('Y')))
metric = -base
output = metric.compute_on(self.df)
expected = -base.compute_on(self.df)
expected.columns = ['-sum(X)', '-sum(Y)']
testing.assert_frame_equal(output, expected)
def test_pow(self):
metric = metrics.Sum('X')**metrics.Sum('Y')
output = metric.compute_on(self.df, return_dataframe=False)
self.assertEqual(output, 1296)
self.assertEqual(metric.name, 'sum(X) ^ sum(Y)')
def test_pow_with_scalar(self):
metric = metrics.Sum('X')**2
output = metric.compute_on(self.df, return_dataframe=False)
self.assertEqual(output, 36)
self.assertEqual(metric.name, 'sum(X) ^ 2')
def test_sqrt(self):
metric = metrics.Sum('Y')**0.5
output = metric.compute_on(self.df, return_dataframe=False)
self.assertEqual(output, 2)
self.assertEqual(metric.name, 'sqrt(sum(Y))')
def test_rpow(self):
metric = 2**metrics.Sum('X')
output = metric.compute_on(self.df, return_dataframe=False)
self.assertEqual(output, 64)
self.assertEqual(metric.name, '2 ^ sum(X)')
def test_ratio(self):
metric = metrics.Ratio('X', 'Y')
output = metric.compute_on(self.df)
expected = metrics.Sum('X') / metrics.Sum('Y')
expected = expected.compute_on(self.df)
testing.assert_frame_equal(output, expected)
def test_to_dataframe(self):
metric = 5 + metrics.Sum('X')
output = metric.compute_on(self.df)
expected = pd.DataFrame({'5 + sum(X)': [11]})
testing.assert_frame_equal(output, expected)
def test_where(self):
metric = metrics.Count('X', 'f', 'Y == 1') * metrics.Sum('X', 'b', 'Y == 2')
output = metric.compute_on(self.df, return_dataframe=False)
self.assertEqual(output, 6)
def test_between_operations(self):
df = pd.DataFrame({
'X': [1, 2, 3, 4, 5, 6],
'Condition': [0, 0, 0, 1, 1, 1],
'grp': ['A', 'A', 'B', 'A', 'B', 'C']
})
suma = metrics.Sum('X', where='grp == "A"')
sumb = metrics.Sum('X', where='grp == "B"')
pct = operations.PercentChange('Condition', 0)
output = (pct(suma) - pct(sumb)).compute_on(df)
expected = pct(suma).compute_on(df) - pct(sumb).compute_on(df)
expected.columns = ['%s - %s' % (c, c) for c in expected.columns]
testing.assert_frame_equal(output, expected)
def test_between_operations_where(self):
df = pd.DataFrame({
'X': [1, 2, 3, 4, 5, 6],
'Condition': [0, 0, 0, 1, 1, 1],
'grp': ['A', 'A', 'B', 'A', 'B', 'C']
})
sumx = metrics.Sum('X')
pcta = operations.PercentChange('Condition', 0, sumx, where='grp == "A"')
pctb = operations.PercentChange('Condition', 0, sumx, where='grp == "B"')
output = (pcta - pctb).compute_on(df)
expected = pcta.compute_on(df) - pctb.compute_on(df)
expected.columns = ['%s - %s' % (c, c) for c in expected.columns]
testing.assert_frame_equal(output, expected)
def test_between_stderr_operations_where(self):
df = pd.DataFrame({
'X': [1, 2, 3, 4, 5, 6] * 2,
'Condition': [0, 0, 0, 1, 1, 1] * 2,
'grp': ['A', 'A', 'B', 'A', 'B', 'C'] * 2,
'cookie': [1, 2, 3] * 4
})
np.random.seed(42)
sumx = metrics.Sum('X')
pcta = operations.PercentChange('Condition', 0, sumx, where='grp == "A"')
pctb = operations.PercentChange('Condition', 0, sumx)
jk = operations.Jackknife('cookie', pcta)
bst = operations.Bootstrap(None, pctb, 20, where='grp != "C"')
m = (jk / bst).rename_columns(
pd.MultiIndex.from_product((('sum(X)',), ('Value', 'SE'))))
output = m.compute_on(df)
np.random.seed(42)
expected = jk.compute_on(df).values / bst.compute_on(df).values
expected = pd.DataFrame(
expected, index=output.index, columns=output.columns)
testing.assert_frame_equal(output, expected)
def test_rename_columns(self):
df = pd.DataFrame({'X': [1, 2], 'Y': [3, 1]})
unweightd_metric = metrics.MetricList(
(metrics.Mean('X'), metrics.StandardDeviation('X')))
weightd_metric = metrics.MetricList(
(metrics.Mean('X', 'Y'), metrics.StandardDeviation('X', weight='Y')))
columns = ['mean', 'sum']
metric = (unweightd_metric / weightd_metric).rename_columns(columns)
output = metric.compute_on(df)
unweightd = unweightd_metric.compute_on(df)
weightd = weightd_metric.compute_on(df)
expected = pd.DataFrame(unweightd.values / weightd.values, columns=columns)
testing.assert_frame_equal(output, expected)
def test_set_name(self):
df = pd.DataFrame({'click': [1, 2], 'impression': [3, 1]})
metric = (metrics.Sum('click') / metrics.Sum('impression')).set_name('ctr')
output = metric.compute_on(df)
expected = pd.DataFrame([[0.75]], columns=['ctr'])
testing.assert_frame_equal(output, expected)
import os
import glob
import json
import logging
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
from core.utils import Directories
from core.viz import plot_class_dist
class DataHandling(object):
def __init__(self):
pass
def drop_unique_cols(self, train, test, add_cols: list):
'''
Drops unique columns as part of data preprocessing, since they make no difference to model predictions.
Unique columns are determined automatically based on the logic below.
:param train: train set
:param test: test set
:param add_cols: any additional columns that need to be dropped; must be passed as entries in a list
:return:
'''
df = pd.concat([train, test])
from sklearn.inspection import permutation_importance
from typing import List, Callable
from datetime import datetime
import matplotlib.pyplot as plt
import lightgbm as lgb
import pandas as pd
import numpy as np
import warnings
import optuna
import pickle
import tqdm
import shap
import time
import gc
from ..fit_model import *
from ..additional import *
optuna.logging.set_verbosity(optuna.logging.WARNING)
##############################################################################
class Conveyor:
"""Conveyor consisting of blocks that carry processing of
data passing inside the conveyor, and ending with a regressor
Parameters
----------
estimator : object = None
Regressor that performs the prediction task
*blocks : Transformers
Transformers that carry out data processing
"""
def __init__(self, *blocks, estimator:object = None):
self.blocks = list(blocks)
self.estimator = estimator
warnings.filterwarnings('ignore')
def __repr__(self):
_repr = self.__class__.__name__ + "= (\n"
indent = " " * (len(_repr) - 1)
for block in self.blocks:
_repr += f"{indent}{repr(block)}, \n"
_repr += f"{indent}estimator = {repr(self.estimator)}\n{indent} )"
return _repr
##############################################################################
def fit(self, X:pd.DataFrame, Y:pd.DataFrame or pd.Series):
""" Function that is responsible for filling the model with data and training the model
Parameters
----------
X : pd.DataFrame
Input data, features (regressors)
Y : pd.DataFrame or pd.Series
Input data, targets
"""
_, __ = self.fit_transform(X, Y, estimator = True)
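# Illustrative usage sketch (the block names are placeholders, not part of
# this package): compose transformer blocks with an estimator, then fit.
# pipe = Conveyor(SomeImputer(), SomeScaler(), estimator=lgb.LGBMRegressor())
# pipe.fit(X_train, Y_train)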
def fit_transform(self, X:pd.DataFrame, Y:pd.DataFrame or pd.Series, estimator:bool = False):
""" Function that is responsible for filling the model with data and training the model, and returning the transformed
Parameters
----------
X : pd.DataFrame
Input data, features (regressors)
Y : pd.DataFrame or pd.Series
Input data, targets
estimator : bool
fit estimator or not
Returns
----------
transformed data : list or pd.DataFrame
Transformed data
"""
X_, Y_ = (X.copy(), Y.copy())
pbar = ProgressBar(len(self.blocks) + int(estimator))
for block in self.blocks:
pbar.set_postfix('transform', block.__class__.__name__)
X_, Y_ = self._transform(block.fit(X_, Y_), X_, Y_)
pbar.update()
if estimator:
pbar.set_postfix('transform', self.estimator.__class__.__name__)
self.estimator.fit(X_, Y_)
pbar.update()
return X_, Y_
##############################################################################
def transform(self,
X:pd.DataFrame,
Y:pd.DataFrame or pd.Series = pd.DataFrame()
# -*- coding: utf-8 -*-
"""
Created on Mon Oct 16 09:04:46 2017
@author: <NAME>
pygemfxns_plotting.py produces figures of simulation results
"""
# Built-in Libraries
import os
import collections
# External Libraries
import numpy as np
import pandas as pd
#import netCDF4 as nc
import matplotlib as mpl
import matplotlib.pyplot as plt
from matplotlib.lines import Line2D
from matplotlib.ticker import MaxNLocator
import matplotlib.patches as mpatches
import scipy
from scipy import stats
from scipy.ndimage import uniform_filter
import cartopy
#import geopandas
import xarray as xr
from osgeo import gdal, ogr, osr
import pickle
# Local Libraries
import pygem_input as input
import pygemfxns_modelsetup as modelsetup
import pygemfxns_massbalance as massbalance
import pygemfxns_gcmbiasadj as gcmbiasadj
import class_mbdata
import class_climate
#import run_simulation
# Script options
option_plot_cmip5_normalizedchange = 1
option_plot_cmip5_runoffcomponents = 0
option_plot_cmip5_map = 0
option_output_tables = 0
option_subset_GRACE = 0
option_plot_modelparam = 0
option_plot_era_normalizedchange = 1
option_compare_GCMwCal = 0
option_plot_mcmc_errors = 0
option_plot_maxloss_issues = 0
option_plot_individual_glaciers = 0
option_plot_degrees = 0
option_plot_pies = 0
option_plot_individual_gcms = 0
#%% ===== Input data =====
netcdf_fp_cmip5 = '/Users/davidrounce/Documents/Dave_Rounce/HiMAT/Output/simulations/spc/'
netcdf_fp_era = '/Users/davidrounce/Documents/Dave_Rounce/HiMAT/Output/simulations/ERA-Interim/ERA-Interim_1980_2017_nochg'
#mcmc_fp = '/Users/davidrounce/Documents/Dave_Rounce/HiMAT/Output/cal_opt2_allglac_1ch_tn_20190108/'
#mcmc_fp = '/Users/davidrounce/Documents/Dave_Rounce/HiMAT/Output/cal_opt2_spc_20190222_adjp10/'
mcmc_fp = '/Users/davidrounce/Documents/Dave_Rounce/HiMAT/Output/cal_opt2_spc_20190308_adjp12/cal_opt2/'
figure_fp = '/Users/davidrounce/Documents/Dave_Rounce/HiMAT/Output/figures/cmip5/'
csv_fp = '/Users/davidrounce/Documents/Dave_Rounce/HiMAT/Output/csv/cmip5/'
cal_fp = '/Users/davidrounce/Documents/Dave_Rounce/HiMAT/Output/cal_opt2_spc_20190308_adjp12/cal_opt2/'
# Regions
rgi_regions = [13, 14, 15]
#rgi_regions = [13]
# Shapefiles
rgiO1_shp_fn = '/Users/davidrounce/Documents/Dave_Rounce/HiMAT/RGI/rgi60/00_rgi60_regions/00_rgi60_O1Regions.shp'
watershed_shp_fn = '/Users/davidrounce/Documents/Dave_Rounce/HiMAT/qgis_himat/HMA_basins_20181018_4plot.shp'
kaab_shp_fn = '/Users/davidrounce/Documents/Dave_Rounce/HiMAT/qgis_himat/kaab2015_regions.shp'
srtm_fn = '/Users/davidrounce/Documents/Dave_Rounce/HiMAT/qgis_himat/SRTM_HMA.tif'
srtm_contour_fn = '/Users/davidrounce/Documents/Dave_Rounce/HiMAT/qgis_himat/SRTM_HMA_countours_2km_gt3000m_smooth.shp'
rgi_glac_shp_fn = '/Users/davidrounce/Documents/Dave_Rounce/HiMAT/qgis_himat/rgi60_HMA.shp'
#kaab_dict_fn = '/Users/davidrounce/Documents/Dave_Rounce/HiMAT/qgis_himat/rgi60_HMA_w_watersheds_kaab.csv'
#kaab_csv = pd.read_csv(kaab_dict_fn)
#kaab_dict = dict(zip(kaab_csv.RGIId, kaab_csv.kaab))
# GCMs and RCP scenarios
#gcm_names = ['CanESM2', 'CCSM4', 'CNRM-CM5', 'CSIRO-Mk3-6-0', 'GFDL-CM3', 'GFDL-ESM2M', 'GISS-E2-R', 'IPSL-CM5A-LR',
# 'IPSL-CM5A-MR', 'MIROC5', 'MRI-CGCM3', 'NorESM1-M']
gcm_names = ['CanESM2']
#gcm_names = ['CanESM2', 'CCSM4', 'CNRM-CM5', 'CSIRO-Mk3-6-0', 'GFDL-CM3', 'GFDL-ESM2M', 'GISS-E2-R', 'IPSL-CM5A-LR',
# 'MPI-ESM-LR', 'NorESM1-M']
rcps = ['rcp26', 'rcp45', 'rcp85']
#rcps = ['rcp26']
# Grouping
grouping = 'all'
#grouping = 'rgi_region'
#grouping = 'watershed'
#grouping = 'kaab'
# Variable name
vn = 'mass_change'
#vn = 'volume_norm'
#vn = 'peakwater'
# Group dictionaries
watershed_dict_fn = '/Users/davidrounce/Documents/Dave_Rounce/HiMAT/qgis_himat/rgi60_HMA_dict_watershed.csv'
watershed_csv = pd.read_csv(watershed_dict_fn)
watershed_dict = dict(zip(watershed_csv.RGIId, watershed_csv.watershed))
kaab_dict_fn = '/Users/davidrounce/Documents/Dave_Rounce/HiMAT/qgis_himat/rgi60_HMA_dict_kaab.csv'
kaab_csv = pd.read_csv(kaab_dict_fn)
kaab_dict = dict(zip(kaab_csv.RGIId, kaab_csv.kaab_name))
# GRACE mascons
mascon_fp = input.main_directory + '/../GRACE/GSFC.glb.200301_201607_v02.4/'
mascon_fn = 'mascon.txt'
mascon_cns = ['CenLat', 'CenLon', 'LatWidth', 'LonWidth', 'Area_arcdeg', 'Area_km2', 'location', 'basin',
'elevation_flag']
mascon_df = pd.read_csv(mascon_fp + mascon_fn, header=None, names=mascon_cns, skiprows=14,
delim_whitespace=True)
mascon_df = mascon_df.sort_values(by=['CenLat', 'CenLon'])
mascon_df.reset_index(drop=True, inplace=True)
degree_size = 0.25
peakwater_Nyears = 10
# Plot label dictionaries
title_dict = {'Amu_Darya': 'Amu Darya',
'Brahmaputra': 'Brahmaputra',
'Ganges': 'Ganges',
'Ili': 'Ili',
'Indus': 'Indus',
'Inner_Tibetan_Plateau': 'Inner TP',
'Inner_Tibetan_Plateau_extended': 'Inner TP ext',
'Irrawaddy': 'Irrawaddy',
'Mekong': 'Mekong',
'Salween': 'Salween',
'Syr_Darya': 'Syr Darya',
'Tarim': 'Tarim',
'Yangtze': 'Yangtze',
'inner_TP': 'Inner TP',
'Karakoram': 'Karakoram',
'Yigong': 'Yigong',
'Yellow': 'Yellow',
'Bhutan': 'Bhutan',
'Everest': 'Everest',
'West Nepal': 'West Nepal',
'Spiti Lahaul': 'Spiti Lahaul',
'tien_shan': 'Tien Shan',
'Pamir': 'Pamir',
'pamir_alai': 'Pamir Alai',
'Kunlun': 'Kunlun',
'Hindu Kush': 'Hindu Kush',
13: 'Central Asia',
14: 'South Asia West',
15: 'South Asia East',
'all': 'HMA'
}
title_location = {'Syr_Darya': [68, 46.1],
'Ili': [83.6, 45.5],
'Amu_Darya': [64.6, 36.9],
'Tarim': [83.0, 39.2],
'Inner_Tibetan_Plateau_extended': [100, 40],
'Indus': [70.7, 31.9],
'Inner_Tibetan_Plateau': [85, 32.4],
'Yangtze': [106.0, 29.8],
'Ganges': [81.3, 26.6],
'Brahmaputra': [92.0, 26],
'Irrawaddy': [96.2, 23.8],
'Salween': [98.5, 20.8],
'Mekong': [103.8, 17.5],
'Yellow': [106.0, 36],
13: [83,39],
14: [70.8, 30],
15: [81,26.8],
'inner_TP': [89, 33.5],
'Karakoram': [68.7, 33.5],
'Yigong': [97.5, 26.2],
'Bhutan': [92.1, 26],
'Everest': [85, 26.3],
'West Nepal': [76.5, 28],
'Spiti Lahaul': [72, 31.9],
'tien_shan': [80, 42],
'Pamir': [67.3, 36.5],
'pamir_alai': [65.2, 40.2],
'Kunlun': [79, 37.5],
'Hindu Kush': [65.3, 35]
}
vn_dict = {'volume_glac_annual': 'Normalized Volume [-]',
'volume_norm': 'Normalized Volume Remaining [-]',
'runoff_glac_annual': 'Normalized Runoff [-]',
'peakwater': 'Peak Water [yr]',
'temp_glac_annual': 'Temperature [$^\circ$C]',
'prec_glac_annual': 'Precipitation [m]',
'precfactor': 'Precipitation Factor [-]',
'tempchange': 'Temperature bias [$^\circ$C]',
'ddfsnow': 'DDFsnow [mm w.e. d$^{-1}$ $^\circ$C$^{-1}$]'}
rcp_dict = {'rcp26': '2.6',
'rcp45': '4.5',
'rcp60': '6.0',
'rcp85': '8.5'}
# Colors list
colors_rgb = [(0.00, 0.57, 0.57), (0.71, 0.43, 1.00), (0.86, 0.82, 0.00), (0.00, 0.29, 0.29), (0.00, 0.43, 0.86),
(0.57, 0.29, 0.00), (1.00, 0.43, 0.71), (0.43, 0.71, 1.00), (0.14, 1.00, 0.14), (1.00, 0.71, 0.47),
(0.29, 0.00, 0.57), (0.57, 0.00, 0.00), (0.71, 0.47, 1.00), (1.00, 1.00, 0.47)]
gcm_colordict = dict(zip(gcm_names, colors_rgb[0:len(gcm_names)]))
rcp_colordict = {'rcp26':'b', 'rcp45':'k', 'rcp60':'m', 'rcp85':'r'}
rcp_styledict = {'rcp26':':', 'rcp45':'--', 'rcp85':'-.'}
east = 60
west = 110
south = 15
north = 50
xtick = 5
ytick = 5
xlabel = 'Longitude [$^\circ$]'
ylabel = 'Latitude [$^\circ$]'
#%% FUNCTIONS
def select_groups(grouping, main_glac_rgi_all):
"""
Select groups based on grouping
"""
if grouping == 'rgi_region':
groups = main_glac_rgi_all.O1Region.unique().tolist()
group_cn = 'O1Region'
elif grouping == 'watershed':
groups = main_glac_rgi_all.watershed.unique().tolist()
group_cn = 'watershed'
elif grouping == 'kaab':
groups = main_glac_rgi_all.kaab.unique().tolist()
group_cn = 'kaab'
groups = [x for x in groups if str(x) != 'nan']
elif grouping == 'degree':
groups = main_glac_rgi_all.deg_id.unique().tolist()
group_cn = 'deg_id'
elif grouping == 'mascon':
groups = main_glac_rgi_all.mascon_idx.unique().tolist()
groups = [int(x) for x in groups]
group_cn = 'mascon_idx'
else:
groups = ['all']
group_cn = 'all_group'
try:
groups = sorted(groups, key=str.lower)
except:
groups = sorted(groups)
return groups, group_cn
def partition_multimodel_groups(gcm_names, grouping, vn, main_glac_rgi_all, rcp=None):
"""Partition multimodel data by each group for all GCMs for a given variable
Parameters
----------
gcm_names : list
list of GCM names
grouping : str
name of grouping to use
vn : str
variable name
main_glac_rgi_all : pd.DataFrame
glacier table
rcp : str
rcp name
Output
------
time_values : np.array
time values that accompany the multimodel data
ds_group : list of lists
dataset containing the multimodel data for a given variable for all the GCMs
ds_glac : np.array
dataset containing the variable of interest for each gcm and glacier
"""
# Groups
groups, group_cn = select_groups(grouping, main_glac_rgi_all)
# variable name
if vn == 'volume_norm' or vn == 'mass_change':
vn_adj = 'volume_glac_annual'
elif vn == 'peakwater':
vn_adj = 'runoff_glac_annual'
else:
vn_adj = vn
ds_group = [[] for group in groups]
for ngcm, gcm_name in enumerate(gcm_names):
for region in rgi_regions:
# Load datasets
if gcm_name == 'ERA-Interim':
netcdf_fp = netcdf_fp_era
ds_fn = 'R' + str(region) + '_ERA-Interim_c2_ba1_100sets_1980_2017.nc'
else:
netcdf_fp = netcdf_fp_cmip5 + vn_adj + '/'
ds_fn = ('R' + str(region) + '_' + gcm_name + '_' + rcp + '_c2_ba' + str(input.option_bias_adjustment) +
'_100sets_2000_2100--' + vn_adj + '.nc')
# Bypass GCMs that are missing a rcp scenario
try:
ds = xr.open_dataset(netcdf_fp + ds_fn)
except:
continue
# Extract time variable
if 'annual' in vn_adj:
try:
time_values = ds[vn_adj].coords['year_plus1'].values
except:
time_values = ds[vn_adj].coords['year'].values
elif 'monthly' in vn_adj:
time_values = ds[vn_adj].coords['time'].values
# Merge datasets
if region == rgi_regions[0]:
vn_glac_all = ds[vn_adj].values[:,:,0]
vn_glac_std_all = ds[vn_adj].values[:,:,1]
else:
vn_glac_all = np.concatenate((vn_glac_all, ds[vn_adj].values[:,:,0]), axis=0)
vn_glac_std_all = np.concatenate((vn_glac_std_all, ds[vn_adj].values[:,:,1]), axis=0)
try:
ds.close()
except:
continue
if ngcm == 0:
ds_glac = vn_glac_all[np.newaxis,:,:]
else:
ds_glac = np.concatenate((ds_glac, vn_glac_all[np.newaxis,:,:]), axis=0)
# Cycle through groups
for ngroup, group in enumerate(groups):
# Select subset of data
main_glac_rgi = main_glac_rgi_all.loc[main_glac_rgi_all[group_cn] == group]
vn_glac = vn_glac_all[main_glac_rgi.index.values.tolist(),:]
# vn_glac_std = vn_glac_std_all[main_glac_rgi.index.values.tolist(),:]
# vn_glac_var = vn_glac_std **2
# Regional sum
vn_reg = vn_glac.sum(axis=0)
# Record data for multi-model stats
if ngcm == 0:
ds_group[ngroup] = [group, vn_reg]
else:
ds_group[ngroup][1] = np.vstack((ds_group[ngroup][1], vn_reg))
return groups, time_values, ds_group, ds_glac
def partition_era_groups(grouping, vn, main_glac_rgi_all):
"""Partition multimodel data by each group for all GCMs for a given variable
Parameters
----------
grouping : str
name of grouping to use
vn : str
variable name
main_glac_rgi_all : pd.DataFrame
glacier table
Output
------
time_values : np.array
time values that accompany the multimodel data
ds_group : list of lists
dataset containing [group, regional sum, regional variance] of the variable for each group
ds_glac : list of np.array
list containing the variable of interest and its standard deviation for each glacier
"""
# Groups
groups, group_cn = select_groups(grouping, main_glac_rgi_all)
# variable name
if vn == 'volume_norm' or vn == 'mass_change':
vn_adj = 'volume_glac_annual'
elif vn == 'peakwater':
vn_adj = 'runoff_glac_annual'
else:
vn_adj = vn
ds_group = [[] for group in groups]
for region in rgi_regions:
# Load datasets
ds_fn = 'R' + str(region) + '_ERA-Interim_c2_ba1_100sets_1980_2017.nc'
ds = xr.open_dataset(netcdf_fp_era + ds_fn)
# Extract time variable
if 'annual' in vn_adj:
try:
time_values = ds[vn_adj].coords['year_plus1'].values
except:
time_values = ds[vn_adj].coords['year'].values
elif 'monthly' in vn_adj:
time_values = ds[vn_adj].coords['time'].values
# Merge datasets
if region == rgi_regions[0]:
vn_glac_all = ds[vn_adj].values[:,:,0]
vn_glac_std_all = ds[vn_adj].values[:,:,1]
else:
vn_glac_all = np.concatenate((vn_glac_all, ds[vn_adj].values[:,:,0]), axis=0)
vn_glac_std_all = np.concatenate((vn_glac_std_all, ds[vn_adj].values[:,:,1]), axis=0)
# Close dataset
ds.close()
ds_glac = [vn_glac_all, vn_glac_std_all]
# Cycle through groups
for ngroup, group in enumerate(groups):
# Select subset of data
main_glac_rgi = main_glac_rgi_all.loc[main_glac_rgi_all[group_cn] == group]
vn_glac = vn_glac_all[main_glac_rgi.index.values.tolist(),:]
vn_glac_std = vn_glac_std_all[main_glac_rgi.index.values.tolist(),:]
vn_glac_var = vn_glac_std **2
# Regional mean, standard deviation, and variance
# mean: E(X+Y) = E(X) + E(Y)
# var: Var(X+Y) = Var(X) + Var(Y) + 2*Cov(X,Y)
# assuming X and Y are indepdent, then Cov(X,Y)=0, so Var(X+Y) = Var(X) + Var(Y)
# std: std(X+Y) = (Var(X+Y))**0.5
# Regional sum
vn_reg = vn_glac.sum(axis=0)
vn_reg_var = vn_glac_var.sum(axis=0)
# vn_reg_std = vn_glac_var**0.5
# Record data for multi-model stats
ds_group[ngroup] = [group, vn_reg, vn_reg_var]
return groups, time_values, ds_group, ds_glac
def partition_modelparams_groups(grouping, vn, main_glac_rgi_all):
"""Partition model parameters by each group
Parameters
----------
grouping : str
name of grouping to use
vn : str
variable name
main_glac_rgi_all : pd.DataFrame
glacier table
Output
------
groups : list
list of group names
ds_group : list of lists
dataset containing [group, mean parameter value] for each group
"""
# Groups
groups, group_cn = select_groups(grouping, main_glac_rgi_all)
ds_group = [[] for group in groups]
# Cycle through groups
for ngroup, group in enumerate(groups):
# Select subset of data
main_glac_rgi = main_glac_rgi_all.loc[main_glac_rgi_all[group_cn] == group]
vn_glac = main_glac_rgi_all[vn].values[main_glac_rgi.index.values.tolist()]
# Regional mean
vn_reg = vn_glac.mean(axis=0)
# Record data for each group
ds_group[ngroup] = [group, vn_reg]
return groups, ds_group
def vn_multimodel_mean_processed(vn, ds, idx, time_values, every_glacier=0):
"""
Calculate multi-model mean for a given variable of interest
Parameters
----------
vn : str
variable/parameter name
ds : list
dataset containing groups
idx : int
group index
time_values : np.array
array of years
every_glacier : int
switch to work with groups or work with concatenated dataframe
Output
------
output_multimodel_mean : np.array or float
multi-model mean normalized by initial volume (volume_norm), or the peak water year (peakwater)
"""
# Multi-model mean
if every_glacier == 0:
vn_multimodel_mean = ds[idx][1].mean(axis=0)
else:
vn_multimodel_mean = ds[:,idx,:].mean(axis=0)
# Normalized volume based on initial volume
if vn == 'volume_norm':
if vn_multimodel_mean[0] > 0:
output_multimodel_mean = vn_multimodel_mean / vn_multimodel_mean[0]
else:
output_multimodel_mean = np.zeros(vn_multimodel_mean.shape)
# Peak water based on 10-yr running average
elif vn == 'peakwater':
vn_runningmean = uniform_filter(vn_multimodel_mean, peakwater_Nyears)
output_multimodel_mean = time_values[np.where(vn_runningmean == vn_runningmean.max())[-1][0]]
return output_multimodel_mean
def peakwater(runoff, time_values, nyears):
"""Compute peak water based on the running mean of N years
Parameters
----------
runoff : np.array
one-dimensional array of runoff for each timestep
time_values : np.array
time associated with each timestep
nyears : int
number of years to compute running mean used to smooth peakwater variations
Output
------
peakwater_yr : int
peakwater year
peakwater_chg : float
percent change of peak water compared to first timestep (running means used)
runoff_chg : float
percent change in runoff at the last timestep compared to the first timestep (running means used)
"""
runningmean = uniform_filter(runoff, size=(nyears))
peakwater_idx = np.where(runningmean == runningmean.max())[-1][0]
peakwater_yr = time_values[peakwater_idx]
peakwater_chg = (runningmean[peakwater_idx] - runningmean[0]) / runningmean[0] * 100
runoff_chg = (runningmean[-1] - runningmean[0]) / runningmean[0] * 100
return peakwater_yr, peakwater_chg, runoff_chg
def size_thresholds(variable, cutoffs, sizes):
"""Loop through size thresholds for a given variable to plot
Parameters
----------
variable : np.array
data associated with glacier characteristic
cutoffs : list
values used as minimums for thresholds
(ex. 100 would give you greater than 100)
sizes : list
size values for the plot
Output
------
output : np.array
plot size for each glacier
"""
output = np.zeros(variable.shape)
for i, cutoff in enumerate(cutoffs):
output[(variable>cutoff) & (output==0)] = sizes[i]
output[output==0] = 2
return output
def select_region_climatedata(gcm_name, rcp, main_glac_rgi):
"""
Get the regional temperature and precipitation for a given dataset.
Extracts all nearest neighbor temperature and precipitation data for a given set of glaciers. The mean temperature
and precipitation of the group of glaciers is returned. If two glaciers have the same temp/prec data, that data
is only used once in the mean calculations. Additionally, one would not expect for different GCMs to be similar
because they all have different resolutions, so this mean calculations will have different numbers of pixels.
Parameters
----------
gcm_name : str
GCM name
rcp : str
rcp scenario (ex. rcp26)
main_glac_rgi : pd.DataFrame
glacier dataset used to select the nearest neighbor climate data
"""
# Date tables
print('select_region_climatedata fxn dates supplied manually')
dates_table_ref = modelsetup.datesmodelrun(startyear=2000, endyear=2100, spinupyears=0,
option_wateryear=1)
dates_table = modelsetup.datesmodelrun(startyear=2000, endyear=2100, spinupyears=0,
option_wateryear=1)
# Load gcm lat/lons
gcm = class_climate.GCM(name=gcm_name, rcp_scenario=rcp)
# Select lat/lon from GCM
ds_elev = xr.open_dataset(gcm.fx_fp + gcm.elev_fn)
gcm_lat_values_all = ds_elev.lat.values
gcm_lon_values_all = ds_elev.lon.values
ds_elev.close()
# Lat/lon dictionary to convert
gcm_lat_dict = dict(zip(range(gcm_lat_values_all.shape[0]), list(gcm_lat_values_all)))
gcm_lon_dict = dict(zip(range(gcm_lon_values_all.shape[0]), list(gcm_lon_values_all)))
# Find nearest neighbors for glaciers that have pixles
latlon_nearidx = pd.DataFrame(np.zeros((main_glac_rgi.shape[0],2)), columns=['CenLat','CenLon'])
latlon_nearidx.iloc[:,0] = (np.abs(main_glac_rgi.CenLat.values[:,np.newaxis] - gcm_lat_values_all).argmin(axis=1))
latlon_nearidx.iloc[:,1] = (np.abs(main_glac_rgi.CenLon.values[:,np.newaxis] - gcm_lon_values_all).argmin(axis=1))
latlon_nearidx = latlon_nearidx.drop_duplicates().sort_values(['CenLat', 'CenLon'])
latlon_nearidx.reset_index(drop=True, inplace=True)
latlon_reg = latlon_nearidx.copy()
latlon_reg.CenLat.replace(gcm_lat_dict, inplace=True)
latlon_reg.CenLon.replace(gcm_lon_dict, inplace=True)
# ===== LOAD CLIMATE DATA =====
# Reference climate data
ref_gcm = class_climate.GCM(name=input.ref_gcm_name)
# Air temperature [degC], Precipitation [m], Elevation [masl], Lapse rate [K m-1]
ref_temp, ref_dates = ref_gcm.importGCMvarnearestneighbor_xarray(ref_gcm.temp_fn, ref_gcm.temp_vn, latlon_reg,
dates_table_ref)
ref_prec, ref_dates = ref_gcm.importGCMvarnearestneighbor_xarray(ref_gcm.prec_fn, ref_gcm.prec_vn, latlon_reg,
dates_table_ref)
# ref_elev = ref_gcm.importGCMfxnearestneighbor_xarray(ref_gcm.elev_fn, ref_gcm.elev_vn, latlon_reg)
# GCM climate data
gcm_temp_all, gcm_dates = gcm.importGCMvarnearestneighbor_xarray(gcm.temp_fn, gcm.temp_vn, latlon_reg, dates_table)
gcm_prec_all, gcm_dates = gcm.importGCMvarnearestneighbor_xarray(gcm.prec_fn, gcm.prec_vn, latlon_reg, dates_table)
# gcm_elev = gcm.importGCMfxnearestneighbor_xarray(gcm.elev_fn, gcm.elev_vn, latlon_reg)
# GCM subset to agree with reference time period to calculate bias corrections
gcm_subset_idx_start = np.where(dates_table.date.values == dates_table_ref.date.values[0])[0][0]
gcm_subset_idx_end = np.where(dates_table.date.values == dates_table_ref.date.values[-1])[0][0]
gcm_temp = gcm_temp_all[:,gcm_subset_idx_start:gcm_subset_idx_end+1]
gcm_prec = gcm_prec_all[:,gcm_subset_idx_start:gcm_subset_idx_end+1]
## ===== BIAS ADJUSTMENTS =====
# OPTION 2: Adjust temp and prec according to Huss and Hock (2015) accounts for means and interannual variability
if input.option_bias_adjustment == 2:
# TEMPERATURE BIAS CORRECTIONS
# Mean monthly temperature
ref_temp_monthly_avg = (ref_temp.reshape(-1,12).transpose()
.reshape(-1,int(ref_temp.shape[1]/12)).mean(1).reshape(12,-1).transpose())
gcm_temp_monthly_avg = (gcm_temp.reshape(-1,12).transpose()
.reshape(-1,int(gcm_temp.shape[1]/12)).mean(1).reshape(12,-1).transpose())
# Monthly bias adjustment
gcm_temp_monthly_adj = ref_temp_monthly_avg - gcm_temp_monthly_avg
# Monthly temperature bias adjusted according to monthly average
t_mt = gcm_temp_all + np.tile(gcm_temp_monthly_adj, int(gcm_temp_all.shape[1]/12))
# Mean monthly temperature bias adjusted according to monthly average
t_m25avg = np.tile(gcm_temp_monthly_avg + gcm_temp_monthly_adj, int(gcm_temp_all.shape[1]/12))
# Calculate monthly standard deviation of temperature
ref_temp_monthly_std = (ref_temp.reshape(-1,12).transpose()
.reshape(-1,int(ref_temp.shape[1]/12)).std(1).reshape(12,-1).transpose())
gcm_temp_monthly_std = (gcm_temp.reshape(-1,12).transpose()
.reshape(-1,int(gcm_temp.shape[1]/12)).std(1).reshape(12,-1).transpose())
variability_monthly_std = ref_temp_monthly_std / gcm_temp_monthly_std
# Bias adjusted temperature accounting for monthly mean and variability
gcm_temp_bias_adj = t_m25avg + (t_mt - t_m25avg) * np.tile(variability_monthly_std, int(gcm_temp_all.shape[1]/12))
# PRECIPITATION BIAS CORRECTIONS
# Calculate monthly mean precipitation
ref_prec_monthly_avg = (ref_prec.reshape(-1,12).transpose()
.reshape(-1,int(ref_temp.shape[1]/12)).mean(1).reshape(12,-1).transpose())
gcm_prec_monthly_avg = (gcm_prec.reshape(-1,12).transpose()
.reshape(-1,int(gcm_temp.shape[1]/12)).mean(1).reshape(12,-1).transpose())
bias_adj_prec = ref_prec_monthly_avg / gcm_prec_monthly_avg
# Bias adjusted precipitation accounting for differences in monthly mean
gcm_prec_bias_adj = gcm_prec_all * np.tile(bias_adj_prec, int(gcm_temp_all.shape[1]/12))
# Regional means
reg_mean_temp_biasadj = gcm_temp_bias_adj.mean(axis=0)
reg_mean_prec_biasadj = gcm_prec_bias_adj.mean(axis=0)
return reg_mean_temp_biasadj, reg_mean_prec_biasadj
#%% LOAD ALL GLACIERS
# Load all glaciers
for rgi_region in rgi_regions:
# Data on all glaciers
main_glac_rgi_region = modelsetup.selectglaciersrgitable(rgi_regionsO1=[rgi_region], rgi_regionsO2 = 'all',
rgi_glac_number='all')
# Glacier hypsometry [km**2]
main_glac_hyps_region = modelsetup.import_Husstable(main_glac_rgi_region, input.hyps_filepath,
input.hyps_filedict, input.hyps_colsdrop)
# Ice thickness [m], average
main_glac_icethickness_region= modelsetup.import_Husstable(main_glac_rgi_region,
input.thickness_filepath, input.thickness_filedict,
input.thickness_colsdrop)
if rgi_region == rgi_regions[0]:
main_glac_rgi_all = main_glac_rgi_region
main_glac_hyps_all = main_glac_hyps_region
main_glac_icethickness_all = main_glac_icethickness_region
else:
main_glac_rgi_all = pd.concat([main_glac_rgi_all, main_glac_rgi_region], sort=False)
main_glac_hyps_all = pd.concat([main_glac_hyps_all, main_glac_hyps_region], sort=False)
main_glac_icethickness_all = pd.concat([main_glac_icethickness_all, main_glac_icethickness_region], sort=False)
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
from matplotlib.font_manager import FontProperties
from statsmodels.tsa import stattools
from statsmodels.graphics import tsaplots
class Chp023(object):
def __init__(self):
self.name = 'Chp023'
# Data file format: code, date, day of week, open price, high price,
# low price, close price, return
self.data_file = 'data/pqb/chp023_001.txt'
def startup(self):
print('Chapter 23: Basic properties of time series')
#self.acf_pacf_demo()
#self.dwn_demo()
#self.random_walk_demo()
self.random_walk_fit()
def acf_pacf_demo(self):
data = pd.read_csv(self.data_file, sep='\t', index_col='Trddt')
sh_index = data[data.Indexcd==1]
sh_index.index = pd.to_datetime(sh_index.index)
sh_return = sh_index.Retindex
print('Time series length: N={0}'.format(len(sh_return)))
acfs = stattools.acf(sh_return)
print(acfs)
pacfs = stattools.pacf(sh_return)
print(pacfs)
tsaplots.plot_acf(sh_return, use_vlines=True, lags=30)
plt.show()
tsaplots.plot_pacf(sh_return, use_vlines=True, lags=30)
plt.show()
def dwn_demo(self):
'''
White noise example
'''
dwn = np.random.standard_normal(size=500)
plt.plot(dwn, c='b')
plt.title('White Noise Demo')
plt.show()
acfs = stattools.acf(dwn)
print(acfs)
tsaplots.plot_acf(dwn, use_vlines=True, lags=30)
plt.show()
def random_walk_demo(self):
'''
Random walk time series modeling example
'''
w = np.random.standard_normal(size=1000)
x = w
for t in range(1, len(w)):
x[t] = x[t-1] + w[t]
plt.plot(x, c='b')
plt.title('Random Walk Demo')
plt.show()
acfs = stattools.acf(x)
print(acfs)
tsaplots.plot_acf(x, use_vlines=True, lags=30)
plt.show()
# Fit the random walk signal by first-differencing it (r_t = x_t - x_{t-1}), which should recover white noise
r = []
for t in range(1, len(x)):
r.append(x[t] - x[t-1])
rd = np.array(r)
plt.plot(rd, c='r')
plt.title('Residue Signal')
plt.show()
rd_acfs = stattools.acf(rd)
print(rd_acfs)
tsaplots.plot_acf(rd, use_vlines=True, lags=30)
plt.show()
def random_walk_fit(self):
data = pd.read_csv(self.data_file, sep='\t', index_col='Trddt')
sh_index = data[data.Indexcd==1]
sh_index.index = pd.to_datetime(sh_index.index)
from __future__ import annotations
import random
import unittest
from dataclasses import dataclass
from typing import Dict, Iterator, List, Optional
import numpy as np
import pandas as pd
from gabriel_lego.cv.colors import LEGOColorID
class _NotEnoughBricks(Exception):
pass
class _NotEnoughSpace(Exception):
pass
@dataclass
class Brick:
length: int
color: LEGOColorID
def to_array_repr(self) -> List[int]:
return [self.color.value] * self.length
def __len__(self):
return self.length
def __getitem__(self, item: int):
if type(item) != int:
raise ValueError(item)
elif item < 0 or item >= self.length:
raise IndexError(item)
else:
return self.color.value
def __str__(self):
return self.__repr__()
def __repr__(self):
return f'{self.to_array_repr()}'
def __hash__(self):
return hash(str(self.length) + self.color.name)
def __eq__(self, other: Brick):
return self.color == other.color and self.length == other.length
def copy(self) -> Brick:
return Brick(self.length, self.color)
class BrickRow:
def __init__(self, length: int = 6):
self._length = length
self._bricks = {} # anchor: brick
self._avail_positions = set(range(length))
self._max_avail_space = length
def clear(self):
self._bricks = {} # anchor: brick
self._avail_positions = set(range(self._length))
self._max_avail_space = self._length
def copy(self) -> BrickRow:
other = BrickRow()
other._length = self._length
for anchor, brick in self._bricks.items():
other._bricks[anchor] = brick.copy()
other._avail_positions = set()
for pos in self._avail_positions:
other._avail_positions.add(pos)
other._max_avail_space = self._max_avail_space
return other
def __eq__(self, other: BrickRow):
try:
assert self._length == other._length
assert len(self._bricks) == len(other._bricks)
for anchor, brick in self._bricks.items():
assert anchor in other._bricks
assert other._bricks[anchor] == brick
for pos in self._avail_positions:
assert pos in other._avail_positions
assert self._max_avail_space == other._max_avail_space
return True
except AssertionError:
return False
@property
def brick_count(self) -> int:
return len(self._bricks.keys())
@property
def length(self) -> int:
return self._length
@property
def full(self) -> bool:
return self._max_avail_space == 0
@property
def empty(self) -> bool:
return self._max_avail_space == self._length
@property
def available_continuous_space(self) -> int:
return self._max_avail_space
def _update_avail_space(self) -> None:
self._max_avail_space = 0
for anchor in self._avail_positions:
space = 0
for i in range(anchor, self._length):
if i in self._avail_positions:
space += 1
else:
break
self._max_avail_space = max(space, self._max_avail_space)
def to_array_repr(self) -> List[int]:
l_repr = [0] * self._length
for anchor, brick in self._bricks.items():
for i in range(anchor, anchor + brick.length):
l_repr[i] = brick.color.value
return l_repr
def remove_random_brick(self) -> Brick:
anchor, brick = random.choice(list(self._bricks.items()))
del self._bricks[anchor]
# update available space
for i in range(anchor, anchor + brick.length):
self._avail_positions.add(i)
self._update_avail_space()
return brick
def add_brick(self, brick: Brick) -> None:
if brick.length > self._max_avail_space:
raise _NotEnoughSpace()
fitting_anchors = []
for anchor in self._avail_positions:
# first peg of brick goes on top of anchor
endpoint = anchor - 1 + brick.length
if endpoint >= self._length:
# tail of brick ends up outside of row
continue
else:
fits = True
for i in range(anchor, endpoint + 1):
if i not in self._avail_positions:
fits = False
break
if fits:
fitting_anchors.append(anchor)
anchor = random.choice(fitting_anchors)
self._bricks[anchor] = brick
# update available space
for i in range(anchor, anchor + brick.length):
self._avail_positions.remove(i)
self._update_avail_space()
class BrickBoard:
def __init__(self, base_brick: Brick):
self._rows = []
self._base = BrickRow(base_brick.length)
self._width = base_brick.length
self._base.add_brick(base_brick)
self._base_brick = base_brick
def copy(self) -> BrickBoard:
other = BrickBoard(self._base_brick.copy())
other._rows = [row.copy() for row in self._rows]
return other
def __eq__(self, other: BrickBoard):
try:
assert self.row_count == other.row_count
assert self._base == other._base
for row1, row2 in zip(self.rows(), other.rows()):
assert row1 == row2
return True
except AssertionError:
return False
def rows(self) -> Iterator[BrickRow]:
for row in self._rows:
yield row
@property
def brick_count(self) -> int:
return sum([row.brick_count for row in (self._rows + [self._base])])
@property
def base_color(self) -> LEGOColorID:
return self._base_brick.color
@property
def row_count(self) -> int:
return len(self._rows) + 1
@property
def empty(self) -> bool:
return len(self._rows) == 0
@property
def width(self) -> int:
return self._width
@property
def avail_space_in_row(self):
if len(self._rows) < 1 or self._rows[-1].full:
return self._width
else:
return self._rows[-1].available_continuous_space
def add_brick(self, brick: Brick) -> None:
if len(self._rows) < 1 or self._rows[-1].full:
self._rows.append(BrickRow(self._width))
self._rows[-1].add_brick(brick)
def remove_random_brick(self) -> Brick:
if len(self._rows) < 1:
raise _NotEnoughBricks()
brick = self._rows[-1].remove_random_brick()
if self._rows[-1].empty:
self._rows.pop(-1)
return brick
def to_array_repr(self) -> List[List[int]]:
return [row.to_array_repr() for row
in reversed([self._base] + self._rows)]
def clear(self):
self._rows = []
class BrickCollection(object):
def __init__(self, collection_dict=Dict[Brick, int]):
super(BrickCollection, self).__init__()
self._orig_collection = collection_dict.copy()
self._collection = []
for brick, count in collection_dict.items():
self._collection += ([brick] * count)
@property
def brick_count(self) -> int:
return len(self._collection)
def reset(self):
self._collection = []
for brick, count in self._orig_collection.items():
self._collection += ([brick] * count)
def put_brick(self, brick: Brick) -> None:
self._collection.append(brick)
def get_brick(self, length: int, color: LEGOColorID) -> Optional[Brick]:
tmp_brick = Brick(length, color)
if tmp_brick in self._collection:
self._collection.remove(tmp_brick)
return tmp_brick
else:
return None
def get_random_brick(self, max_len: int = 6) -> Brick:
try:
selected_brick = random.choice([b for b in self._collection
if len(b) <= max_len])
self._collection.remove(selected_brick)
except IndexError as e:
raise _NotEnoughBricks() from e
return selected_brick
def gen_random_task(task_length: int,
collection: BrickCollection,
starting_board: BrickBoard) \
-> List[BrickBoard]:
assert task_length % 2 == 0
states = [starting_board.copy()]
step_cnt = 0
while True:
if starting_board.brick_count - 1 == task_length - step_cnt:
break
build = random.choice([True] * 5
+ [False]) and collection.brick_count > 0
if build or starting_board.empty:
try:
brick = collection.get_random_brick(
max_len=starting_board.avail_space_in_row)
starting_board.add_brick(brick)
except _NotEnoughBricks:
collection.put_brick(starting_board.remove_random_brick())
else:
collection.put_brick(starting_board.remove_random_brick())
states.append(starting_board.copy())
step_cnt += 1
while not starting_board.empty:
collection.put_brick(starting_board.remove_random_brick())
states.append(starting_board.copy())
return states
def gen_random_latinsqr_task(min_task_len: int,
delays: List[float],
square_size: int,
collection: BrickCollection) -> List[pd.DataFrame]:
avg_task_len = 2 * min_task_len
max_task_len = 3 * min_task_len
base_board = BrickBoard(collection.get_brick(6, LEGOColorID.RED))
# min_task = gen_random_task(min_task_len, collection, base_board)
# avg_task = gen_random_task(avg_task_len, collection, base_board)
# max_task = gen_random_task(max_task_len, collection, base_board)
combinations = []
for d in delays:
for task_len in (min_task_len, avg_task_len, max_task_len):
# if random.choice([True, False]):
# task = list(reversed(task))
combinations.append(
(d, gen_random_task(task_len, collection, base_board)))
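# Build the square: the first row is a random ordering of the (delay, task)
# combinations, each following row is a cyclic rotation of the previous one,
# and reversed copies of all rows are appended to counterbalance order effects.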
sqr = [random.sample(combinations, k=len(combinations))]
for i in range(1, square_size):
sqr.append(sqr[i - 1][-1:] + sqr[i - 1][:-1])
for i in range(square_size):
sqr.append(list(reversed(sqr[i])))
# unroll everything
latin_sqr = []
for i in range(len(sqr)):
task_seq = sqr[i]
task_df = pd.DataFrame(columns=['delay', 'state'])
task_df = task_df.append({
'delay': 0,
'state': base_board.to_array_repr()
}, ignore_index=True)
for d, steps in task_seq:
for step in steps[1:]: # skip first step in each subtask
task_df = task_df.append({
'delay': d,
'state': step.to_array_repr()
}, ignore_index=True)
latin_sqr.append(task_df)
return latin_sqr
Life_of_George_Bricks = BrickCollection(
collection_dict={
# black bricks
# Brick(1, LEGOColorID.BLACK) : 8,
# Brick(2, LEGOColorID.BLACK) : 6,
# Brick(6, LEGOColorID.BLACK) : 2,
# Brick(4, LEGOColorID.BLACK) : 4,
# Brick(3, LEGOColorID.BLACK) : 4,
# blue bricks
Brick(1, LEGOColorID.BLUE) : 6,
Brick(2, LEGOColorID.BLUE) : 8,
Brick(6, LEGOColorID.BLUE) : 2,
Brick(4, LEGOColorID.BLUE) : 4,
Brick(3, LEGOColorID.BLUE) : 4,
# red bricks
Brick(1, LEGOColorID.RED) : 6,
Brick(2, LEGOColorID.RED) : 8,
Brick(6, LEGOColorID.RED) : 2,
Brick(4, LEGOColorID.RED) : 4,
Brick(3, LEGOColorID.RED) : 4,
# yellow bricks
Brick(1, LEGOColorID.YELLOW): 6,
Brick(2, LEGOColorID.YELLOW): 8,
Brick(6, LEGOColorID.YELLOW): 2,
Brick(4, LEGOColorID.YELLOW): 4,
Brick(3, LEGOColorID.YELLOW): 4,
# green bricks
Brick(1, LEGOColorID.GREEN) : 6,
Brick(2, LEGOColorID.GREEN) : 8,
Brick(6, LEGOColorID.GREEN) : 2,
Brick(4, LEGOColorID.GREEN) : 4,
Brick(3, LEGOColorID.GREEN) : 4,
# white bricks
# Brick(1, LEGOColorID.WHITE) : 8,
# Brick(2, LEGOColorID.WHITE) : 6,
# Brick(6, LEGOColorID.WHITE) : 2,
# Brick(4, LEGOColorID.WHITE) : 4,
# Brick(3, LEGOColorID.WHITE) : 4,
}
)
class GeneratorTests(unittest.TestCase):
def setUp(self) -> None:
self._valid_colors = [color for color in LEGOColorID
if color != LEGOColorID.NOTHING]
self._row = BrickRow(6)
self._board = BrickBoard(
Life_of_George_Bricks.get_brick(6, LEGOColorID.RED))
def tearDown(self) -> None:
self._row.clear()
self._board.clear()
Life_of_George_Bricks.reset()
def test_bricks(self):
for color_id in self._valid_colors:
length = random.randint(1, 10)
brick = Brick(length, color_id)
self.assertListEqual(brick.to_array_repr(),
[color_id.value] * length)
def test_add_brick_to_empty_row(self):
for i in range(1, 7):
brick_cnt = self._row.brick_count
brick = Brick(i, random.choice(self._valid_colors))
self._row.add_brick(brick)
self.assertLess(self._row.available_continuous_space,
self._row.length)
self.assertEqual(self._row.brick_count, brick_cnt + 1)
if brick.length == self._row.length:
self.assertEqual(0, self._row.available_continuous_space)
self._row.clear()
def test_fill_row(self):
self._row.add_brick(Brick(int(np.floor(self._row.length / 2)),
random.choice(self._valid_colors)))
while not self._row.full:
self._row.add_brick(Brick(self._row._max_avail_space,
random.choice(self._valid_colors)))
self.assertNotIn(0, self._row.to_array_repr(),
self._row.to_array_repr())
def test_remove_brick(self):
# fill row first
self.test_fill_row()
brick_cnt = self._row.brick_count
brick = self._row.remove_random_brick()
self.assertEqual(brick_cnt - 1, self._row.brick_count)
self.assertEqual(brick.length, self._row.available_continuous_space,
(self._row.to_array_repr(), brick.to_array_repr()))
def test_empty_row(self):
# fill row first
self.test_fill_row()
while not self._row.empty:
_ = self._row.remove_random_brick()
self.assertEqual(self._row.to_array_repr(),
[0] * self._row.length)
self.assertEqual(0, self._row.brick_count)
def test_add_too_big_brick(self):
# fill row first
self.test_fill_row()
brick = self._row.remove_random_brick()
brick_cnt = self._row.brick_count
with self.assertRaises(_NotEnoughSpace):
self._row.add_brick(Brick(brick.length + 1, brick.color))
self.assertEqual(brick_cnt, self._row.brick_count)
def test_replace_brick_with_smaller_bricks(self):
# fill row first
self.test_fill_row()
brick_cnt = self._row.brick_count
brick = self._row.remove_random_brick()
while brick.length <= 1:
self._row.add_brick(brick)
brick = self._row.remove_random_brick()
# split brick
brick_1 = Brick(brick.length - 1, brick.color)
brick_2 = Brick(1, brick.color)
self._row.add_brick(brick_1)
self._row.add_brick(brick_2)
self.assertTrue(self._row.full)
self.assertNotIn(0, self._row.to_array_repr())
self.assertEqual(brick_cnt + 1, self._row.brick_count)
def test_init_table(self):
self.assertEqual(1, self._board.row_count)
l_repr = self._board.to_array_repr()
self.assertListEqual(l_repr,
[[self._board.base_color.value]
* self._board.width])
self.assertEqual(1, self._board.brick_count)
def test_add_brick_empty_table(self):
self.assertEqual(1, self._board.row_count)
brick = Brick(random.randint(1, self._board.width),
random.choice(self._valid_colors))
self._board.add_brick(brick)
self.assertEqual(2, self._board.row_count)
self.assertEqual(2, self._board.brick_count)
def test_fill_table(self):
self.assertEqual(1, self._board.row_count)
brick_cnt = self._board.brick_count
        # add 100 rows
for i in range(100):
self._board.add_brick(Brick(int(np.floor(self._board.width / 2)),
random.choice(self._valid_colors)))
brick_cnt += 1
self.assertEqual(brick_cnt, self._board.brick_count)
while self._board.avail_space_in_row < self._board.width:
self._board.add_brick(Brick(self._board.avail_space_in_row,
random.choice(self._valid_colors)))
brick_cnt += 1
self.assertEqual(brick_cnt, self._board.brick_count)
self.assertEqual(100 + 1, self._board.row_count)
for row in self._board.rows():
self.assertTrue(row.full)
self.assertNotIn(0, row.to_array_repr())
def test_empty_table(self):
# fill first
self.test_fill_table()
brick_cnt = self._board.brick_count
while not self._board.empty:
_ = self._board.remove_random_brick()
brick_cnt -= 1
self.assertEqual(brick_cnt, self._board.brick_count)
self.assertEqual(1, self._board.brick_count)
self.assertEqual(1, self._board.row_count)
def test_random_task_gen(self):
steps = 200
task = gen_random_task(steps, Life_of_George_Bricks, self._board)
self.assertEqual(self._board, task[0])
self.assertEqual(self._board, task[-1])
for i in range(1, len(task)):
state1 = task[i - 1]
state2 = task[i]
self.assertEqual(1, abs(state1.brick_count - state2.brick_count))
self.assertEqual(steps + 1, len(task))
if __name__ == '__main__':
pd.set_option('display.max_rows', None)
    pd.set_option('display.max_columns', None)
import re
import io
import demjson
import requests
import numpy as np
import pandas as pd
from fake_useragent import UserAgent
# TODO: need to add comments
url = {
"eastmoney": "http://datainterface.eastmoney.com/EM_DataCenter/JS.aspx",
"fred_econ": "https://fred.stlouisfed.org/graph/fredgraph.csv?",
"OECD": "https://stats.oecd.org/sdmx-json/data/DP_LIVE/"
}
# https://fred.stlouisfed.org/release/tables?rid=205&eid=712378
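# All of the eastmoney helpers below share one pattern: the endpoint returns a JSONP-style
# payload such as "datatable123({data:[...],pages:...})", so each function strips the callback
# wrapper (data_text[data_text.find("{"):-1], or find("(") + 1 for the jQuery-style callbacks)
# and decodes the remaining JavaScript object literal with demjson.decode before splitting the
# comma-separated records into a DataFrame.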
def gdp_quarterly():
"""
ABS: absolute value (per 100 million CNY)
YoY: year on year growth
Data source: http://data.eastmoney.com/cjsj/gdp.html
"""
ua = UserAgent(verify_ssl=False)
request_header = {"User-Agent": ua.random}
tmp_url = url["eastmoney"]
request_params = {
"cb": "datatable7519513",
"type": "GJZB",
"sty": "ZGZB",
"js": "({data:[(x)],pages:(pc)})",
"p": "1",
"ps": "200",
"mkt": "20",
"_": "1622020352668"
}
r = requests.get(tmp_url, params=request_params, headers=request_header)
data_text = r.text
data_json = demjson.decode(data_text[data_text.find("{"): -1])
df = pd.DataFrame([item.split(",") for item in data_json["data"]])
df.columns = [
"Date",
"Absolute_Value",
"YoY_Rate",
"Primary_Industry_ABS",
"Primary_Industry_YoY_Rate",
"Secondary_Industry_ABS",
"Secondary_Industry_YoY_Rate",
"Tertiary_Industry_ABS",
"Tertiary_Industry_YoY_Rate",
]
df["Date"] = pd.to_datetime(df["Date"], format="%Y-%m-%d")
df["Absolute_Value"] = df["Absolute_Value"].astype(float)
df["Secondary_Industry_ABS"] = df["Secondary_Industry_ABS"].astype(float)
df["Tertiary_Industry_ABS"] = df["Tertiary_Industry_ABS"].astype(float)
df["Absolute_Value"] = df["Absolute_Value"].astype(float)
df["YoY_Rate"] = df["YoY_Rate"].astype(float) / 100
df["Secondary_Industry_YoY_Rate"] = df["Secondary_Industry_YoY_Rate"].astype(
float) / 100
df["Tertiary_Industry_YoY_Rate"] = df["Tertiary_Industry_YoY_Rate"].astype(
float) / 100
return df
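# A minimal sketch (not part of the original module) of how the fetchers below can be combined;
# the column names come from ppi_monthly/cpi_monthly defined later in this file, and the outer
# merge on "Date" is just one reasonable way to align the monthly series.
def _example_macro_panel():
    """Align a few of the monthly series on their Date column (outer join keeps all months)."""
    ppi = ppi_monthly()[["Date", "Current_Month_YoY_Rate"]].rename(
        columns={"Current_Month_YoY_Rate": "PPI_YoY"})
    cpi = cpi_monthly()[["Date", "Notion_YoY_Rate"]].rename(
        columns={"Notion_YoY_Rate": "CPI_YoY"})
    return ppi.merge(cpi, on="Date", how="outer").sort_values("Date")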
def ppi_monthly():
"""
ABS: absolute value (per 100 million CNY)
YoY: year on year growth
Accum: Accumulation
Data source: http://data.eastmoney.com/cjsj/ppi.html
"""
ua = UserAgent(verify_ssl=False)
request_header = {"User-Agent": ua.random}
tmp_url = url["eastmoney"]
request_params = {
"cb": "datatable9051497",
"type": "GJZB",
"sty": "ZGZB",
"js": "({data:[(x)],pages:(pc)})",
"p": "1",
"ps": "200",
"mkt": "22",
"_": "1622047940401"
}
r = requests.get(tmp_url, params=request_params, headers=request_header)
data_text = r.text
data_json = demjson.decode(data_text[data_text.find("{"): -1])
df = pd.DataFrame([item.split(",") for item in data_json["data"]])
df.columns = [
"Date",
"Current_Month",
"Current_Month_YoY_Rate",
"Current_Month_Accum"
]
df["Date"] = pd.to_datetime(df["Date"], format="%Y-%m-%d")
df["Current_Month"] = df["Current_Month"].astype(float)
df["Current_Month_YoY_Rate"] = df["Current_Month_YoY_Rate"].astype(
float) / 100
df["Current_Month_Accum"] = df["Current_Month_Accum"].astype(float)
return df
def cpi_monthly():
"""
Accum: Accumulation
YoY: year on year growth
MoM: month on month growth
Data source: http://data.eastmoney.com/cjsj/cpi.html
"""
tmp_url = url["eastmoney"]
ua = UserAgent(verify_ssl=False)
request_header = {"User-Agent": ua.random}
tmp_url = url["eastmoney"]
request_params = {
"cb": "datatable2790750",
"type": "GJZB",
"sty": "ZGZB",
"js": "({data:[(x)],pages:(pc)})",
"p": "1",
"ps": "200",
"mkt": "19",
"_": "1622020352668"
}
r = requests.get(tmp_url, params=request_params, headers=request_header)
data_text = r.text
data_json = demjson.decode(data_text[data_text.find("{"): -1])
df = pd.DataFrame([item.split(",") for item in data_json["data"]])
df.columns = [
"Date",
"Notion_Monthly",
"Notion_YoY_Rate",
"Notion_MoM_Rate",
"Notion_Accum",
"Urban_Monthly",
"Urban_YoY_Rate",
"Urban_MoM_Rate",
"Urban_Accum",
"Rural_Monthly",
"Rural_YoY_Rate",
"Rural_MoM_Rate",
"Rural_Accum",
]
df["Date"] = pd.to_datetime(df["Date"], format="%Y-%m-%d")
df[["Notion_Monthly",
"Notion_Accum",
"Urban_Monthly",
"Urban_Accum",
"Rural_Monthly",
"Rural_Accum"]] = df[["Notion_Monthly",
"Notion_Accum",
"Urban_Monthly",
"Urban_Accum",
"Rural_Monthly",
"Rural_Accum"]].astype(float)
df[["Notion_YoY_Rate",
"Notion_MoM_Rate",
"Urban_YoY_Rate",
"Urban_MoM_Rate",
"Rural_YoY_Rate",
"Rural_MoM_Rate"]] = df[["Notion_YoY_Rate",
"Notion_MoM_Rate",
"Urban_YoY_Rate",
"Urban_MoM_Rate",
"Rural_YoY_Rate",
"Rural_MoM_Rate"]].astype(float) / 100
return df
def pmi_monthly():
"""
Man: manufacturing
Non-Man: Non-manufacturing
Data Source: http://data.eastmoney.com/cjsj/pmi.html
"""
tmp_url = url["eastmoney"]
ua = UserAgent(verify_ssl=False)
request_header = {"User-Agent": ua.random}
tmp_url = url["eastmoney"]
request_params = {
"cb": "datatable4515395",
"type": "GJZB",
"sty": "ZGZB",
"js": "({data:[(x)],pages:(pc)})",
"p": "2",
"ps": "200",
"mkt": "21",
"_": "162202151821"
}
r = requests.get(tmp_url, params=request_params, headers=request_header)
data_text = r.text
data_json = demjson.decode(data_text[data_text.find("{"): -1])
df = pd.DataFrame([item.split(",") for item in data_json["data"]])
df.columns = [
"Date",
"Man_Industry_Index",
"Man_Index_YoY_Rate",
"Non-Man_Industry_Index",
"Non-Man_Index_YoY_Rate",
]
df["Date"] = pd.to_datetime(df["Date"], format="%Y-%m-%d")
df[["Man_Industry_Index", "Non-Man_Industry_Index"]] = \
df[["Man_Industry_Index", "Non-Man_Industry_Index"]].astype(float)
df[["Man_Index_YoY_Rate", "Non-Man_Index_YoY_Rate"]] = \
df[["Man_Index_YoY_Rate", "Non-Man_Index_YoY_Rate"]].astype(float) / 100
return df
def fai_monthly(): # fix asset investment
"""
    YoY: year on year growth
    MoM: month on month growth
    Accum: accumulation since the start of the year
Data Source: http://data.eastmoney.com/cjsj/gdzctz.html
"""
tmp_url = url["eastmoney"]
ua = UserAgent(verify_ssl=False)
request_header = {"User-Agent": ua.random}
tmp_url = url["eastmoney"]
request_params = {
"cb": "datatable607120",
"type": "GJZB",
"sty": "ZGZB",
"js": "({data:[(x)],pages:(pc)})",
"p": "1",
"ps": "200",
"mkt": "12",
"_": "1622021790947"
}
r = requests.get(tmp_url, params=request_params, headers=request_header)
data_text = r.text
data_json = demjson.decode(data_text[data_text.find("{"): -1])
df = pd.DataFrame([item.split(",") for item in data_json["data"]])
df.columns = [
"Date",
"Current_Month",
"YoY_Rate",
"MoM_Rate",
"Current_Year_Accum"
]
df["Date"] = pd.to_datetime(df["Date"], format="%Y-%m-%d")
df[["Current_Month", "Current_Year_Accum"]] = \
df[["Current_Month", "Current_Year_Accum"]].astype(float)
df[["YoY_Rate", "MoM_Rate"]] = \
df[["YoY_Rate", "MoM_Rate"]].astype(float) / 100
return df
def hi_old_monthly(): # house index old version (2008-2010)
"""
    HPI: housing prosperity index
    LDAI: land development area index
    SPI: sales price index
Data Source: http://data.eastmoney.com/cjsj/house.html
"""
tmp_url = url["eastmoney"]
ua = UserAgent(verify_ssl=False)
request_header = {"User-Agent": ua.random}
tmp_url = url["eastmoney"]
request_params = {
"cb": "datatable1895714",
"type": "GJZB",
"sty": "ZGZB",
"js": "({data:[(x)],pages:(pc)})",
"p": "1",
"ps": "200",
"mkt": "10",
"_": "1622022794457"
}
r = requests.get(tmp_url, params=request_params, headers=request_header)
data_text = r.text
data_json = demjson.decode(data_text[data_text.find("{"): -1])
df = pd.DataFrame([item.split(",") for item in data_json["data"]])
df.columns = [
"Date",
"Housing_Prosperity_Index",
"HPI_YoY_Rate",
"Land_Development_Area_Index",
"LDAI_YoY_Rate",
"Sales_Price_Index",
"SPI_YoY_Rate"
]
df["Date"] = pd.to_datetime(df["Date"], format="%Y-%m-%d")
df[["Housing_Prosperity_Index",
"Land_Development_Area_Index",
"Sales_Price_Index"]] = df[["Housing_Prosperity_Index",
"Land_Development_Area_Index",
"Sales_Price_Index"]].astype(float)
df[["HPI_YoY_Rate", "LDAI_YoY_Rate", "SPI_YoY_Rate"]] = \
df[["HPI_YoY_Rate", "LDAI_YoY_Rate", "SPI_YoY_Rate"]].astype(float) / 100
return df
# mkt=1&stat=2&city1=%E5%B9%BF%E5%B7%9E&city2=%E4%B8%8A%E6%B5%B7
# newly built commercial housing & second-hand commercial housing
def hi_new_monthly(city1: str, city2: str):
"""
    nbch: newly built commercial housing
    shch: second-hand commercial housing
    YoY: year on year growth
    MoM: month on month growth
Data Source: http://data.eastmoney.com/cjsj/newhouse.html
"""
tmp_url = "http://data.eastmoney.com/dataapi/cjsj/getnewhousechartdata?"
ua = UserAgent(verify_ssl=False)
request_header = {"User-Agent": ua.random}
request_params_nbch_MoM = {
"mkt": "1",
"stat": "2",
"city1": "{}".format(city1),
"city2": "{}".format(city2)
}
request_params_shch_MoM = {
"mkt": "1",
"stat": "3",
"city1": "{}".format(city1),
"city2": "{}".format(city2)
}
r_nbch_MoM = requests.get(
tmp_url,
params=request_params_nbch_MoM,
headers=request_header)
r_shch_MoM = requests.get(
tmp_url,
params=request_params_shch_MoM,
headers=request_header)
data_text_nbch_MoM = r_nbch_MoM.text
data_text_shch_MoM = r_shch_MoM.text
data_json_nbch_MoM = demjson.decode(data_text_nbch_MoM)
data_json_shch_MoM = demjson.decode(data_text_shch_MoM)
date_nbch = data_json_nbch_MoM['chart']['series']['value']
data1_nbch_MoM = data_json_nbch_MoM['chart']['graphs']['graph'][0]['value']
data2_nbch_MoM = data_json_nbch_MoM['chart']['graphs']['graph'][1]['value']
data1_shch_MoM = data_json_shch_MoM['chart']['graphs']['graph'][0]['value']
data2_shch_MoM = data_json_shch_MoM['chart']['graphs']['graph'][1]['value']
df_MoM = pd.DataFrame({"Date": date_nbch,
"City1_nbch_MoM": data1_nbch_MoM,
"City1_shch_MoM": data1_shch_MoM,
"City2_nbch_MoM": data2_nbch_MoM,
"City2_shch_MoM": data2_shch_MoM})
df_MoM["Date"] = pd.to_datetime(df_MoM["Date"], format="%m/%d/%Y")
request_params_nbch_YoY = {
"mkt": "2",
"stat": "2",
"city1": "{}".format(city1),
"city2": "{}".format(city2)
}
request_params_shch_YoY = {
"mkt": "2",
"stat": "3",
"city1": "{}".format(city1),
"city2": "{}".format(city2)
}
r_nbch_YoY = requests.get(
tmp_url,
params=request_params_nbch_YoY,
headers=request_header)
r_shch_YoY = requests.get(
tmp_url,
params=request_params_shch_YoY,
headers=request_header)
data_text_nbch_YoY = r_nbch_YoY.text
data_text_shch_YoY = r_shch_YoY.text
data_json_nbch_YoY = demjson.decode(data_text_nbch_YoY)
data_json_shch_YoY = demjson.decode(data_text_shch_YoY)
date_nbch = data_json_nbch_YoY['chart']['series']['value']
data1_nbch_YoY = data_json_nbch_YoY['chart']['graphs']['graph'][0]['value']
data2_nbch_YoY = data_json_nbch_YoY['chart']['graphs']['graph'][1]['value']
data1_shch_YoY = data_json_shch_YoY['chart']['graphs']['graph'][0]['value']
data2_shch_YoY = data_json_shch_YoY['chart']['graphs']['graph'][1]['value']
df_YoY = pd.DataFrame({"Date": date_nbch,
"City1_nbch_YoY": data1_nbch_YoY,
"City1_shch_YoY": data1_shch_YoY,
"City2_nbch_YoY": data2_nbch_YoY,
"City2_shch_YoY": data2_shch_YoY})
df_YoY["Date"] = pd.to_datetime(df_YoY["Date"], format="%m/%d/%Y")
df = df_YoY.merge(df_MoM, on="Date")
return df
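# Example call (city names are passed as Chinese strings, e.g. the pair URL-encoded in the
# comment above): hi_new_monthly("广州", "上海") returns one row per month with MoM and YoY
# price changes for both cities' newly built and second-hand housing.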
def ci_eei_monthly(): # Climate Index & Entrepreneur Expectation Index
"""
    CI: climate index
    EEI: entrepreneur expectation index
Data Source: http://data.eastmoney.com/cjsj/qyjqzs.html
"""
tmp_url = url["eastmoney"]
ua = UserAgent(verify_ssl=False)
request_header = {"User-Agent": ua.random}
tmp_url = url["eastmoney"]
request_params = {
"cb": "datatable7709842",
"type": "GJZB",
"sty": "ZGZB",
"js": "({data:[(x)],pages:(pc)})",
"p": "1",
"ps": "200",
"mkt": "8",
"_": "1622041485306"
}
r = requests.get(tmp_url, params=request_params, headers=request_header)
data_text = r.text
data_json = demjson.decode(data_text[data_text.find("{"): -1])
df = pd.DataFrame([item.split(",") for item in data_json["data"]])
df.columns = [
"Date",
"Climate_Index",
"CI_YoY_Rate",
"CI_MoM_Rate",
"Entrepreneur_Expectation_Index",
"EEI_YoY_Rate",
"EEI_MoM_Rate"
]
df.replace('', np.nan, inplace=True)
df["Date"] = pd.to_datetime(df["Date"], format="%Y-%m-%d")
df[["Climate_Index", "Entrepreneur_Expectation_Index"]] = \
df[["Climate_Index", "Entrepreneur_Expectation_Index"]].astype(float)
df[["CI_YoY_Rate", "CI_MoM_Rate", "EEI_YoY_Rate", "EEI_MoM_Rate"]] = df[[
"CI_YoY_Rate", "CI_MoM_Rate", "EEI_YoY_Rate", "EEI_MoM_Rate"]].astype(float) / 100
return df
def ig_monthly(): # Industry Growth
"""
    IG: industry growth
    YoY: year on year growth
    Accum: accumulation since the start of the year
Data Source: http://data.eastmoney.com/cjsj/gyzjz.html
"""
tmp_url = url["eastmoney"]
ua = UserAgent(verify_ssl=False)
request_header = {"User-Agent": ua.random}
tmp_url = url["eastmoney"]
request_params = {
"cb": "datatable4577327",
"type": "GJZB",
"sty": "ZGZB",
"js": "({data:[(x)],pages:(pc)})",
"p": "1",
"ps": "200",
"mkt": "0",
"_": "1622042259898"
}
r = requests.get(tmp_url, params=request_params, headers=request_header)
data_text = r.text
data_json = demjson.decode(data_text[data_text.find("{"): -1])
df = pd.DataFrame([item.split(",") for item in data_json["data"]])
df.columns = [
"Date",
"IG_YoY_Rate",
"IG_Accum_Rate",
]
df["Date"] = pd.to_datetime(df["Date"], format="%Y-%m-%d")
df[["IG_YoY_Rate", "IG_Accum_Rate"]] = \
df[["IG_YoY_Rate", "IG_Accum_Rate"]].astype(float) / 100
return df
def cgpi_monthly(): # Corporate Goods Price Index
"""
    CGPI: corporate goods price index
    YoY: year on year growth
    MoM: month on month growth
Data Source: http://data.eastmoney.com/cjsj/qyspjg.html
"""
tmp_url = url["eastmoney"]
ua = UserAgent(verify_ssl=False)
request_header = {"User-Agent": ua.random}
tmp_url = url["eastmoney"]
request_params = {
"cb": "datatable7184534",
"type": "GJZB",
"sty": "ZGZB",
"js": "({data:[(x)],pages:(pc)})",
"p": "1",
"ps": "200",
"mkt": "9",
"_": "1622042652353"
}
r = requests.get(tmp_url, params=request_params, headers=request_header)
data_text = r.text
data_json = demjson.decode(data_text[data_text.find("{"): -1])
df = pd.DataFrame([item.split(",") for item in data_json["data"]])
df.columns = [
"Date",
"General_Index",
"General_Index_YoY_Rate",
"Total_Index_MoM_Rate",
"Agricultural_Product",
"Agricultural_Product_YoY_Rate",
"Agricultural_Product_MoM_Rate",
"Mineral_Product",
"Mineral_Product_YoY_Rate",
"Mineral_Product_MoM_Rate",
"Coal_Oil_Electricity",
"Coal_Oil_Electricity_YoY_Rate",
"Coal_Oil_Electricity_MoM_Rate"
]
df["Date"] = pd.to_datetime(df["Date"], format="%Y-%m-%d")
df[["General_Index",
"Agricultural_Product",
"Mineral_Product",
"Coal_Oil_Electricity"]] = df[["General_Index",
"Agricultural_Product",
"Mineral_Product",
"Coal_Oil_Electricity"]].astype(float)
df[["General_Index_YoY_Rate",
"Total_Index_MoM_Rate",
"Agricultural_Product_YoY_Rate",
"Agricultural_Product_MoM_Rate",
"Coal_Oil_Electricity_YoY_Rate",
"Coal_Oil_Electricity_MoM_Rate"]] = df[["General_Index_YoY_Rate",
"Total_Index_MoM_Rate",
"Agricultural_Product_YoY_Rate",
"Agricultural_Product_MoM_Rate",
"Coal_Oil_Electricity_YoY_Rate",
"Coal_Oil_Electricity_MoM_Rate"]].astype(float) / 100
return df
def cci_csi_cei_monthly(): # Consumer Confidence Index & Consumer Satisfaction Index & Consumer Expectation Index
"""
    CCI: consumer confidence index
    CSI: consumer satisfaction index
    CEI: consumer expectation index
Data Source: http://data.eastmoney.com/cjsj/xfzxx.html
"""
tmp_url = url["eastmoney"]
ua = UserAgent(verify_ssl=False)
request_header = {"User-Agent": ua.random}
tmp_url = url["eastmoney"]
request_params = {
"cb": "datatable1243218",
"type": "GJZB",
"sty": "ZGZB",
"js": "({data:[(x)],pages:(pc)})",
"p": "1",
"ps": "200",
"mkt": "4",
"_": "1622043704818"
}
r = requests.get(tmp_url, params=request_params, headers=request_header)
data_text = r.text
data_json = demjson.decode(data_text[data_text.find("{"): -1])
df = pd.DataFrame([item.split(",") for item in data_json["data"]])
df.columns = [
"Date",
"CCI",
"CCI_YoY_Rate",
"CCI_MoM_Rate",
"CSI",
"CSI_YoY_Rate",
"CSI_MoM_Rate",
"CEI",
"CEI_YoY_Rate",
"CEI_MoM_Rate"
]
df["Date"] = pd.to_datetime(df["Date"], format="%Y-%m-%d")
df[["CCI", "CSI", "CEI"]] = \
df[["CCI", "CSI", "CEI"]].astype(float)
df[["CCI_YoY_Rate", "CCI_MoM_Rate",
"CSI_YoY_Rate", "CSI_MoM_Rate",
"CEI_YoY_Rate", "CEI_MoM_Rate"]] = \
df[["CCI_YoY_Rate", "CCI_MoM_Rate",
"CSI_YoY_Rate", "CSI_MoM_Rate",
"CEI_YoY_Rate", "CEI_MoM_Rate"]].astype(float) / 100
return df
def trscg_monthly(): # Total Retail Sales of Consumer Goods
"""
    TRSCG: total retail sales of consumer goods
    YoY: year on year growth
    MoM: month on month growth
    Accum: accumulation since the start of the year
Data Source: http://data.eastmoney.com/cjsj/xfp.html
"""
tmp_url = url["eastmoney"]
ua = UserAgent(verify_ssl=False)
request_header = {"User-Agent": ua.random}
tmp_url = url["eastmoney"]
request_params = {
"cb": "datatable3665821",
"type": "GJZB",
"sty": "ZGZB",
"js": "({data:[(x)],pages:(pc)})",
"p": "1",
"ps": "200",
"mkt": "5",
"_": "1622044011316"
}
r = requests.get(tmp_url, params=request_params, headers=request_header)
data_text = r.text
data_json = demjson.decode(data_text[data_text.find("{"): -1])
df = pd.DataFrame([item.split(",") for item in data_json["data"]])
df.columns = [
"Date",
"Current_Month",
"TRSCG_YoY_Rate",
"TRSCG_MoM_Rate",
"TRSCG_Accum",
"TRSCG_Accum_YoY_Rate"
]
df.replace("", np.nan, inplace=True)
df["Date"] = pd.to_datetime(df["Date"], format="%Y-%m-%d")
df[["Current_Month", "TRSCG_Accum"]] = \
df[["Current_Month", "TRSCG_Accum"]].astype(float)
df[["TRSCG_YoY_Rate", "TRSCG_MoM_Rate", "TRSCG_Accum_YoY_Rate"]] = df[[
"TRSCG_YoY_Rate", "TRSCG_MoM_Rate", "TRSCG_Accum_YoY_Rate"]].astype(float) / 100
return df
def ms_monthly(): # monetary Supply
"""
    M0 / M1 / M2: monetary supply aggregates
    YoY: year on year growth
    MoM: month on month growth
Data Source: http://data.eastmoney.com/cjsj/hbgyl.html
"""
tmp_url = url["eastmoney"]
ua = UserAgent(verify_ssl=False)
request_header = {"User-Agent": ua.random}
tmp_url = url["eastmoney"]
request_params = {
"cb": "datatable3818891",
"type": "GJZB",
"sty": "ZGZB",
"js": "({data:[(x)],pages:(pc)})",
"p": "1",
"ps": "200",
"mkt": "11",
"_": "1622044292103"
}
r = requests.get(tmp_url, params=request_params, headers=request_header)
data_text = r.text
data_json = demjson.decode(data_text[data_text.find("{"): -1])
df = pd.DataFrame([item.split(",") for item in data_json["data"]])
df.columns = [
"Date",
"M2",
"M2_YoY_Rate",
"M2_MoM_Rate",
"M1",
"M1_YoY_Rate",
"M1_MoM_Rate",
"M0",
"M0_YoY_Rate",
"M0_MoM_Rate"
]
df["Date"] = pd.to_datetime(df["Date"], format="%Y-%m-%d")
df[["M0", "M1", "M2"]] = \
df[["M0", "M1", "M2"]].astype(float)
df[["M0_YoY_Rate", "M1_YoY_Rate", "M2_YoY_Rate",
"M0_MoM_Rate", "M1_MoM_Rate", "M2_MoM_Rate"]] = \
df[["M0_YoY_Rate", "M1_YoY_Rate", "M2_YoY_Rate",
"M0_MoM_Rate", "M1_MoM_Rate", "M2_MoM_Rate"]].astype(float) / 100
return df
def ie_monthly(): # Import & Export
"""
Data Source: http://data.eastmoney.com/cjsj/hgjck.html
"""
tmp_url = url["eastmoney"]
ua = UserAgent(verify_ssl=False)
request_header = {"User-Agent": ua.random}
tmp_url = url["eastmoney"]
request_params = {
"cb": "datatable3818891",
"type": "GJZB",
"sty": "ZGZB",
"js": "({data:[(x)],pages:(pc)})",
"p": "1",
"ps": "200",
"mkt": "1",
"_": "1622044292103"
}
r = requests.get(tmp_url, params=request_params, headers=request_header)
data_text = r.text
data_json = demjson.decode(data_text[data_text.find("{"): -1])
df = pd.DataFrame([item.split(",") for item in data_json["data"]])
df.columns = [
"Date",
"Current_Month_Export",
"Current_Month_Export_YoY_Rate",
"Current_Month_Export_MoM_Rate",
"Current_Month_Import",
"Current_Month_Import_YoY_Rate",
"Current_Month_Import_MoM_Rate",
"Accumulation_Export",
"Accumulation_Export_YoY_Rate",
"Accumulation_Import",
"Accumulation_Import_YoY_Rate"
]
df["Date"] = pd.to_datetime(df["Date"], format="%Y-%m-%d")
df.replace("", np.nan, inplace=True)
df[["Current_Month_Export", "Current_Month_Import",
"Accumulation_Export", "Accumulation_Import"]] = \
df[["Current_Month_Export", "Current_Month_Import",
"Accumulation_Export", "Accumulation_Import"]].astype(float)
df[["Current_Month_Export_YoY_Rate",
"Current_Month_Export_MoM_Rate",
"Current_Month_Import_YoY_Rate",
"Current_Month_Import_MoM_Rate",
"Accumulation_Export_YoY_Rate",
"Accumulation_Import_YoY_Rate"]] = \
df[["Current_Month_Export_YoY_Rate",
"Current_Month_Export_MoM_Rate",
"Current_Month_Import_YoY_Rate",
"Current_Month_Import_MoM_Rate",
"Accumulation_Export_YoY_Rate",
"Accumulation_Import_YoY_Rate"]].astype(float)/100
return df
def stock_monthly(): # Stock Trading Statistics
"""
Data Source: http://data.eastmoney.com/cjsj/gpjytj.html
"""
tmp_url = url["eastmoney"]
ua = UserAgent(verify_ssl=False)
request_header = {"User-Agent": ua.random}
tmp_url = url["eastmoney"]
request_params = {
"cb": "jQuery112308659690274138041_1622084599455",
"type": "GJZB",
"sty": "ZGZB",
"js": "({data:[(x)],pages:(pc)})",
"p": "1",
"ps": "200",
"mkt": "2",
"_": "1622084599456"
}
r = requests.get(tmp_url, params=request_params, headers=request_header)
data_text = r.text
data_json = demjson.decode(data_text[data_text.find("(") + 1:-1])
df = pd.DataFrame([item.split(",") for item in data_json["data"]])
df.columns = [
"Date",
"SH_Total_Stock_issue",
"SZ_Total_Stock_Issue",
"SH_Total_Market_Capitalization",
"SZ_Total_Market_Capitalization",
"SH_Turnover",
"SZ_Turnover",
"SH_Volume",
"SZ_Volume",
"SH_Highest",
"SZ_Highest",
"SH_lowest",
"SZ_lowest"
]
df.replace("", np.nan, inplace=True)
df["Date"] = pd.to_datetime(df["Date"], format="%Y-%m-%d")
df[list(df.columns[1:])] = df[list(df.columns[1:])].astype(float)
return df
def fgr_monthly(): # Forex and Gold Reserve
"""
    Forex and gold reserve data from the Eastmoney macro data center.
"""
tmp_url = url["eastmoney"]
ua = UserAgent(verify_ssl=False)
request_header = {"User-Agent": ua.random}
tmp_url = url["eastmoney"]
request_params = {
"cb": "tatable6260802",
"type": "GJZB",
"sty": "ZGZB",
"js": "({data:[(x)],pages:(pc)})",
"p": "1",
"ps": "200",
"mkt": "16",
"_": "1622044863548"
}
r = requests.get(tmp_url, params=request_params, headers=request_header)
data_text = r.text
data_json = demjson.decode(data_text[data_text.find("{"): -1])
df = pd.DataFrame([item.split(",") for item in data_json["data"]])
df.columns = [
"Date",
"Forex",
"Forex_YoY_Rate",
"Forex_MoM_Rate",
"Gold",
"Gold_YoY_Rate",
"Gold_MoM_Rate"
]
df.replace("", np.nan, inplace=True)
df["Date"] = pd.to_datetime(df["Date"], format="%Y-%m-%d")
df[["Forex", "Gold"]] = \
df[["Forex", "Gold"]].astype(float)
df[["Forex_YoY_Rate", "Gold_YoY_Rate",
"Forex_MoM_Rate", "Gold_MoM_Rate"]] = \
df[["Forex_YoY_Rate", "Gold_YoY_Rate",
"Forex_MoM_Rate", "Gold_MoM_Rate"]].astype(float) / 100
return df
def ctsf_monthly(): # Client Transaction Settlement Funds
"""
http://data.eastmoney.com/cjsj/banktransfer.html
"""
tmp_url = "http://data.eastmoney.com/dataapi/cjsj/getbanktransferdata?"
ua = UserAgent(verify_ssl=False)
request_header = {"User-Agent": ua.random}
request_params = {
"p": "1",
"ps": "200"
}
r = requests.get(tmp_url, params=request_params, headers=request_header)
data_text = r.text
data_json = demjson.decode(data_text[data_text.find("["):-11])
df = pd.DataFrame(data_json)
df.replace("", np.nan, inplace=True)
df["StartDate"] = pd.to_datetime(df["StartDate"], format="%Y-%m-%d")
df["EndDate"] = pd.to_datetime(df["EndDate"], format="%Y-%m-%d")
df[list(df.columns)[2:]] = df[list(df.columns)[2:]].astype(float)
return df
# TODO: SPECIAL CASE
def sao_monthly(): # Stock Account Overview
"""
http://data.eastmoney.com/cjsj/gpkhsj.html
"""
tmp_url = "http://dcfm.eastmoney.com/em_mutisvcexpandinterface/api/js/get?"
ua = UserAgent(verify_ssl=False)
request_header = {"User-Agent": ua.random}
request_params = {
"callback": "datatable4006236",
"type": "GPKHData",
"js": "({data:[(x)],pages:(pc)})",
"st": "SDATE",
"sr": "-1",
"token": "<KEY>",
"p": "1",
"ps": "2000",
"_": "1622079339035"
}
r = requests.get(tmp_url, params=request_params, headers=request_header)
data_text = r.text
data_json = demjson.decode(data_text[data_text.find("{") + 6: -14])
df = pd.DataFrame(data_json[0])
df.columns = [
"Date",
"New_Investor",
"New_Investor_MoM_Rate",
"New_Investor_YoY_Rate",
"Active_Investor",
"Active_Investor_A_Share",
"Active_Investor_B_share",
"SHIndex_Close",
"SHIndex_Rate",
"SHSZ_Market_Capitalization",
"SHSZ_Average_Capitalization"
]
df.replace("-", np.nan, inplace=True)
df.Date = pd.to_datetime(df.Date, format="%Y年%m月")
df[list(df.columns[~df.columns.isin(["Date", "New_Investor_MoM_Rate", "New_Investor_YoY_Rate"])])] = df[list(
df.columns[~df.columns.isin(["Date", "New_Investor_MoM_Rate", "New_Investor_YoY_Rate"])])].astype(float)
df[["New_Investor_MoM_Rate", "New_Investor_YoY_Rate"]] = \
df[["New_Investor_MoM_Rate", "New_Investor_YoY_Rate"]].astype(float) / 100
return df
def fdi_monthly(): # Foreign Direct Investment
"""
http://data.eastmoney.com/cjsj/fdi.html
"""
tmp_url = url["eastmoney"]
ua = UserAgent(verify_ssl=False)
request_header = {"User-Agent": ua.random}
tmp_url = url["eastmoney"]
request_params = {
"cb": "datatable1477466",
"type": "GJZB",
"sty": "ZGZB",
"js": "({data:[(x)],pages:(pc)})",
"p": "1",
"ps": "200",
"mkt": "15",
"_": "1622044863548"
}
r = requests.get(tmp_url, params=request_params, headers=request_header)
data_text = r.text
data_json = demjson.decode(data_text[data_text.find("{"): -1])
df = pd.DataFrame([item.split(",") for item in data_json["data"]])
df.columns = [
"Date",
"Current_Month",
"YoY_Rate",
"MoM_Rate",
"Accumulation",
"Accum_YoY_Rate"
]
df.replace("", np.nan, inplace=True)
df["Date"] = pd.to_datetime(df["Date"], format="%Y-%m-%d")
df[["Current_Month", "Accumulation"]] = \
df[["Current_Month", "Accumulation"]].astype(float)
df[["YoY_Rate", "MoM_Rate", "Accum_YoY_Rate"]] = \
df[["YoY_Rate", "MoM_Rate", "Accum_YoY_Rate"]].astype(float) / 100
return df
def gr_monthly(): # Government Revenue
"""
http://data.eastmoney.com/cjsj/czsr.html
"""
tmp_url = url["eastmoney"]
ua = UserAgent(verify_ssl=False)
request_header = {"User-Agent": ua.random}
tmp_url = url["eastmoney"]
request_params = {
"cb": "datatable7840652",
"type": "GJZB",
"sty": "ZGZB",
"js": "({data:[(x)],pages:(pc)})",
"p": "1",
"ps": "200",
"mkt": "14",
"_": "1622080618625"
}
r = requests.get(tmp_url, params=request_params, headers=request_header)
data_text = r.text
data_json = demjson.decode(data_text[data_text.find("{"): -1])
df = pd.DataFrame([item.split(",") for item in data_json["data"]])
df.columns = [
"Date",
"Current_Month",
"YoY_Rate",
"MoM_Rate",
"Accumulation",
"Accum_YoY_Rate"
]
df["Date"] = pd.to_datetime(df["Date"], format="%Y-%m-%d")
df[["Current_Month", "Accumulation"]] = \
df[["Current_Month", "Accumulation"]].astype(float)
df[["YoY_Rate", "MoM_Rate", "Accum_YoY_Rate"]] = \
df[["YoY_Rate", "MoM_Rate", "Accum_YoY_Rate"]].astype(float) / 100
return df
def ti_monthly(): # Tax Income
"""
http://data.eastmoney.com/cjsj/qgsssr.html
"""
tmp_url = url["eastmoney"]
ua = UserAgent(verify_ssl=False)
request_header = {"User-Agent": ua.random}
tmp_url = url["eastmoney"]
request_params = {
"cb": "datatable8280567",
"type": "GJZB",
"sty": "ZGZB",
"js": "({data:[(x)],pages:(pc)})",
"p": "1",
"ps": "200",
"mkt": "3",
"_": "1622080669713"
}
r = requests.get(tmp_url, params=request_params, headers=request_header)
data_text = r.text
data_json = demjson.decode(data_text[data_text.find("{"): -1])
df = pd.DataFrame([item.split(",") for item in data_json["data"]])
df.columns = [
"Date",
"Current_Month",
"YoY_Rate",
"MoM_Rate"
]
df["Date"] = pd.to_datetime(df["Date"], format="%Y-%m-%d")
df = df.replace("", np.nan)
df[["Current_Month"]] = \
df[["Current_Month"]].astype(float)
df[["YoY_Rate", "MoM_Rate"]] = \
df[["YoY_Rate", "MoM_Rate"]].astype(float) / 100
return df
def nl_monthly(): # New Loan
"""
http://data.eastmoney.com/cjsj/xzxd.html
"""
tmp_url = url["eastmoney"]
ua = UserAgent(verify_ssl=False)
request_header = {"User-Agent": ua.random}
tmp_url = url["eastmoney"]
request_params = {
"cb": "datatable2533707",
"type": "GJZB",
"sty": "ZGZB",
"js": "({data:[(x)],pages:(pc)})",
"p": "1",
"ps": "200",
"mkt": "7",
"_": "1622080800162"
}
r = requests.get(tmp_url, params=request_params, headers=request_header)
data_text = r.text
data_json = demjson.decode(data_text[data_text.find("{"): -1])
df = pd.DataFrame([item.split(",") for item in data_json["data"]])
df.columns = [
"Date",
"Current_Month",
"YoY_Rate",
"MoM_Rate",
"Accumulation",
"Accum_YoY_Rate"
]
df["Date"] = pd.to_datetime(df["Date"], format="%Y-%m-%d")
df[["Current_Month", "Accumulation"]] = \
df[["Current_Month", "Accumulation"]].astype(float)
df[["YoY_Rate", "MoM_Rate", "Accum_YoY_Rate"]] =\
df[["YoY_Rate", "MoM_Rate", "Accum_YoY_Rate"]].astype(float) / 100
return df
def dfclc_monthly(): # Deposit of Foreign Currency and Local Currency
"""
http://data.eastmoney.com/cjsj/wbck.html
"""
tmp_url = url["eastmoney"]
ua = UserAgent(verify_ssl=False)
request_header = {"User-Agent": ua.random}
tmp_url = url["eastmoney"]
request_params = {
"cb": "datatable2899877",
"type": "GJZB",
"sty": "ZGZB",
"js": "({data:[(x)],pages:(pc)})",
"p": "1",
"ps": "200",
"mkt": "18",
"_": "1622081057370"
}
r = requests.get(tmp_url, params=request_params, headers=request_header)
data_text = r.text
data_json = demjson.decode(data_text[data_text.find("{"): -1])
df = pd.DataFrame([item.split(",") for item in data_json["data"]])
df.columns = [
"Date",
"Current_Month",
"YoY_Rate",
"MoM_Rate",
"Accumulation"
]
df["Date"] = | pd.to_datetime(df["Date"], format="%Y-%m-%d") | pandas.to_datetime |
import pandas as pd
import json
import os
from sklearn import preprocessing
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
base_folder = [".","output","experiments","attacks"]
default_targets = ["s","n","p","r","k","K","d","D","A","e","E"]
def heat_pivot(df, columns=["Source", "Target", "Value"], normalize=False, fig_size=(10, 10), title="", do_plot=True):
table = df.pivot(*columns)
if normalize:
x = table.values # returns a numpy array
min_max_scaler = preprocessing.MinMaxScaler()
x_scaled = min_max_scaler.fit_transform(x)
        table = pd.DataFrame(x_scaled, index=table.index, columns=table.columns)
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
import io
import os
import copy
import math
import json
import collections
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
try:
from torch.utils.tensorboard import SummaryWriter
except:
SummaryWriter = None
from tqdm import tqdm
from qlib.utils import get_or_create_path
from qlib.log import get_module_logger
from qlib.model.base import Model
from qlib.contrib.data.dataset import MTSDatasetH
device = "cuda" if torch.cuda.is_available() else "cpu"
class TRAModel(Model):
"""
TRA Model
Args:
model_config (dict): model config (will be used by RNN or Transformer)
tra_config (dict): TRA config (will be used by TRA)
model_type (str): which backbone model to use (RNN/Transformer)
lr (float): learning rate
n_epochs (int): number of total epochs
early_stop (int): early stop when performance not improved at this step
update_freq (int): gradient update frequency
max_steps_per_epoch (int): maximum number of steps in one epoch
lamb (float): regularization parameter
rho (float): exponential decay rate for `lamb`
alpha (float): fusion parameter for calculating transport loss matrix
seed (int): random seed
logdir (str): local log directory
eval_train (bool): whether evaluate train set between epochs
eval_test (bool): whether evaluate test set between epochs
pretrain (bool): whether pretrain the backbone model before training TRA.
Note that only TRA will be optimized after pretraining
init_state (str): model init state path
freeze_model (bool): whether freeze backbone model parameters
freeze_predictors (bool): whether freeze predictors parameters
transport_method (str): transport method, can be none/router/oracle
memory_mode (str): memory mode, the same argument for MTSDatasetH
"""
def __init__(
self,
model_config,
tra_config,
model_type="RNN",
lr=1e-3,
n_epochs=500,
early_stop=50,
update_freq=1,
max_steps_per_epoch=None,
lamb=0.0,
rho=0.99,
alpha=1.0,
seed=None,
logdir=None,
eval_train=False,
eval_test=False,
pretrain=False,
init_state=None,
reset_router=False,
freeze_model=False,
freeze_predictors=False,
transport_method="none",
memory_mode="sample",
):
self.logger = get_module_logger("TRA")
assert memory_mode in ["sample", "daily"], "invalid memory mode"
assert transport_method in ["none", "router", "oracle"], f"invalid transport method {transport_method}"
assert transport_method == "none" or tra_config["num_states"] > 1, "optimal transport requires `num_states` > 1"
assert (
memory_mode != "daily" or tra_config["src_info"] == "TPE"
), "daily transport can only support TPE as `src_info`"
if transport_method == "router" and not eval_train:
self.logger.warning("`eval_train` will be ignored when using TRA.router")
if seed is not None:
np.random.seed(seed)
torch.manual_seed(seed)
self.model_config = model_config
self.tra_config = tra_config
self.model_type = model_type
self.lr = lr
self.n_epochs = n_epochs
self.early_stop = early_stop
self.update_freq = update_freq
self.max_steps_per_epoch = max_steps_per_epoch
self.lamb = lamb
self.rho = rho
self.alpha = alpha
self.seed = seed
self.logdir = logdir
self.eval_train = eval_train
self.eval_test = eval_test
self.pretrain = pretrain
self.init_state = init_state
self.reset_router = reset_router
self.freeze_model = freeze_model
self.freeze_predictors = freeze_predictors
self.transport_method = transport_method
self.use_daily_transport = memory_mode == "daily"
self.transport_fn = transport_daily if self.use_daily_transport else transport_sample
self._writer = None
if self.logdir is not None:
if os.path.exists(self.logdir):
self.logger.warning(f"logdir {self.logdir} is not empty")
os.makedirs(self.logdir, exist_ok=True)
if SummaryWriter is not None:
self._writer = SummaryWriter(log_dir=self.logdir)
self._init_model()
def _init_model(self):
self.logger.info("init TRAModel...")
self.model = eval(self.model_type)(**self.model_config).to(device)
print(self.model)
self.tra = TRA(self.model.output_size, **self.tra_config).to(device)
print(self.tra)
if self.init_state:
self.logger.warning(f"load state dict from `init_state`")
state_dict = torch.load(self.init_state, map_location="cpu")
self.model.load_state_dict(state_dict["model"])
res = load_state_dict_unsafe(self.tra, state_dict["tra"])
self.logger.warning(str(res))
if self.reset_router:
self.logger.warning(f"reset TRA.router parameters")
self.tra.fc.reset_parameters()
self.tra.router.reset_parameters()
if self.freeze_model:
self.logger.warning(f"freeze model parameters")
for param in self.model.parameters():
param.requires_grad_(False)
if self.freeze_predictors:
self.logger.warning(f"freeze TRA.predictors parameters")
for param in self.tra.predictors.parameters():
param.requires_grad_(False)
self.logger.info("# model params: %d" % sum([p.numel() for p in self.model.parameters() if p.requires_grad]))
self.logger.info("# tra params: %d" % sum([p.numel() for p in self.tra.parameters() if p.requires_grad]))
self.optimizer = optim.Adam(list(self.model.parameters()) + list(self.tra.parameters()), lr=self.lr)
self.fitted = False
self.global_step = -1
def train_epoch(self, epoch, data_set, is_pretrain=False):
self.model.train()
self.tra.train()
data_set.train()
self.optimizer.zero_grad()
P_all = []
prob_all = []
choice_all = []
max_steps = len(data_set)
if self.max_steps_per_epoch is not None:
if epoch == 0 and self.max_steps_per_epoch < max_steps:
self.logger.info(f"max steps updated from {max_steps} to {self.max_steps_per_epoch}")
max_steps = min(self.max_steps_per_epoch, max_steps)
cur_step = 0
total_loss = 0
total_count = 0
for batch in tqdm(data_set, total=max_steps):
cur_step += 1
if cur_step > max_steps:
break
if not is_pretrain:
self.global_step += 1
data, state, label, count = batch["data"], batch["state"], batch["label"], batch["daily_count"]
index = batch["daily_index"] if self.use_daily_transport else batch["index"]
with torch.set_grad_enabled(not self.freeze_model):
hidden = self.model(data)
all_preds, choice, prob = self.tra(hidden, state)
if is_pretrain or self.transport_method != "none":
# NOTE: use oracle transport for pre-training
loss, pred, L, P = self.transport_fn(
all_preds,
label,
choice,
prob,
state.mean(dim=1),
count,
self.transport_method if not is_pretrain else "oracle",
self.alpha,
training=True,
)
data_set.assign_data(index, L) # save loss to memory
if self.use_daily_transport: # only save for daily transport
P_all.append(pd.DataFrame(P.detach().cpu().numpy(), index=index))
prob_all.append(pd.DataFrame(prob.detach().cpu().numpy(), index=index))
choice_all.append(pd.DataFrame(choice.detach().cpu().numpy(), index=index))
decay = self.rho ** (self.global_step // 100) # decay every 100 steps
lamb = 0 if is_pretrain else self.lamb * decay
reg = prob.log().mul(P).sum(dim=1).mean() # train router to predict TO assignment
if self._writer is not None and not is_pretrain:
self._writer.add_scalar("training/router_loss", -reg.item(), self.global_step)
self._writer.add_scalar("training/reg_loss", loss.item(), self.global_step)
self._writer.add_scalar("training/lamb", lamb, self.global_step)
if not self.use_daily_transport:
P_mean = P.mean(axis=0).detach()
self._writer.add_scalar("training/P", P_mean.max() / P_mean.min(), self.global_step)
loss = loss - lamb * reg
else:
pred = all_preds.mean(dim=1)
loss = loss_fn(pred, label)
(loss / self.update_freq).backward()
if cur_step % self.update_freq == 0:
self.optimizer.step()
self.optimizer.zero_grad()
if self._writer is not None and not is_pretrain:
self._writer.add_scalar("training/total_loss", loss.item(), self.global_step)
total_loss += loss.item()
total_count += 1
if self.use_daily_transport and len(P_all):
P_all = pd.concat(P_all, axis=0)
            prob_all = pd.concat(prob_all, axis=0)
import collections
import csv
import enum
import os
from typing import MutableMapping, Text, Tuple, Iterable, List
import pandas as pd
from absl import logging
from tapas_file_utils import (list_directory, make_directories)
from tapas_text_utils import (wtq_normalize)
_TABLE_DIR_NAME = 'table_csv' # Name that the table folder has in SQA.
class Version(enum.Enum):
V_02 = 1
V_10 = 2
def _export_table(table, output_dir,
sqa_table_id):
output_file = os.path.join(output_dir, sqa_table_id)
with open(output_file, 'w') as table_out:
table.to_csv(
table_out,
sep=',',
escapechar='\\',
index=False,
quoting=csv.QUOTE_ALL,
encoding='utf-8')
def _get_reader(file_path):
return open(file_path, 'r')
def _get_sqa_file_path(input_dir, file_name):
return os.path.join(input_dir, 'data', file_name)
def _get_random_split_name(
split_number,
version,
):
"""Gets train and dev files for a split index."""
if version == Version.V_02:
name = 'random-split-seed-{}-{}.tsv'
return name.format(split_number, 'train'), name.format(split_number, 'test')
if version == Version.V_10:
name = 'random-split-{}-{}.tsv'
return name.format(split_number, 'train'), name.format(split_number, 'dev')
raise ValueError(f'Unknown version {version}')
def _get_sqa_table_id(wtq_table_id):
"""Goes from 'csv/123-csv/123.csv' to 'table_csv/123-123.csv'."""
return u'table_csv/' + wtq_table_id[4:].replace('/', '-').replace('-csv', '')
def _read_wtq_table(input_dir, wtq_table_id):
"""Reads table file as pandas frame."""
table_path = os.path.join(input_dir, wtq_table_id)
with open(table_path, 'r') as table_in:
return pd.read_csv(
table_in,
delimiter=',',
escapechar='\\',
dtype='str',
)
def _iterate_examples(
file_in,
version,
):
"""Reads examples from TSV file."""
if version == Version.V_02:
for line in file_in:
fields = line.rstrip().split('\t')
qid = fields[0]
question = fields[1]
wtq_table_id = fields[2]
answers = fields[3:]
yield qid, question, wtq_table_id, answers
if version == Version.V_10:
for line in csv.DictReader(file_in, delimiter='\t'):
# Parse question and answers.
qid = line['id']
question = line['utterance']
wtq_table_id = line['context']
answers = line['targetValue'].split('|')
yield qid, question, wtq_table_id, answers
def _convert_data(
table_cache,
input_dir,
output_dir,
file_name,
version,
):
"""Converts WTQ data to SQA TSV format."""
logging.info('Converting data from: %s...', file_name)
counter = collections.Counter() # Counter for stats.
sqa_data = [] # List of rows with data in SQA format.
with _get_reader(_get_sqa_file_path(input_dir, file_name)) as file_in:
for example in _iterate_examples(file_in, version):
# Parse question and answers.
qid, question, wtq_table_id, answers = example
sqa_table_id = _get_sqa_table_id(wtq_table_id)
# Get table from disk or from cache.
if sqa_table_id in table_cache:
table = table_cache[sqa_table_id]
else:
table = _read_wtq_table(input_dir, wtq_table_id)
table = table.applymap(wtq_normalize)
table_cache[sqa_table_id] = table
sqa_row = []
sqa_row.append(qid)
sqa_row.append('0')
sqa_row.append('0')
sqa_row.append(question)
sqa_row.append(sqa_table_id)
sqa_row.append(str(list(map(str, [(-1, -1) for _ in answers]))))
sqa_row.append(str(answers))
sqa_row.append('NONE')
sqa_row.append('')
sqa_data.append(sqa_row)
counter['questions'] += 1
if counter['questions'] % 100 == 0:
logging.info('Processed %s questions...', counter['questions'])
df_columns = [
'id', 'annotator', 'position', 'question', 'table_file',
'answer_coordinates', 'answer_text', 'aggregation', 'float_answer'
]
  df = pd.DataFrame(data=sqa_data, columns=df_columns, dtype=str)
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Jan 9 13:55:53 2021
@author: Clement
"""
import pandas
import geopandas as gpd
import numpy
import os
import sys
import datetime
sys.path.append(os.path.dirname(os.path.dirname(os.path.realpath(__file__))))
from gen_fct import file_fct
from gen_fct import df_fct
def last_update_db (dir_name, db_list):
list_dir, list_files = file_fct.list_dir_files(f'{dir_name}')
db_daily = db_list[db_list.loc[:,'update']==True]
if 'last_update.json' in list_files:
last_update = pandas.read_json(f'{dir_name}/last_update.json', orient = "table")
last_update['delta_day'] = last_update.apply(lambda x: (pandas.to_datetime('today')-x["date"]).days,axis=1)
print(last_update)
print('\n')
else:
last_update = pandas.DataFrame(index=db_daily.index, columns=['date', 'delta_day'])
last_update.loc[:,'delta_day'] = 100 #Arbitrary value
return last_update
def import_and_save(df_name, root, source_df):
save_path = os.path.normcase(f'{root}{source_df.loc[df_name, "sub_dir"]}/{source_df.loc[df_name, "file_name"]}')
file_fct.creation_folder(root,[source_df.loc[df_name, "sub_dir"]])
if source_df.loc[df_name, 'type'] == 'Pandas':
importing_df = pandas.read_csv(source_df.loc[df_name, 'link'],
sep=source_df.loc[df_name, 'sep'],
encoding=source_df.loc[df_name, 'encoding'])
importing_df.to_csv(save_path, index=False, sep=source_df.loc[df_name, 'sep'])
elif source_df.loc[df_name, 'type'] == 'GeoPandas':
importing_df = gpd.read_file(source_df.loc[df_name, 'link'])
importing_df.to_file(save_path, index=False)
return importing_df
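# import_and_save dispatches on the source "type": plain CSV sources are read and re-saved with
# pandas (honouring the per-source separator and encoding), while geographic sources go through
# geopandas.read_file/to_file; the freshly downloaded frame is returned so callers such as
# import_daily can extract its latest date.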
def import_static (data_dir, db_list):
raw_data_dir = os.path.normcase(f'{data_dir}/raw')
list_dir, list_files = file_fct.list_dir_files(raw_data_dir)
df_static = db_list[db_list.loc[:,'update']==False]
for a_df_name in df_static.index:
if df_static.loc[a_df_name, 'file_name'] not in list_files:
print(f"Downloading {df_static.loc[a_df_name, 'file_name']}...", end='\x1b[1K\r')
import_and_save(a_df_name, raw_data_dir, df_static)
print(f"{df_static.loc[a_df_name, 'file_name']} downloaded")
print('\n\n')
def import_daily (data_dir, db_list, last_update_db, limit):
raw_data_dir = os.path.normcase(f'{data_dir}/raw')
df_daily = db_list[db_list.loc[:,'update']==True]
for a_df_name in df_daily.index:
if a_df_name not in last_update_db.index:
print(f"Creating and downloading {df_daily.loc[a_df_name, 'file_name']}...", end='')
df = import_and_save(a_df_name, raw_data_dir, df_daily)
delta_spaces = " "*(len(f"Creating and downloading {df_daily.loc[a_df_name, 'file_name']}...")-len(f"\r{df_daily.loc[a_df_name, 'file_name']} was downloaded"))
print(f"\r{df_daily.loc[a_df_name, 'file_name']} was downloaded {delta_spaces}")
last_update = get_dates (df, a_df_name, db_list)
last_update_db.loc[a_df_name, 'date'] = last_update
elif last_update_db.loc[a_df_name, 'delta_day'] > limit:
print(f"Downloading {df_daily.loc[a_df_name, 'file_name']}...", end='')
df = import_and_save(a_df_name, raw_data_dir, df_daily)
delta_spaces = " "*(len(f"Downloading {df_daily.loc[a_df_name, 'file_name']}...")-len(f"\r{df_daily.loc[a_df_name, 'file_name']} was downloaded"))
print(f"\r{df_daily.loc[a_df_name, 'file_name']} was downloaded {delta_spaces}")
last_update = get_dates (df, a_df_name, db_list)
last_update_db.loc[a_df_name, 'date'] = last_update
data_dir = file_fct.get_parent_dir(2, 'data')
last_update_db['delta_day'] = last_update_db.apply(lambda x: (pandas.to_datetime('today')-x["date"]).days,axis=1)
print(last_update_db)
last_update_db.loc[:,'date'] = last_update_db.apply(lambda x: x["date"].strftime("%Y-%m-%d"), axis=1)
last_update_db.to_json(f'{data_dir}/last_update.json', orient = "table", indent=4)
print('\n\n')
def get_dates (df, df_name, db_list):
db_list = db_list.fillna('')
if db_list.loc[df_name, 'drop_col'] != '':
df = df.drop(columns=db_list.loc[df_name, 'drop_col'].split(','))
if db_list.loc[df_name, 'id_vars'] != '':
df = pandas.melt(df, id_vars=db_list.loc[df_name, 'id_vars'].split(','), var_name=db_list.loc[df_name, 'index_name'])
df = df.set_index(db_list.loc[df_name, 'index_name'])
if db_list.loc[df_name, 'drop_val'] != '':
df = df.drop(index=db_list.loc[df_name, 'drop_val'].split(','))
if db_list.loc[df_name, 'date_format'] != '':
df.index = pandas.to_datetime(df.index, format=db_list.loc[df_name, 'date_format'])
else:
        df.index = pandas.to_datetime(df.index)
#code will get the proper values like emyield, marketcap, cacl, etc, and supply a string and value to put back into the dataframe.
import pandas as pd
import numpy as np
import logging
import inspect
from scipy import stats
from dateutil.relativedelta import relativedelta
from datetime import datetime
from scipy import stats
import math
class quantvaluedata: #just contains functions, will NEVER actually get the data
def __init__(self,allitems=None):
if allitems is None:
self.allitems=[]
else:
self.allitems=allitems
return
def get_value(self,origdf,key,i=-1):
if key not in origdf.columns and key not in self.allitems and key not in ['timedepositsplaced','fedfundssold','interestbearingdepositsatotherbanks']:
logging.error(key+' not found in allitems')
#logging.error(self.allitems)
return None
df=origdf.copy()
df=df.sort_values('yearquarter')
if len(df)==0:
##logging.error("empty dataframe")
return None
if key not in df.columns:
#logging.error("column not found:"+key)
return None
interested_quarter=df['yearquarter'].iloc[-1]+i+1#because if we want the last quarter we need them equal
if not df['yearquarter'].isin([interested_quarter]).any(): #if the quarter we are interested in is not there
return None
s=df['yearquarter']==interested_quarter
df=df[s]
if len(df)>1:
logging.error(df)
logging.error("to many rows in df")
exit()
pass
value=df[key].iloc[0]
if pd.isnull(value):
return None
return float(value)
def get_sum_quarters(self,df,key,seed,length):
values=[]
        #BIG BUG: this was originally -length-1, which was always truncating the array and producing nans.
periods=range(seed,seed-length,-1)
for p in periods:
values.append(self.get_value(df,key,p))
#logging.info('values:'+str(values))
if pd.isnull(values).any(): #return None if any of the values are None
return None
else:
return float(np.sum(values))
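    # Convention used by the getters in this class: seed=-1 points at the most recent quarter
    # in the statements dataframe (seed=-2 the one before it, and so on), and length=4 sums the
    # trailing four quarters, i.e. a trailing-twelve-month figure.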
def get_market_cap(self,statements_df,prices_df,seed=-1):
total_shares=self.get_value(statements_df,'weightedavedilutedsharesos',seed)
if pd.isnull(total_shares):
return None
end_date=statements_df['end_date'].iloc[seed]
if seed==-1: #get the latest price but see if there was a split between the end date and now
s=pd.to_datetime(prices_df['date'])>pd.to_datetime(end_date)
tempfd=prices_df[s]
splits=tempfd['split_ratio'].unique()
adj=pd.Series(splits).product() #multiply all the splits together to get the total adjustment factor from the last total_shares
total_shares=total_shares*adj
last_price=prices_df.sort_values('date').iloc[-1]['close']
price=float(last_price)
market_cap=price*float(total_shares)
return market_cap
else:
marketcap=self.get_value(statements_df,'marketcap',seed)
if pd.isnull(marketcap):
return None
else:
return marketcap
def get_netdebt(self,statements_df,seed=-1):
shorttermdebt=self.get_value(statements_df,'shorttermdebt',seed)
longtermdebt=self.get_value(statements_df,'longtermdebt',seed)
capitalleaseobligations=self.get_value(statements_df,'capitalleaseobligations',seed)
cashandequivalents=self.get_value(statements_df,'cashandequivalents',seed)
restrictedcash=self.get_value(statements_df,'restrictedcash',seed)
fedfundssold=self.get_value(statements_df,'fedfundssold',seed)
interestbearingdepositsatotherbanks=self.get_value(statements_df,'interestbearingdepositsatotherbanks',seed)
timedepositsplaced=self.get_value(statements_df,'timedepositsplaced',seed)
s=pd.Series([shorttermdebt,longtermdebt,capitalleaseobligations,cashandequivalents,restrictedcash,fedfundssold,interestbearingdepositsatotherbanks,timedepositsplaced]).astype('float')
if pd.isnull(s).all(): #return None if everything is null
return None
m=pd.Series([1,1,1,-1,-1,-1,-1])
netdebt=s.multiply(m).sum()
return float(netdebt)
def get_enterprise_value(self,statements_df,prices_df,seed=-1):
#calculation taken from https://intrinio.com/data-tag/enterprisevalue
marketcap=self.get_market_cap(statements_df,prices_df,seed)
netdebt=self.get_netdebt(statements_df,seed)
totalpreferredequity=self.get_value(statements_df,'totalpreferredequity',seed)
noncontrollinginterests=self.get_value(statements_df,'noncontrollinginterests',seed)
redeemablenoncontrollinginterest=self.get_value(statements_df,'redeemablenoncontrollinginterest',seed)
s=pd.Series([marketcap,netdebt,totalpreferredequity,noncontrollinginterests,redeemablenoncontrollinginterest])
if pd.isnull(s).all() or pd.isnull(marketcap):
return None
return float(s.sum())
def get_ebit(self,df,seed=-1,length=4):
ebit=self.get_sum_quarters(df,'totaloperatingincome',seed,length)
if pd.notnull(ebit):
return float(ebit)
totalrevenue=self.get_sum_quarters(df,'totalrevenue',seed,length)
provisionforcreditlosses=self.get_sum_quarters(df,'provisionforcreditlosses',seed,length)
totaloperatingexpenses=self.get_sum_quarters(df,'totaloperatingexpenses',seed,length)
s=pd.Series([totalrevenue,provisionforcreditlosses,totaloperatingexpenses])
if pd.isnull(s).all():
return None
ebit=(s.multiply(pd.Series([1,-1,-1]))).sum()
if pd.notnull(ebit):
return float(ebit)
return None
def get_emyield(self,statements_df,prices_df,seed=-1,length=4):
ebit=self.get_ebit(statements_df,seed,length)
enterprisevalue=self.get_enterprise_value(statements_df,prices_df,seed)
if pd.isnull([ebit,enterprisevalue]).any() or enterprisevalue==0:
return None
return float(ebit/enterprisevalue)
def get_scalednetoperatingassets(self,statements_df,seed=-1):
"""
SNOA = (Operating Assets Operating Liabilities) / Total Assets
where
OA = total assets cash and equivalents
OL = total assets ST debt LT debt minority interest - preferred stock - book common
oa=ttmsdfcompany.iloc[-1]['totalassets']-ttmsdfcompany.iloc[-1]['cashandequivalents']
ol=ttmsdfcompany.iloc[-1]['totalassets']-ttmsdfcompany.iloc[-1]['netdebt']-ttmsdfcompany.iloc[-1]['totalequityandnoncontrollinginterests']
snoa=(oa-ol)/ttmsdfcompany.iloc[-1]['totalassets']
"""
totalassets=self.get_value(statements_df,'totalassets',seed)
cashandequivalents=self.get_value(statements_df,'cashandequivalents',seed)
netdebt=self.get_netdebt(statements_df,seed)
totalequityandnoncontrollinginterests=self.get_value(statements_df,'totalequityandnoncontrollinginterests',seed)
if pd.isnull(totalassets) or totalassets==0:
return None
s=pd.Series([totalassets,cashandequivalents])
m=pd.Series([1,-1])
oa=s.multiply(m).sum()
s=pd.Series([totalassets,netdebt,totalequityandnoncontrollinginterests])
m=pd.Series([1,-1,-1])
ol=s.multiply(m).sum()
scalednetoperatingassets=(oa-ol)/totalassets
return float(scalednetoperatingassets)
def get_scaledtotalaccruals(self,statements_df,seed=-1,length=4):
netincome=self.get_sum_quarters(statements_df,'netincome',seed,length)
netcashfromoperatingactivities=self.get_sum_quarters(statements_df,'netcashfromoperatingactivities',seed,length)
start_assets=self.get_value(statements_df,'cashandequivalents',seed-length)
end_assets=self.get_value(statements_df,'cashandequivalents',seed)
if pd.isnull([start_assets,end_assets]).any():
return None
totalassets=np.mean([start_assets,end_assets])
if pd.isnull(totalassets):
return None
num=pd.Series([netincome,netcashfromoperatingactivities])
if pd.isnull(num).all():
return None
m=pd.Series([1,-1])
num=num.multiply(m).sum()
den=totalassets
if den==0:
return None
scaledtotalaccruals=num/den
return float(scaledtotalaccruals)
def get_grossmargin(self,statements_df,seed=-1,length=4):
totalrevenue=self.get_sum_quarters(statements_df, 'totalrevenue', seed, length)
totalcostofrevenue=self.get_sum_quarters(statements_df, 'totalcostofrevenue', seed, length)
if pd.isnull([totalrevenue,totalcostofrevenue]).any() or totalcostofrevenue==0:
return None
grossmargin=(totalrevenue-totalcostofrevenue)/totalcostofrevenue
return float(grossmargin)
def get_margingrowth(self,statements_df,seed=-1,length1=20,length2=4):
grossmargins=[]
for i in range(seed,seed-length1,-1):
grossmargins.append(self.get_grossmargin(statements_df, i, length2))
grossmargins=pd.Series(grossmargins)
if pd.isnull(grossmargins).any():
return None
growth=grossmargins.pct_change(periods=1)
growth=growth[pd.notnull(growth)]
if len(growth)==0:
return None
grossmargingrowth=stats.gmean(1+growth)-1
if pd.isnull(grossmargingrowth):
return None
return float(grossmargingrowth)
def get_marginstability(self,statements_df,seed=-1,length1=20,length2=4):
#length1=how far back to go, how many quarters to get 20 quarters
#length2=for each quarter, how far back to go 4 quarters
grossmargins=[]
for i in range(seed,seed-length1,-1):
grossmargins.append(self.get_grossmargin(statements_df, i, length2))
        grossmargins=pd.Series(grossmargins)
# import libraries
import sys
import re
import nltk
nltk.download(['stopwords','punkt','wordnet'])
from nltk import word_tokenize, sent_tokenize
from nltk.corpus import stopwords, wordnet
from nltk.stem.porter import PorterStemmer
from nltk.stem.wordnet import WordNetLemmatizer
from sklearn.metrics import classification_report, confusion_matrix, accuracy_score, precision_score, recall_score, f1_score, cohen_kappa_score
from sklearn.model_selection import train_test_split, GridSearchCV
from sklearn.multioutput import MultiOutputClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier, GradientBoostingClassifier
from sklearn.neural_network import MLPClassifier
from sklearn.pipeline import Pipeline
from sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer, TfidfVectorizer
from sklearn.decomposition import TruncatedSVD
import timeit
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from sqlalchemy import create_engine
import joblib
import pickle
def load_data(database_filepath):
# load data from database
qstring = 'sqlite:///'+database_filepath
engine = create_engine(qstring)
connection = engine.connect()
df = pd.read_sql_table('messages', connection)
X = df['message'].values
Y = df.drop(columns=['id','message','genre'],axis=1).values
category_names = df.columns[3:]
return X, Y, category_names
def tokenize(text):
tokens = word_tokenize(text)
tokens = [w for w in tokens if w not in stopwords.words("english")]
# initiate lemmatizer
lemmatizer = WordNetLemmatizer()
# iterate through each token
clean_tokens = []
for tok in tokens:
lemmed = lemmatizer.lemmatize(tok, pos='v')
# lemmatize, normalize case, and remove leading/trailing white space
clean_tok = re.sub(r"[^a-zA-Z0-9]"," ",lemmed.lower()).strip()
clean_tokens.append(clean_tok)
return clean_tokens
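# A minimal sketch (not part of the original script) of how tokenize() could be
# wired into a multi-output classification pipeline; the build_model() helper and
# the parameter grid below are hypothetical and only illustrate the intended flow.
def build_model():
    pipeline = Pipeline([
        ('vect', CountVectorizer(tokenizer=tokenize)),
        ('tfidf', TfidfTransformer()),
        ('clf', MultiOutputClassifier(RandomForestClassifier()))
    ])
    parameters = {'clf__estimator__n_estimators': [50, 100]}
    return GridSearchCV(pipeline, param_grid=parameters, cv=3)
# Typical usage (assumed database path), together with display_results() defined below:
# X, Y, category_names = load_data('DisasterResponse.db')
# X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.2)
# model = build_model(); model.fit(X_train, Y_train)
# display_results(Y_test, model.predict(X_test), category_names)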
def display_results(y_test, y_pred, category_names):
list_of_reports = []
for col in range(len(category_names)):
report_metric = {}
y1 = y_test.T[col]
y2 = y_pred.T[col]
report_metric['precision'] = precision_score(y1, y2, average='micro')
report_metric['recall'] = recall_score(y1, y2, average='micro')
report_metric['f1'] = f1_score(y1, y2, average='micro')
report_metric['accuracy'] = accuracy_score(y1,y2)
report_metric['kappa'] = cohen_kappa_score(y1,y2)
list_of_reports.append(report_metric)
performance_metrics = pd.DataFrame(list_of_reports)
# coding: utf-8
# ### Importing libraries and magics
# In[1]:
import sys
import os
sys.path.append(os.getcwd()+"/tools/")
#from tester import test_classifier
# In[2]:
#Importing libraries and magics
import sys
from matplotlib.colors import ListedColormap
import matplotlib.patches as mpatches
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import pickle
import seaborn as sns
import re
from sklearn.decomposition import PCA
from sklearn.ensemble import AdaBoostClassifier, RandomForestClassifier
from sklearn.feature_selection import SelectPercentile, SelectKBest
from sklearn.model_selection import GridSearchCV, StratifiedShuffleSplit
from sklearn.neighbors import KNeighborsClassifier, NearestCentroid
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import MaxAbsScaler, StandardScaler
from sklearn.svm import LinearSVC, SVC
from sklearn.preprocessing import MinMaxScaler
#from tester import test_classifier
import warnings
# ### Import the file which contain the dataset to our variable
# In[3]:
# Load the dictionary containing the dataset
with open(os.getcwd()+"/final_project_dataset.pkl", "rb") as data_file:
data_init = pickle.load(data_file)
# ### Converting the dataset from a python dictionary to a pandas dataframe
# In[4]:
#Converting the dataset from a python dictionary to a pandas dataframe
data_df = pd.DataFrame.from_dict(data_init, orient='index')
raw_data = data_df.copy()
# #### Now check the structure of the new data frame to find out the total number of observations and columns present
# In[5]:
data_df.shape
# #### Print the first 5 values of the data frame
# In[6]:
data_df.head()
# #### The dataset contains information on 21 features for 146 employees.
# We can see that some columns have NaN values.
# The "NaN"s are actually strings, so we will replace them with NumPy's NaN so that we can count the non-missing values
# across the variables (columns).
# In[7]:
data_df.replace(to_replace='NaN', value=np.nan, inplace=True)
# In[8]:
data_df.count().sort_values()
# The output above represents the count of non-missing values in each column of the data frame
# We can observe that the dataset is quite sparse, with some variables like Total Payments and Total Stock Value having values for most of the employees, while for others like Loan Advances and Director Fees we have information for only a few employees.
# We want to find the records in the data frame that consist entirely of missing values. From the observation above we can see that the POI variable has a value for all 146 employees, so we can leave it out of this check.
# ##### We can also remove the email_address field since we cannot use it in the analysis, and we will create a temporary copy without the label (POI).
# In[9]:
#dropping 'poi' and 'email_address' variables
data_df = data_df.drop(["email_address"], axis=1)
data_temp = data_df.drop(["poi"], axis=1)
data_temp[data_temp.isnull().all(axis=1)]
# <b>LOCKHART EUGENE E</b> is the employee in the temporary data frame whose values are missing for all variables, so we can drop this employee's record from the original data frame
# In[10]:
data_df = data_df.drop(["LOCKHART EUGENE E"], axis=0)
# #### Next, since some values are related, we would like to rearrange the columns in the following order:
# In[11]:
cols = [
'poi', 'salary', 'bonus', 'long_term_incentive', 'deferred_income',
'deferral_payments', 'loan_advances', 'other', 'expenses', 'director_fees',
'total_payments', 'exercised_stock_options', 'restricted_stock',
'restricted_stock_deferred', 'total_stock_value',
'from_poi_to_this_person', 'shared_receipt_with_poi', 'to_messages',
'from_this_person_to_poi', 'from_messages'
]
data_df = data_df[cols]
data_df.head()
# In[12]:
data_df.replace(to_replace=np.NaN, value=0, inplace=True)
# In[13]:
data_df.head()
# #### Now that the features are in the right order, we can examine the statistics of the dataset.
# In[14]:
data_df.describe()
# ## Outlier Investigation
# My first attempt to spot any possible outliers will be visual.
# We will use Seaborn's pairplot, which presents at the same time the distribution of the variables and a scatter plot representation of them.
# We are using 4 variables ("total_payments", "exercised_stock_options", "restricted_stock", "total_stock_value") from the data set to plot the graph, as these variables have the highest variance.
# In[15]:
sns.pairplot(data=data_df, vars=["total_payments", "exercised_stock_options", "restricted_stock", "total_stock_value"], hue="poi")
# #### There are two datapoints far away from the cluster of the rest. We will use the Total Payments to find them.
# In[16]:
data_df.total_payments.nlargest(2)
# In[17]:
data_df.loc[['TOTAL']]
# #### The first one ‘TOTAL’, is the totals on the Payments Schedule and not a person so it should be removed.
# The second one is not an outlier, it is just the huge payment and stock value of Kenneth Lay. Datapoints like this are not outliers; in fact, anomalies like this may lead us to the rest of the POIs. These extreme values push the rest of the employees to the bottom left corner of the scatterplot.
# In[18]:
data_df.drop("TOTAL", inplace=True)
# In[19]:
sns.pairplot(data=data_df, vars=["total_payments", "exercised_stock_options", "restricted_stock", "total_stock_value"], hue="poi")
# #### With the “TOTAL” removed the scatter plots are much less cluttered and we can see some trends in them.
# We can also notice a negative value in the Restricted Stock variable, an indication that more outliers may exist.
# We can make a first sanity check by verifying that the individual values sum to the totals of each category (Total Payments, Total Stock Value).
# In[20]:
print(data_df.sum()[1:11])
print("---")
print("Sum all 'payment' variables:", sum(data_df.sum()[1:10]))
# In[21]:
print(data_df.sum()[11:15])
print("---")
print("Sum all 'stock' variables:", sum(data_df.sum()[11:14]))
# #### We can see that the totals do not match. We need to check the data employee by employee to find the errors.
# To find the errors we will sum the variables in columns 2-11 and compare the result with the value of the 11th column ("total_payments"). If the values don't match, we will add those employees to a new list.
# In the same way we will compare the sum of the variables in columns 12-15 with the value of column 15 ("total_stock_value"), and if the values don't match we will add the employee's details to the list.
# In[22]:
alist = []
for line in data_df.itertuples():
if sum(line[2:11]) != line[11] or sum(line[12:15]) != line[15]:
alist.append(line[0])
data_df.loc[alist]
# In[23]:
data_df.loc["<NAME>", :]
# In[24]:
data_df.loc["<NAME>", :] = [
False, 0, 0, 0, 0, -102500, 3285, 0, 0, 102500, 3285, -44093, 0, 44093, 0,
0, 0, 0, 0, 0
]
data_df.loc["<NAME>", :] = [
False, 0, 0, 0, 0, 0, 137864, 0, 0, 0, 137864, -2604490, 15456290, 2604490,
15456290, 0, 463, 523, 1, 29
]
# #### Now that we do not have any more outliers we can plot the two aggregated variables, Total Payments and Total Stock Value.
# In[25]:
fig1, ax = plt.subplots()
for poi, data in data_df.groupby(by="poi"):
ax.plot(data['total_payments'],data['total_stock_value'],'o', label=poi)
ax.legend()
plt.xscale('symlog')
plt.yscale('symlog')
plt.xlabel("Total Payments")
plt.ylabel("Total Stock Value")
plt.show()
# We can see that there are some persons with zero salary or bonus (or both) and none of them is a POI. Since we have only a small number of POIs, it might be beneficial to remove these persons to get a denser dataset. We will create a copy of the dataset with those persons removed for future evaluation.
# In[26]:
data_nbs = data_df[data_df.salary > 0]
data_nbs = data_nbs[data_nbs.bonus > 0]
data_nbs.shape
# We can notice that the indexes / names in the dataset are in the form Surname Name Initial. We will check all the indexes using a regular expression and print the indexes that do not follow this pattern.
# In[27]:
for index in data_df.index:
if re.match('^[A-Z]+\s[A-Z]+(\s[A-Z])?$', index):
continue
else:
print(index)
# In[28]:
data_df.loc[["THE TRAVEL AGENCY IN THE PARK"]]
# There is a “suspicious” index: THE TRAVEL AGENCY IN THE PARK obviously isn’t the name of an employee, so we need to drop it from the data set.
# In[29]:
data_df = data_df.drop(["THE TRAVEL AGENCY IN THE PARK"], axis=0)
# #### As a final step in outlier investigation, we need to search for extreme values. Extreme values are essential information and should be kept, but let’s spot them first.
# We will be using Tukey Fences with 3 IQRs for every single feature.
# In[30]:
def outliers_iqr(dataframe, features):
result = set()
for feature in features:
ys = dataframe[[feature]]
quartile_1, quartile_3 = np.percentile(ys, [25, 75])
iqr = quartile_3 - quartile_1
lower_bound = int(round(quartile_1 - (iqr * 3)))
upper_bound = int(round(quartile_3 + (iqr * 3)))
partial_result = list(np.where((ys > upper_bound) | (ys < lower_bound))[0])
print(feature, len(partial_result))
result.update(partial_result)
print("-----------------------------------------------------")
print("")
print("Total number of records with extreme values: " + str(len(result)))
return list(result)
# In[31]:
cols.remove("poi")
xtr_values =outliers_iqr(data_df, cols)
# In[32]:
a = data_df.loc[:, "poi"].value_counts()
poi_density = a[1]/(a[0]+a[1])
print("POI density on the original dataset: " + str(poi_density))
a = data_df.iloc[xtr_values]["poi"].value_counts()
poi_density_xtr = a[1]/(a[0]+a[1])
print("POI density on the subset with the extreme values: " + str(poi_density_xtr))
print("Difference: " +str((poi_density_xtr - poi_density) / poi_density))
# We see that in the subset of employees with an extreme value in at least one variable, there are 28% more POIs than in the general dataset. This justifies our intuition that the extreme values are related to being a POI, thus we will not remove them.
# Now that the dataset is clear of outliers we can find the final dimensions, split the labels from the features, and get a first scoring as a baseline for the rest of the analysis. I will use the LinearSVC classifier, which seems the most appropriate to begin with.
# In[33]:
data_df.shape
# In[34]:
data_df.loc[:, "poi"].value_counts()
# In[35]:
def do_split(data):
X = data.copy()
#Removing the poi labels and put them in a separate array, transforming it
#from True / False to 0 / 1
y = X.pop("poi").astype(int)
return X, y,
# In[36]:
X, y = do_split(data_nbs)
# In[37]:
#test_classifier() demands the dataset in a dictionary and the features labels
#in a list with 'poi' first.
features = data_df.columns.tolist()
data = data_df.to_dict(orient='index')
# test_classifier(LinearSVC(random_state=42), data, features)
# We are interested in the ability of the classifier not to label as a Person Of Interest (POI) a person that is not one, and also to find all the POIs, so the metrics we are most interested in are Precision and Recall. Since we want to maximize both at the same time, we will try to maximize the F1 score, which can be interpreted as a weighted average of precision and recall.
# We can see that the initial scores are very low, with the LinearSVC classifier being poor at classifying the right persons (a quick cross-validated baseline sketch follows below).
# So now we will engineer transformed features from the variables and add them to the data set. We will compute the proportion of each variable, add the proportions to the data frame, and compare the original variables with the new transformed features.
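# In[ ]:
# A minimal sketch (not part of the original notebook) of how a cross-validated
# baseline F1 score could be computed directly; the split sizes below are assumptions.
from sklearn.model_selection import cross_val_score
cv = StratifiedShuffleSplit(n_splits=50, test_size=0.3, random_state=42)
baseline_f1 = cross_val_score(LinearSVC(random_state=42), X, y, scoring='f1', cv=cv).mean()
print("Baseline F1:", baseline_f1)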
# ## Optimize Feature Selection/Engineering
# #### In some cases the value of a variable is less important than its proportion to an aggregated value.
# In[38]:
data = data_df.copy()
data.loc[:, "salary_p"] = data.loc[:, "salary"]/data.loc[:, "total_payments"]
data.loc[:, "deferral_payments_p"] = data.loc[:, "deferral_payments"]/data.loc[:, "total_payments"]
data.loc[:, "loan_advances_p"] = data.loc[:, "loan_advances"]/data.loc[:, "total_payments"]
data.loc[:, "bonus_p"] = data.loc[:, "bonus"]/data.loc[:, "total_payments"]
data.loc[:, "deferred_income_p"] = data.loc[:, "deferred_income"]/data.loc[:, "total_payments"]
data.loc[:, "expenses_p"] = data.loc[:, "expenses"]/data.loc[:, "total_payments"]
data.loc[:, "other_p"] = data.loc[:, "other"]/data.loc[:, "total_payments"]
data.loc[:, "long_term_incentive_p"] = data.loc[:, "long_term_incentive"]/data.loc[:, "total_payments"]
data.loc[:, "director_fees_p"] = data.loc[:, "director_fees"]/data.loc[:, "total_payments"]
data.loc[:, "restricted_stock_deferred_p"] = data.loc[:, "restricted_stock_deferred"]/data.loc[:, "total_stock_value"]
data.loc[:, "exercised_stock_options_p"] = data.loc[:, "exercised_stock_options"]/data.loc[:, "total_stock_value"]
data.loc[:, "restricted_stock_p"] = data.loc[:, "restricted_stock"]/data.loc[:, "total_stock_value"]
data.loc[:, "from_poi_to_this_person_p"] = data.loc[:, "from_poi_to_this_person"]/data.loc[:, "to_messages"]
data.loc[:, "shared_receipt_with_poi_p"] = data.loc[:, "shared_receipt_with_poi"]/data.loc[:, "to_messages"]
data.loc[:, "from_this_person_to_poi_p"] = data.loc[:, "from_this_person_to_poi"]/data.loc[:, "from_messages"]
data.replace(to_replace=np.NaN, value=0, inplace=True)
data.replace(to_replace=np.inf, value=0, inplace=True)
data.replace(to_replace=-np.inf, value=0, inplace=True)
# ### Now we can plot the importance of the features of the “enriched” dataset by using the same classifier.
# In[39]:
def plot_importance(dataset):
X, y = do_split(dataset)
selector = SelectPercentile(percentile=100)
a = selector.fit(X, y)
plt.figure(figsize=(12,9))
sns.barplot(y=X.columns, x=a.scores_)
# In[40]:
plot_importance(data)
# Comparing the newly created features with the originals, we can see that the proportions of “Long Term Incentive”, “Restricted Stock Deferred” and “From This Person to POI” score higher than the original features. We will keep these and remove the original values, to avoid biasing the model towards a specific feature by using both the original value and its proportion.
# In[41]:
#Adding the proportions
data_df.loc[:, "long_term_incentive_p"] = data_df.loc[:, "long_term_incentive"]/data_df.loc[:, "total_payments"]
data_df.loc[:, "restricted_stock_deferred_p"] = data_df.loc[:, "restricted_stock_deferred"]/data_df.loc[:, "total_stock_value"]
data_df.loc[:, "from_this_person_to_poi_p"] = data_df.loc[:, "from_this_person_to_poi"]/data_df.loc[:, "from_messages"]
#Removing the original values.
data_df.drop("long_term_incentive", axis=1)
data_df.drop("restricted_stock_deferred", axis=1)
data_df.drop("from_this_person_to_poi", axis=1)
#Correcting NaN and infinity values created by zero divisions
data_df.replace(to_replace=np.NaN, value=0, inplace=True)
data_df.replace(to_replace=np.inf, value=0, inplace=True)
data_df.replace(to_replace=-np.inf, value=0, inplace=True)
# In[42]:
plot_importance(data_df)
# ### Sequential Exception Technique (SET):
#
# Identify the POIs using SET and print their names.
# In[43]:
SET_data = data_df.copy()
# In[44]:
cols = [0,11,12,13,14,15,16,17,18,19,20,21,22]
SET_data.drop(SET_data.columns[cols], axis=1, inplace=True)
# In[45]:
SET_data.head()
# In[46]:
cols = [
'salary', 'bonus', 'long_term_incentive', 'deferred_income',
'deferral_payments', 'loan_advances', 'other', 'expenses', 'director_fees',
'total_payments'
]
# In[47]:
scaler = MinMaxScaler()
SET_data[cols] = scaler.fit_transform(SET_data[cols])
# In[48]:
e_names = pd.Series(SET_data.index)
# In[ ]:
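# Sequential Exception Technique: repeatedly shuffle the employees, add them one
# at a time to a growing set, and after each addition record the dissimilarity
# (DS, here the sum of the feature variances) and the cardinality of the set;
# employees whose addition sharply increases DS are candidate exceptions.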
def SET(m,SET_data):
# Set the value of parameter m = the no. of iterations you require
Card = pd.Series(np.NAN)
DS=pd.Series(np.NAN)
idx_added = pd.Series(np.NAN)
pos = 0
for j in range(1,m+1):
new_indices = np.random.choice(e_names.index,len(e_names),replace=False)
for i in pd.Series(new_indices).index:
idx_added[i+pos] = new_indices[i]
DS[i+pos]=sum(np.var(SET_data.loc[e_names[new_indices[:i+1]]]))
Card[i+pos] = len(e_names[:i+1])
pos = pos+i+1
df = pd.DataFrame({'Index_added':idx_added,'DS':DS,'Card':Card})
__author__ = "<NAME>"
__license__ = "Apache 2"
__version__ = "1.0.0"
__maintainer__ = "<NAME>"
__website__ = "https://llp.berkeley.edu/asgari/"
__git__ = "https://github.com/ehsanasgari/"
__email__ = "<EMAIL>"
__project__ = "1000Langs -- Super parallel project at CIS LMU"
import sys
import pandas as pd
sys.path.append('../')
from utility.file_utility import FileUtility
import requests
import codecs
import json
from multiprocessing import Pool
import tqdm
import collections
from pandas import Series
class BDPAPl(object):
'''
PBC retrieving from the bible digital platform
'''
def __init__(self, key, output_path):
'''
Constructor
'''
# set the parameters
self.key = key
self.output_path = output_path
FileUtility.ensure_dir(self.output_path + '/api_intermediate/')
FileUtility.ensure_dir(self.output_path + '/reports/')
self.to_double_check=list()
# check the API connection
response = requests.get('https://dbt.io/api/apiversion?key=' + self.key + '&v=2')
if response.status_code != 200:
print('Enter a correct API code')
return False
else:
response = json.loads(response.content)
print('Connected successfully to the bible digital platform v ' + response['Version'])
self.load_book_map()
def create_BPC(self, nump=20,update_meta_data=False, override=False, repeat=4):
'''
Creating PBC
'''
# update metadata file through api call
if update_meta_data:
self.update_meta_data()
# read the metadata file and create the dataframe
for line in codecs.open('../meta/api_volumes.txt','r','utf-8'):
books=json.loads(line)
books_filtered=([x for x in books if x['media']=='text'])
df=pd.DataFrame(books_filtered)
df['version'] = df[['version_code','volume_name']].apply(lambda x: ' # '.join(x), axis=1)
df['trans_ID']=df['fcbh_id'].str[0:6]
self.df=df[['language_iso','trans_ID','fcbh_id','language_english','language_name','version']]
# bible retrieval
self.id2iso_dict = Series(self.df['language_iso'].values, index=self.df['trans_ID']).to_dict()
self.id2langeng_dict = Series(self.df['language_english'].values, index=self.df['trans_ID']).to_dict()
self.id2lang_dict = Series(self.df['language_name'].values, index=self.df['trans_ID']).to_dict()
import numpy as np
import pandas as pd
import pytest
import scipy.stats
from pyextremes import EVA, get_model
@pytest.fixture(scope="function")
def eva_model(battery_wl_preprocessed) -> EVA:
return EVA(data=battery_wl_preprocessed)
@pytest.fixture(scope="function")
def eva_model_bm(battery_wl_preprocessed) -> EVA:
eva_model = EVA(data=battery_wl_preprocessed)
eva_model.get_extremes(
method="BM",
extremes_type="high",
block_size="1Y",
errors="raise",
)
return eva_model
@pytest.fixture(scope="function")
def eva_model_pot(battery_wl_preprocessed) -> EVA:
eva_model = EVA(data=battery_wl_preprocessed)
eva_model.get_extremes(
method="POT",
extremes_type="high",
threshold=1.35,
r="24H",
)
return eva_model
@pytest.fixture(scope="function")
def eva_model_bm_mle(battery_wl_preprocessed) -> EVA:
eva_model = EVA(data=battery_wl_preprocessed)
eva_model.get_extremes(
method="BM",
extremes_type="high",
block_size="1Y",
errors="raise",
)
eva_model.fit_model("MLE")
return eva_model
@pytest.fixture(scope="function")
def eva_model_bm_emcee(battery_wl_preprocessed) -> EVA:
eva_model = EVA(data=battery_wl_preprocessed)
eva_model.get_extremes(
method="BM",
extremes_type="high",
block_size="1Y",
errors="raise",
)
eva_model.fit_model("Emcee", n_walkers=10, n_samples=100)
return eva_model
@pytest.fixture(scope="function")
def eva_model_pot_mle(battery_wl_preprocessed) -> EVA:
eva_model = EVA(data=battery_wl_preprocessed)
eva_model.get_extremes(
method="POT",
extremes_type="high",
threshold=1.35,
r="24H",
)
eva_model.fit_model("MLE")
return eva_model
class TestEVA:
def test_init_errors(self):
with pytest.raises(
TypeError, match=r"invalid type.*'data' argument.*pandas.Series"
):
EVA(data=1)
with pytest.warns(RuntimeWarning, match=r"'data'.*not numeric.*converted"):
eva_model = EVA(
data=pd.Series(
data=["1", "2", "3"],
index=pd.DatetimeIndex(["2020", "2021", "2022"]),
)
)
assert np.allclose(eva_model.data.values, [1, 2, 3])
with pytest.raises(TypeError, match=r"invalid dtype.*'data' argument.*numeric"):
EVA(
data=pd.Series(
data=["a", "b", "c"],
index=pd.DatetimeIndex(["2020", "2021", "2022"]),
)
)
with pytest.raises(TypeError, match=r"index of 'data'.*date-time.*not"):
EVA(data=pd.Series(data=[1, 2, 3], index=["2020", "2021", "2022"]))
import unittest
import attrdict as ad
import pandas as pd
# our imports
import emission.core.wrapper.motionactivity as ecwm
import emission.analysis.intake.segmentation.section_segmentation_methods.flip_flop_detection as eaissf
# Test imports
import emission.tests.common as etc
class TestFlipFlopDetection(unittest.TestCase):
def test_GetStreakOne(self):
ffd = eaissf.FlipFlopDetection([], None)
flip_flop_list = [False, False, False, True, True, False, False, False]
sss_list = ffd.get_streaks(flip_flop_list)
self.assertEqual(sss_list, [(3, 4)])
def test_GetStreakMixed(self):
ffd = eaissf.FlipFlopDetection([], None)
flip_flop_list = [False, False, True, False, False, False, False, True, False, True, False, True, True, False, False]
sss_list = ffd.get_streaks(flip_flop_list)
self.assertEqual(sss_list, [(2,2), (7,7), (9,9), (11, 12)])
def test_GetStreakLast(self):
ffd = eaissf.FlipFlopDetection([], None)
flip_flop_list = [False, False, True, False, False, False, False, True, False, True, False, True, True]
sss_list = ffd.get_streaks(flip_flop_list)
self.assertEqual(sss_list, [(2,2), (7,7), (9,9), (11,11)])
def test_GetStreakOneFF(self):
ffd = eaissf.FlipFlopDetection([], None)
flip_flop_list = [False]
sss_list = ffd.get_streaks(flip_flop_list)
self.assertEqual(sss_list, [])
flip_flop_list = [True]
sss_list = ffd.get_streaks(flip_flop_list)
self.assertEqual(sss_list, [(0,0)])
flip_flop_list = [True, False]
sss_list = ffd.get_streaks(flip_flop_list)
self.assertEqual(sss_list, [(0,0)])
flip_flop_list = [False, True]
sss_list = ffd.get_streaks(flip_flop_list)
self.assertEqual(sss_list, [(1,1)])
flip_flop_list = [True, False, True, True]
sss_list = ffd.get_streaks(flip_flop_list)
self.assertEqual(sss_list, [(0,0), (2,2)])
def test_ShouldMerge(self):
ffd = eaissf.FlipFlopDetection([(ad.AttrDict({'idx': 'a', 'ts': 100}),
ad.AttrDict({'idx': 'b', 'ts': 200}))], None)
sm = ffd.should_merge(0, 0)
self.assertEqual(sm.direction, eaissf.Direction.NONE)
def test_MergeStreaksPass1(self):
ffd = eaissf.FlipFlopDetection([], None)
unmerged_change_list = [({'idx': 'a'}, {'idx': 'b'}),
({'idx': 'b'}, {'idx': 'c'}),
({'idx': 'c'}, {'idx': 'd'}),
({'idx': 'd'}, {'idx': 'e'}),
({'idx': 'e'}, {'idx': 'f'}),
({'idx': 'f'}, {'idx': 'g'}),
({'idx': 'g'}, {'idx': 'h'}),
({'idx': 'h'}, {'idx': 'i'})]
forward_merged_list = [
ad.AttrDict({"start": 1, "end": 2, "final_mode": ecwm.MotionTypes.IN_VEHICLE}),
ad.AttrDict({"start": 6, "end": 6, "final_mode": ecwm.MotionTypes.WALKING})
]
backward_merged_list = [
ad.AttrDict({"start": 4, "end": 4, "final_mode": ecwm.MotionTypes.BICYCLING})
]
ret_list = ffd.merge_streaks_pass_1(unmerged_change_list, forward_merged_list,
backward_merged_list, [])
self.assertEqual(ret_list, [({'idx': 'a'}, {'idx': 'd'}),
({'idx': 'd'}, {'idx': 'e'}),
({'idx': 'e'}, {'idx': 'h'}),
({'idx': 'h'}, {'idx': 'i'})])
unmerged_change_list = [({'idx': 'a'}, {'idx': 'b'}),
({'idx': 'b'}, {'idx': 'c'}),
({'idx': 'c'}, {'idx': 'd'}),
({'idx': 'd'}, {'idx': 'e'}),
({'idx': 'e'}, {'idx': 'f'}),
({'idx': 'f'}, {'idx': 'g'}),
({'idx': 'g'}, {'idx': 'h'}),
({'idx': 'h'}, {'idx': 'i'}),
({'idx': 'k'}, {'idx': 'l'}),
({'idx': 'm'}, {'idx': 'n'}),
({'idx': 'o'}, {'idx': 'p'})]
forward_merged_list = [
ad.AttrDict({"start": 1, "end": 2, "final_mode": ecwm.MotionTypes.IN_VEHICLE}),
ad.AttrDict({"start": 6, "end": 6, "final_mode": ecwm.MotionTypes.WALKING})
]
backward_merged_list = [
ad.AttrDict({"start": 4, "end": 4, "final_mode": ecwm.MotionTypes.BICYCLING})
]
new_merged_list = [
ad.AttrDict({"start": 8, "end": 10, "final_mode": ecwm.MotionTypes.BICYCLING})
]
ret_list = ffd.merge_streaks_pass_1(unmerged_change_list, forward_merged_list,
backward_merged_list, new_merged_list)
self.assertEqual(ret_list, [({'idx': 'a'}, {'idx': 'd'}),
({'idx': 'd'}, {'idx': 'e'}),
({'idx': 'e'}, {'idx': 'h'}),
({'idx': 'h'}, {'idx': 'i'}),
({'idx': 'k', 'type': ecwm.MotionTypes.BICYCLING}, {'idx': 'p'})])
def test_GetSectionSpeed(self):
ffd = eaissf.FlipFlopDetection([], None)
loc_points = pd.DataFrame()
with_speed_loc_points = pd.DataFrame()
points_before = pd.DataFrame()
import pandas as pd
from catboost import Pool
import shap
import numpy as np
import sys
from plotly.offline import init_notebook_mode
from IPython.core.display import display, HTML
import plotly.express as px
from catboost import CatBoostRegressor
import math
from sklearn.metrics import mean_absolute_error
import plotly.graph_objs as go
import plotly.io as pio
pd.set_option('display.max_columns', None)
np.set_printoptions(precision=3)
#shows all entries in an array
np.set_printoptions(threshold=sys.maxsize)
# Plotly
init_notebook_mode(connected=True)
pio.renderers.default = "svg"
display(HTML("<style>.container { width:65% !important; }</style>"))
print('libraries loaded')
# read in csv files
df_2019 = pd.read_csv('../calgary_weather_data/en_climate_daily_AB_3031094_2019_P1D.csv')
df_2020 = pd.read_csv('../calgary_weather_data/en_climate_daily_AB_3031094_2020_P1D.csv')
df_2021 = pd.read_csv('../calgary_weather_data/en_climate_daily_AB_3031094_2021_P1D.csv')
print('dfs loaded')
display(df_2019.head())
display(len(df_2019))
display(df_2020.head())
display(len(df_2020))
display(df_2021.head())
display(len(df_2021))
# combine all dfs into one
df = pd.concat([df_2019, df_2020, df_2021])
df.reset_index(inplace=True)
# confirm new df row count is same as the other 3 dfs
print('New df Row Count:',len(df))
print('New df row counts are correct:', len(df)== (len(df_2021)+len(df_2020)+len(df_2019)))
# quick describe including categorical features
df.describe(include='all')
df.columns
blank_columns = ['Data Quality','Total Rain (mm)','Total Rain Flag', 'Total Snow (cm)', 'Total Snow Flag' ,'Snow on Grnd Flag']
df.drop(labels = blank_columns,axis = 1, inplace= True)
df.groupby(by = 'Dir of Max Gust Flag').count()
df.dtypes
df['Date/Time'] = pd.to_datetime(arg = df['Date/Time'], format = '%Y-%m-%d')
df['Date/Time'].dtypes
# look for NaNs, missing values, or duplicates
data = df
print('There are %i NaNs' % data.isna().sum().sum())
print('There are %i missing values' % data.isnull().sum().sum())
if data.duplicated().any() == False:
print("There are no duplicate rows")
else:
print('There are duplicate rows')
print(data.duplicated())
### MISSING DATA TABLE
total = data.isnull().sum().sort_values(ascending=False)
percent = (data.isnull().sum() / data.isnull().count()).sort_values(ascending=False)
missing_data = pd.concat([total, percent], axis=1, keys=['Total_count', 'Percent'])
missing_data
#define missing temp data df
df_miss_temp = df[df['Max Temp (°C)'].isna()]
df_miss_temp
df_miss_temp.describe(include = 'all', datetime_is_numeric=True)
# plot all dates vs temp
pio.renderers.default = "browser"
data = df
fig = px.line(data, x="Date/Time", y="Max Temp (°C)")
fig.show()
# make a df where rows with a blank Max Temp are filled with a large sentinel value to see where the gaps are
df_max_temp = df.copy()
df_max_temp.loc[np.isnan(df['Max Temp (°C)']), 'Max Temp (°C)'] = 75
data = df_max_temp
fig = px.line(data, x="Date/Time", y="Max Temp (°C)")
fig.show()
last_date = '2021-03-16'
drop_index = df[df['Date/Time'] > last_date].index
df.drop(index=drop_index, inplace= True)
df.reset_index(drop = True, inplace=True)
len(df[df['Date/Time'] > last_date])
# check new blank row counts
data = df
### MISSING DATA TABLE
total = data.isnull().sum().sort_values(ascending=False)
percent = (data.isnull().sum() / data.isnull().count()).sort_values(ascending=False)
missing_data = pd.concat([total, percent], axis=1, keys=['Total_count', 'Percent'])
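# A hypothetical continuation (not part of the original notebook): once the missing
# rows are handled, the CatBoostRegressor imported above could be fit to predict
# Max Temp; the feature list and split below are assumptions for illustration only.
from sklearn.model_selection import train_test_split
features = ['Year', 'Month', 'Day'] # assumed to exist in the Environment Canada CSV layout
target = 'Max Temp (°C)'
model_df = df.dropna(subset=[target])
X_train, X_test, y_train, y_test = train_test_split(model_df[features], model_df[target], random_state=42)
model = CatBoostRegressor(iterations=200, verbose=0)
model.fit(X_train, y_train)
print('MAE:', mean_absolute_error(y_test, model.predict(X_test)))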
# -*- coding: utf-8 -*-
# Arithmetc tests for DataFrame/Series/Index/Array classes that should
# behave identically.
from datetime import timedelta
import operator
import pytest
import numpy as np
import pandas as pd
import pandas.util.testing as tm
from pandas.core import ops
from pandas.errors import NullFrequencyError
from pandas._libs.tslibs import IncompatibleFrequency
from pandas import (
Timedelta, Timestamp, NaT, Series, TimedeltaIndex, DatetimeIndex)
# ------------------------------------------------------------------
# Fixtures
@pytest.fixture
def tdser():
"""
Return a Series with dtype='timedelta64[ns]', including a NaT.
"""
return Series(['59 Days', '59 Days', 'NaT'], dtype='timedelta64[ns]')
# ------------------------------------------------------------------
# Numeric dtypes Arithmetic with Timedelta Scalar
class TestNumericArraylikeArithmeticWithTimedeltaScalar(object):
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="block.eval incorrect",
strict=True))
])
@pytest.mark.parametrize('index', [
pd.Int64Index(range(1, 11)),
pd.UInt64Index(range(1, 11)),
pd.Float64Index(range(1, 11)),
pd.RangeIndex(1, 11)],
ids=lambda x: type(x).__name__)
@pytest.mark.parametrize('scalar_td', [
Timedelta(days=1),
Timedelta(days=1).to_timedelta64(),
Timedelta(days=1).to_pytimedelta()],
ids=lambda x: type(x).__name__)
def test_numeric_arr_mul_tdscalar(self, scalar_td, index, box):
# GH#19333
if (box is Series and
type(scalar_td) is timedelta and index.dtype == 'f8'):
raise pytest.xfail(reason="Cannot multiply timedelta by float")
expected = pd.timedelta_range('1 days', '10 days')
index = tm.box_expected(index, box)
expected = tm.box_expected(expected, box)
result = index * scalar_td
tm.assert_equal(result, expected)
commute = scalar_td * index
tm.assert_equal(commute, expected)
@pytest.mark.parametrize('box', [pd.Index, Series, pd.DataFrame])
@pytest.mark.parametrize('index', [
pd.Int64Index(range(1, 3)),
pd.UInt64Index(range(1, 3)),
pd.Float64Index(range(1, 3)),
pd.RangeIndex(1, 3)],
ids=lambda x: type(x).__name__)
@pytest.mark.parametrize('scalar_td', [
Timedelta(days=1),
Timedelta(days=1).to_timedelta64(),
Timedelta(days=1).to_pytimedelta()],
ids=lambda x: type(x).__name__)
def test_numeric_arr_rdiv_tdscalar(self, scalar_td, index, box):
if box is Series and type(scalar_td) is timedelta:
raise pytest.xfail(reason="TODO: Figure out why this case fails")
if box is pd.DataFrame and isinstance(scalar_td, timedelta):
raise pytest.xfail(reason="TODO: Figure out why this case fails")
expected = TimedeltaIndex(['1 Day', '12 Hours'])
index = tm.box_expected(index, box)
expected = tm.box_expected(expected, box)
result = scalar_td / index
tm.assert_equal(result, expected)
with pytest.raises(TypeError):
index / scalar_td
# ------------------------------------------------------------------
# Timedelta64[ns] dtype Arithmetic Operations
class TestTimedeltaArraylikeAddSubOps(object):
# Tests for timedelta64[ns] __add__, __sub__, __radd__, __rsub__
# -------------------------------------------------------------
# Invalid Operations
@pytest.mark.parametrize('box', [pd.Index, Series, pd.DataFrame],
ids=lambda x: x.__name__)
def test_td64arr_add_str_invalid(self, box):
# GH#13624
tdi = TimedeltaIndex(['1 day', '2 days'])
tdi = tm.box_expected(tdi, box)
with pytest.raises(TypeError):
tdi + 'a'
with pytest.raises(TypeError):
'a' + tdi
@pytest.mark.parametrize('box', [pd.Index, Series, pd.DataFrame],
ids=lambda x: x.__name__)
@pytest.mark.parametrize('other', [3.14, np.array([2.0, 3.0])])
@pytest.mark.parametrize('op', [operator.add, ops.radd,
operator.sub, ops.rsub],
ids=lambda x: x.__name__)
def test_td64arr_add_sub_float(self, box, op, other):
tdi = TimedeltaIndex(['-1 days', '-1 days'])
tdi = tm.box_expected(tdi, box)
if box is pd.DataFrame and op in [operator.add, operator.sub]:
pytest.xfail(reason="Tries to align incorrectly, "
"raises ValueError")
with pytest.raises(TypeError):
op(tdi, other)
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Tries to cast df to "
"Period",
strict=True,
raises=IncompatibleFrequency))
], ids=lambda x: x.__name__)
@pytest.mark.parametrize('freq', [None, 'H'])
def test_td64arr_sub_period(self, box, freq):
# GH#13078
# not supported, check TypeError
p = pd.Period('2011-01-01', freq='D')
idx = TimedeltaIndex(['1 hours', '2 hours'], freq=freq)
idx = tm.box_expected(idx, box)
with pytest.raises(TypeError):
idx - p
with pytest.raises(TypeError):
p - idx
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="broadcasts along "
"wrong axis",
raises=ValueError,
strict=True))
], ids=lambda x: x.__name__)
@pytest.mark.parametrize('pi_freq', ['D', 'W', 'Q', 'H'])
@pytest.mark.parametrize('tdi_freq', [None, 'H'])
def test_td64arr_sub_pi(self, box, tdi_freq, pi_freq):
# GH#20049 subtracting PeriodIndex should raise TypeError
tdi = TimedeltaIndex(['1 hours', '2 hours'], freq=tdi_freq)
dti = Timestamp('2018-03-07 17:16:40') + tdi
pi = dti.to_period(pi_freq)
# TODO: parametrize over box for pi?
tdi = tm.box_expected(tdi, box)
with pytest.raises(TypeError):
tdi - pi
# -------------------------------------------------------------
# Binary operations td64 arraylike and datetime-like
@pytest.mark.parametrize('box', [pd.Index, Series, pd.DataFrame],
ids=lambda x: x.__name__)
def test_td64arr_sub_timestamp_raises(self, box):
idx = TimedeltaIndex(['1 day', '2 day'])
idx = tm.box_expected(idx, box)
msg = "cannot subtract a datelike from|Could not operate"
with tm.assert_raises_regex(TypeError, msg):
idx - Timestamp('2011-01-01')
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Returns object dtype",
strict=True))
], ids=lambda x: x.__name__)
def test_td64arr_add_timestamp(self, box):
idx = TimedeltaIndex(['1 day', '2 day'])
expected = DatetimeIndex(['2011-01-02', '2011-01-03'])
idx = tm.box_expected(idx, box)
expected = tm.box_expected(expected, box)
result = idx + Timestamp('2011-01-01')
tm.assert_equal(result, expected)
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Returns object dtype",
strict=True))
], ids=lambda x: x.__name__)
def test_td64_radd_timestamp(self, box):
idx = TimedeltaIndex(['1 day', '2 day'])
expected = DatetimeIndex(['2011-01-02', '2011-01-03'])
idx = tm.box_expected(idx, box)
expected = tm.box_expected(expected, box)
# TODO: parametrize over scalar datetime types?
result = Timestamp('2011-01-01') + idx
tm.assert_equal(result, expected)
# ------------------------------------------------------------------
# Operations with int-like others
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Attempts to broadcast "
"incorrectly",
strict=True, raises=ValueError))
], ids=lambda x: x.__name__)
def test_td64arr_add_int_series_invalid(self, box, tdser):
tdser = tm.box_expected(tdser, box)
err = TypeError if box is not pd.Index else NullFrequencyError
with pytest.raises(err):
tdser + Series([2, 3, 4])
@pytest.mark.parametrize('box', [
pd.Index,
pytest.param(Series,
marks=pytest.mark.xfail(reason="GH#19123 integer "
"interpreted as "
"nanoseconds",
strict=True)),
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Attempts to broadcast "
"incorrectly",
strict=True, raises=ValueError))
], ids=lambda x: x.__name__)
def test_td64arr_radd_int_series_invalid(self, box, tdser):
tdser = tm.box_expected(tdser, box)
err = TypeError if box is not pd.Index else NullFrequencyError
with pytest.raises(err):
Series([2, 3, 4]) + tdser
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Attempts to broadcast "
"incorrectly",
strict=True, raises=ValueError))
], ids=lambda x: x.__name__)
def test_td64arr_sub_int_series_invalid(self, box, tdser):
tdser = tm.box_expected(tdser, box)
err = TypeError if box is not pd.Index else NullFrequencyError
with pytest.raises(err):
tdser - Series([2, 3, 4])
@pytest.mark.parametrize('box', [pd.Index, Series, pd.DataFrame],
ids=lambda x: x.__name__)
@pytest.mark.xfail(reason='GH#19123 integer interpreted as nanoseconds',
strict=True)
def test_td64arr_rsub_int_series_invalid(self, box, tdser):
tdser = tm.box_expected(tdser, box)
with pytest.raises(TypeError):
Series([2, 3, 4]) - tdser
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Tries to broadcast "
"incorrectly",
strict=True, raises=ValueError))
], ids=lambda x: x.__name__)
def test_td64arr_add_intlike(self, box):
# GH#19123
tdi = TimedeltaIndex(['59 days', '59 days', 'NaT'])
ser = tm.box_expected(tdi, box)
err = TypeError if box is not pd.Index else NullFrequencyError
other = Series([20, 30, 40], dtype='uint8')
# TODO: separate/parametrize
with pytest.raises(err):
ser + 1
with pytest.raises(err):
ser - 1
with pytest.raises(err):
ser + other
with pytest.raises(err):
ser - other
with pytest.raises(err):
ser + np.array(other)
with pytest.raises(err):
ser - np.array(other)
with pytest.raises(err):
ser + pd.Index(other)
with pytest.raises(err):
ser - pd.Index(other)
@pytest.mark.parametrize('box', [pd.Index, Series, pd.DataFrame],
ids=lambda x: x.__name__)
@pytest.mark.parametrize('scalar', [1, 1.5, np.array(2)])
def test_td64arr_add_sub_numeric_scalar_invalid(self, box, scalar, tdser):
if box is pd.DataFrame and isinstance(scalar, np.ndarray):
# raises ValueError
pytest.xfail(reason="DataFrame to broadcast incorrectly")
tdser = tm.box_expected(tdser, box)
err = TypeError
if box is pd.Index and not isinstance(scalar, float):
err = NullFrequencyError
with pytest.raises(err):
tdser + scalar
with pytest.raises(err):
scalar + tdser
with pytest.raises(err):
tdser - scalar
with pytest.raises(err):
scalar - tdser
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Tries to broadcast "
"incorrectly",
strict=True, raises=ValueError))
], ids=lambda x: x.__name__)
@pytest.mark.parametrize('dtype', ['int64', 'int32', 'int16',
'uint64', 'uint32', 'uint16', 'uint8',
'float64', 'float32', 'float16'])
@pytest.mark.parametrize('vec', [
np.array([1, 2, 3]),
pd.Index([1, 2, 3]),
Series([1, 2, 3])
# TODO: Add DataFrame in here?
], ids=lambda x: type(x).__name__)
def test_td64arr_add_sub_numeric_arr_invalid(self, box, vec, dtype, tdser):
if type(vec) is Series and not dtype.startswith('float'):
pytest.xfail(reason='GH#19123 integer interpreted as nanos')
tdser = tm.box_expected(tdser, box)
err = TypeError
if box is pd.Index and not dtype.startswith('float'):
err = NullFrequencyError
vector = vec.astype(dtype)
# TODO: parametrize over these four ops?
with pytest.raises(err):
tdser + vector
with pytest.raises(err):
vector + tdser
with pytest.raises(err):
tdser - vector
with pytest.raises(err):
vector - tdser
# ------------------------------------------------------------------
# Operations with datetime-like others
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Returns object dtype "
"instead of "
"datetime64[ns]",
strict=True))
], ids=lambda x: x.__name__)
def test_td64arr_add_sub_timestamp(self, box):
# GH#11925
ts = Timestamp('2012-01-01')
# TODO: parametrize over types of datetime scalar?
tdser = Series(pd.timedelta_range('1 day', periods=3))
expected = Series(pd.date_range('2012-01-02', periods=3))
tdser = tm.box_expected(tdser, box)
expected = tm.box_expected(expected, box)
tm.assert_equal(ts + tdser, expected)
tm.assert_equal(tdser + ts, expected)
expected2 = Series(pd.date_range('2011-12-31',
periods=3, freq='-1D'))
expected2 = tm.box_expected(expected2, box)
tm.assert_equal(ts - tdser, expected2)
tm.assert_equal(ts + (-tdser), expected2)
with pytest.raises(TypeError):
tdser - ts
# ------------------------------------------------------------------
# Operations with timedelta-like others (including DateOffsets)
# TODO: parametrize over [add, sub, radd, rsub]?
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Tries to broadcast "
"incorrectly leading "
"to alignment error",
strict=True, raises=ValueError))
], ids=lambda x: x.__name__)
@pytest.mark.parametrize('names', [(None, None, None),
('Egon', 'Venkman', None),
('NCC1701D', 'NCC1701D', 'NCC1701D')])
def test_td64arr_add_sub_tdi(self, box, names):
# GH#17250 make sure result dtype is correct
# GH#19043 make sure names are propagated correctly
tdi = TimedeltaIndex(['0 days', '1 day'], name=names[0])
ser = Series([Timedelta(hours=3), Timedelta(hours=4)], name=names[1])
expected = Series([Timedelta(hours=3), Timedelta(days=1, hours=4)],
name=names[2])
ser = tm.box_expected(ser, box)
expected = tm.box_expected(expected, box)
result = tdi + ser
tm.assert_equal(result, expected)
if box is not pd.DataFrame:
assert result.dtype == 'timedelta64[ns]'
else:
assert result.dtypes[0] == 'timedelta64[ns]'
result = ser + tdi
tm.assert_equal(result, expected)
if box is not pd.DataFrame:
assert result.dtype == 'timedelta64[ns]'
else:
assert result.dtypes[0] == 'timedelta64[ns]'
expected = Series([Timedelta(hours=-3), Timedelta(days=1, hours=-4)],
name=names[2])
expected = tm.box_expected(expected, box)
result = tdi - ser
tm.assert_equal(result, expected)
if box is not pd.DataFrame:
assert result.dtype == 'timedelta64[ns]'
else:
assert result.dtypes[0] == 'timedelta64[ns]'
result = ser - tdi
tm.assert_equal(result, -expected)
if box is not pd.DataFrame:
assert result.dtype == 'timedelta64[ns]'
else:
assert result.dtypes[0] == 'timedelta64[ns]'
@pytest.mark.parametrize('box', [pd.Index, Series, pd.DataFrame],
ids=lambda x: x.__name__)
def test_td64arr_sub_NaT(self, box):
# GH#18808
ser = Series([NaT, Timedelta('1s')])
expected = Series([NaT, NaT], dtype='timedelta64[ns]')
ser = tm.box_expected(ser, box)
expected = tm.box_expected(expected, box)
res = ser - NaT
tm.assert_equal(res, expected)
class TestTimedeltaArraylikeMulDivOps(object):
# Tests for timedelta64[ns]
# __mul__, __rmul__, __div__, __rdiv__, __floordiv__, __rfloordiv__
# ------------------------------------------------------------------
# __floordiv__, __rfloordiv__
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Incorrectly returns "
"m8[ns] instead of f8",
strict=True))
], ids=lambda x: x.__name__)
@pytest.mark.parametrize('scalar_td', [
timedelta(minutes=5, seconds=4),
Timedelta('5m4s'),
Timedelta('5m4s').to_timedelta64()])
def test_td64arr_floordiv_tdscalar(self, box, scalar_td):
# GH#18831
td1 = Series([timedelta(minutes=5, seconds=3)] * 3)
td1.iloc[2] = np.nan
expected = Series([0, 0, np.nan])
td1 = tm.box_expected(td1, box)
expected = tm.box_expected(expected, box)
result = td1 // scalar_td
tm.assert_equal(result, expected)
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Incorrectly casts to f8",
strict=True))
], ids=lambda x: x.__name__)
@pytest.mark.parametrize('scalar_td', [
timedelta(minutes=5, seconds=4),
Timedelta('5m4s'),
Timedelta('5m4s').to_timedelta64()])
def test_td64arr_rfloordiv_tdscalar(self, box, scalar_td):
# GH#18831
td1 = Series([timedelta(minutes=5, seconds=3)] * 3)
td1.iloc[2] = np.nan
expected = Series([1, 1, np.nan])
td1 = tm.box_expected(td1, box)
expected = tm.box_expected(expected, box)
result = scalar_td // td1
tm.assert_equal(result, expected)
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Returns m8[ns] dtype "
"instead of f8",
strict=True))
], ids=lambda x: x.__name__)
@pytest.mark.parametrize('scalar_td', [
timedelta(minutes=5, seconds=4),
Timedelta('5m4s'),
Timedelta('5m4s').to_timedelta64()])
def test_td64arr_rfloordiv_tdscalar_explicit(self, box, scalar_td):
# GH#18831
td1 = Series([timedelta(minutes=5, seconds=3)] * 3)
td1.iloc[2] = np.nan
expected = Series([1, 1, np.nan])
td1 = tm.box_expected(td1, box)
expected = tm.box_expected(expected, box)
# We can test __rfloordiv__ using this syntax,
# see `test_timedelta_rfloordiv`
result = td1.__rfloordiv__(scalar_td)
tm.assert_equal(result, expected)
# ------------------------------------------------------------------
# Operations with timedelta-like others
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="__mul__ op treats "
"timedelta other as i8; "
"rmul OK",
strict=True))
], ids=lambda x: x.__name__)
@pytest.mark.parametrize('scalar_td', [
timedelta(minutes=5, seconds=4),
Timedelta('5m4s'),
Timedelta('5m4s').to_timedelta64()])
def test_td64arr_mul_tdscalar_invalid(self, box, scalar_td):
td1 = Series([timedelta(minutes=5, seconds=3)] * 3)
td1.iloc[2] = np.nan
td1 = tm.box_expected(td1, box)
# check that we are getting a TypeError
# with 'operate' (from core/ops.py) for the ops that are not
# defined
pattern = 'operate|unsupported|cannot|not supported'
with tm.assert_raises_regex(TypeError, pattern):
td1 * scalar_td
with tm.assert_raises_regex(TypeError, pattern):
scalar_td * td1
# ------------------------------------------------------------------
# Operations with numeric others
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Returns object-dtype",
strict=True))
], ids=lambda x: x.__name__)
@pytest.mark.parametrize('one', [1, np.array(1), 1.0, np.array(1.0)])
def test_td64arr_mul_numeric_scalar(self, box, one, tdser):
# GH#4521
# divide/multiply by integers
expected = Series(['-59 Days', '-59 Days', 'NaT'],
dtype='timedelta64[ns]')
tdser = tm.box_expected(tdser, box)
expected = tm.box_expected(expected, box)
result = tdser * (-one)
tm.assert_equal(result, expected)
result = (-one) * tdser
tm.assert_equal(result, expected)
expected = Series(['118 Days', '118 Days', 'NaT'],
dtype='timedelta64[ns]')
expected = tm.box_expected(expected, box)
result = tdser * (2 * one)
tm.assert_equal(result, expected)
result = (2 * one) * tdser
tm.assert_equal(result, expected)
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Returns object-dtype",
strict=True))
], ids=lambda x: x.__name__)
@pytest.mark.parametrize('two', [2, 2.0, np.array(2), np.array(2.0)])
def test_td64arr_div_numeric_scalar(self, box, two, tdser):
# GH#4521
# divide/multiply by integers
expected = Series(['29.5D', '29.5D', 'NaT'], dtype='timedelta64[ns]')
tdser = tm.box_expected(tdser, box)
expected = tm.box_expected(expected, box)
result = tdser / two
tm.assert_equal(result, expected)
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="broadcasts along "
"wrong axis",
strict=True))
], ids=lambda x: x.__name__)
@pytest.mark.parametrize('dtype', ['int64', 'int32', 'int16',
'uint64', 'uint32', 'uint16', 'uint8',
'float64', 'float32', 'float16'])
@pytest.mark.parametrize('vector', [np.array([20, 30, 40]),
pd.Index([20, 30, 40]),
Series([20, 30, 40])])
def test_td64arr_mul_numeric_array(self, box, vector, dtype, tdser):
# GH#4521
# divide/multiply by integers
vector = vector.astype(dtype)
expected = Series(['1180 Days', '1770 Days', 'NaT'],
dtype='timedelta64[ns]')
tdser = tm.box_expected(tdser, box)
# TODO: Make this up-casting more systematic?
box = Series if (box is pd.Index and type(vector) is Series) else box
expected = tm.box_expected(expected, box)
result = tdser * vector
tm.assert_equal(result, expected)
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="broadcasts along "
"wrong axis",
strict=True))
], ids=lambda x: x.__name__)
@pytest.mark.parametrize('dtype', ['int64', 'int32', 'int16',
'uint64', 'uint32', 'uint16', 'uint8',
'float64', 'float32', 'float16'])
@pytest.mark.parametrize('vector', [np.array([20, 30, 40]),
pd.Index([20, 30, 40]),
Series([20, 30, 40])],
ids=lambda x: type(x).__name__)
def test_td64arr_rmul_numeric_array(self, box, vector, dtype, tdser):
# GH#4521
# divide/multiply by integers
vector = vector.astype(dtype)
expected = Series(['1180 Days', '1770 Days', 'NaT'],
dtype='timedelta64[ns]')
tdser = tm.box_expected(tdser, box)
box = Series if (box is pd.Index and type(vector) is Series) else box
expected = tm.box_expected(expected, box)
result = vector * tdser
tm.assert_equal(result, expected)
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="broadcasts along "
"wrong axis",
strict=True))
], ids=lambda x: x.__name__)
@pytest.mark.parametrize('dtype', ['int64', 'int32', 'int16',
'uint64', 'uint32', 'uint16', 'uint8',
'float64', 'float32', 'float16'])
@pytest.mark.parametrize('vector', [np.array([20, 30, 40]),
pd.Index([20, 30, 40]),
Series([20, 30, 40])])
def test_td64arr_div_numeric_array(self, box, vector, dtype, tdser):
# GH#4521
# divide/multiply by integers
vector = vector.astype(dtype)
expected = Series(['2.95D', '1D 23H 12m', 'NaT'],
dtype='timedelta64[ns]')
tdser = tm.box_expected(tdser, box)
box = Series if (box is pd.Index and type(vector) is Series) else box
expected = tm.box_expected(expected, box)
result = tdser / vector
tm.assert_equal(result, expected)
with pytest.raises(TypeError):
vector / tdser
# TODO: Should we be parametrizing over types for `ser` too?
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="broadcasts along "
"wrong axis",
strict=True))
], ids=lambda x: x.__name__)
@pytest.mark.parametrize('names', [(None, None, None),
('Egon', 'Venkman', None),
('NCC1701D', 'NCC1701D', 'NCC1701D')])
def test_td64arr_mul_int_series(self, box, names):
# GH#19042 test for correct name attachment
tdi = TimedeltaIndex(['0days', '1day', '2days', '3days', '4days'],
name=names[0])
ser = Series([0, 1, 2, 3, 4], dtype=np.int64, name=names[1])
expected = Series(['0days', '1day', '4days', '9days', '16days'],
dtype='timedelta64[ns]',
name=names[2])
tdi = tm.box_expected(tdi, box)
box = Series if (box is pd.Index and type(ser) is Series) else box
expected = tm.box_expected(expected, box)
result = ser * tdi
tm.assert_equal(result, expected)
# The direct operation tdi * ser still needs to be fixed.
result = ser.__rmul__(tdi)
tm.assert_equal(result, expected)
# TODO: Should we be parametrizing over types for `ser` too?
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pd.DataFrame
], ids=lambda x: x.__name__)
@pytest.mark.parametrize('names', [(None, None, None),
('Egon', 'Venkman', None),
('NCC1701D', 'NCC1701D', 'NCC1701D')])
def test_float_series_rdiv_td64arr(self, box, names):
# GH#19042 test for correct name attachment
# TODO: the direct operation TimedeltaIndex / Series still
# needs to be fixed.
tdi = TimedeltaIndex(['0days', '1day', '2days', '3days', '4days'],
name=names[0])
ser = Series([1.5, 3, 4.5, 6, 7.5], dtype=np.float64, name=names[1])
expected = Series([tdi[n] / ser[n] for n in range(len(ser))],
dtype='timedelta64[ns]',
name=names[2])
tdi = tm.box_expected(tdi, box)
box = Series if (box is pd.Index and type(ser) is Series) else box
expected = tm.box_expected(expected, box)
result = ser.__rdiv__(tdi)
if box is pd.DataFrame:
# TODO: Should we skip this case sooner or test something else?
assert result is NotImplemented
else:
tm.assert_equal(result, expected)
class TestTimedeltaArraylikeInvalidArithmeticOps(object):
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="raises ValueError "
"instead of TypeError",
strict=True))
])
@pytest.mark.parametrize('scalar_td', [
timedelta(minutes=5, seconds=4),
Timedelta('5m4s'),
from typing import List
import logging
import numpy
import pandas as pd
from libs.datasets.timeseries import TimeseriesDataset
from libs.datasets.population import PopulationDataset
from libs.datasets import data_source
from libs.datasets import dataset_utils
_logger = logging.getLogger(__name__)
def fill_missing_county_with_city(row):
"""Fills in missing county data with city if available.
"""
if pd.isnull(row.county):
# -*- coding: utf-8 -*-
'''
:author <NAME>
:licence MIT
'''
import pandas as pd
import time
def raw2meta_extract(fn):
"""
Reads raw2 files including GPS and engineering information
Parameters
----------
fn : string
Path and filename of the *.raw2 file
Returns
-------
data : pandas DataFrame
CTD (Salinity, Temperature, Fluorescence, Pressure), Pitch and Roll, Compass information
gps : pandas DataFrame
GPS position information
zoog : pandas DataFrame
Zoocam grayscale values
"""
pgain = 0.04
poff = -10
tgain = 0.001
toff = -5
sgain = 0.001
soff = -1
delta_t = 8
#get file index
print(time.ctime() + ": Processing "+fn)
print(time.ctime() + ": Generating file index...")
with open(fn) as f:
list2 = [row.split()[0] for row in f]
##########################################
#read files
##########################################
f = open(fn)
raw2 = f.readlines()
f.close()
print(time.ctime() + ": Loading CF_DIVE")
##########################################
# CF_DIVE 0F
##########################################
'''
This packet marks the present:
Nsurf = Dive-Set Number
Ncyc = Cycle Number
Npro = the profile number
uxti0 = the UNIX time that the Dive-Set began
uxti1 = The Unix time this specific cycle began
For the 0901 code, the Dive-Set Number is only incremented after
surface communications (GPS and SBD) are attempted (multiple cycles
between surface communications will not increment the Dive-Set
Number, but will increment the Cycle Number).
This packet should be used to set Nsurf, Ncyc, Npro for all
subsequent packets, until the next CF_DIVE packet is encountered.
'''
cf_dive_idx = [i for i, j in enumerate(list2) if j == '0f']
cf_dive_raw = [raw2[i].split() for i in cf_dive_idx]
cf_dive = pd.DataFrame(cf_dive_raw)
cf_dive = cf_dive.iloc[:,1:]
cf_dive.columns = ['Nsurf','Ncyc','Npro','uxti0','uxti1','Dow','Month',
'day','Time','Year']
cf_dive = cf_dive.astype(dtype = {'Nsurf':'int64','Ncyc':'int64',
'Npro':'int64','uxti0':'int64',
'uxti1':'int64'})
##########################################
# CF_PDAT 11
##########################################
print(time.ctime() + ": Loading CF_PDAT")
edat_idx = [i for i, j in enumerate(list2) if j == '11']
edat_raw = [raw2[i].split() for i in edat_idx]
edat = pd.DataFrame(edat_raw)
edat = edat.iloc[:,1:9]
edat.columns = ['Nsurf','Ncyc','Npro','time','pressure','temperature',
'salinity','fluorescence']
edat = edat.astype(dtype = {'Nsurf':'int64','Ncyc': 'int64','Npro': 'int64',
'time':'float','pressure':'float',
'temperature':'float','salinity':'float',
'fluorescence':'float'} )
edat['pressure']=edat['pressure'] * pgain + poff #pressure as a double; step 1 of conversion
#still need to find pmin and do p=p-pmin to convert to dBar
sal_cond = edat['salinity'] > 0
edat.loc[sal_cond, 'salinity'] = edat.loc[sal_cond,'salinity'] * sgain + soff
sal_cond = edat['temperature'] > 0
edat.loc[sal_cond, 'temperature'] = edat.loc[sal_cond,'temperature'] * tgain + toff
for var in ['salinity','temperature','fluorescence']:
cond = edat[var] <= 0
edat.loc[cond, var] = float('nan')
edat = pd.merge(edat,cf_dive)
edat['Dive_start_time'] = pd.to_datetime(edat.uxti0, unit='s')
edat['Dive_start_time'] = edat['Dive_start_time'].dt.tz_localize('UTC')
#add time_of_measure
edat['time_of_measure'] = edat['Dive_start_time'] + pd.to_timedelta(edat['time'].astype('str') + 'seconds')
#edat.time_of_measure = edat.time_of_measure.dt.tz_localize('UTC')
edat['time_of_measure_PDT'] = edat.time_of_measure - | pd.to_timedelta(delta_t, unit='hours') | pandas.to_timedelta |
'''
Example of loading data through the FinMind API
'''
from FinMind.Data import Load
import requests
import pandas as pd
url = 'http://finmindapi.servebeer.com/api/data'
list_url = 'http://finmindapi.servebeer.com/api/datalist'
translate_url = 'http://finmindapi.servebeer.com/api/translation'
'''----------------TaiwanStockInfo----------------'''
form_data = {'dataset':'TaiwanStockInfo'}
res = requests.post(
url,verify = True,
data = form_data)
temp = res.json()
data = pd.DataFrame(temp['data'])
data.head()
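# The request/response pattern above repeats for every dataset below. A small helper like this
# sketch could factor it out (illustrative only; 'finmind_get' is not part of the FinMind package):
def finmind_get(dataset, **params):
    form_data = {'dataset': dataset, **params}
    res = requests.post(url, verify=True, data=form_data)
    return pd.DataFrame(res.json()['data'])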
'''----------------TaiwanStockPrice----------------'''
form_data = {'dataset':'TaiwanStockPrice',
'stock_id':'2317',
'date':'2019-06-01'}
res = requests.post(
url,verify = True,
data = form_data)
temp = res.json()
data = pd.DataFrame(temp['data'])
data.head()
'''----------------TaiwanStockPriceMinute----------------'''
form_data = {'dataset':'TaiwanStockPriceMinute',
'stock_id':'2330',
'date':'2019-06-01'}
res = requests.post(
url,verify = True,
data = form_data)
temp = res.json()
data = pd.DataFrame(temp['data'])
data.head()
'''----------------FinancialStatements----------------'''
form_data = {'dataset':'FinancialStatements',
'stock_id':'2317',
'date':'2019-01-01'}
res = requests.post(
url,verify = True,
data = form_data)
temp = res.json()
data = pd.DataFrame(temp['data'])
data = Load.transpose(data)
data.head()
'''----------------TaiwanCashFlowsStatement----------------'''
form_data = {'dataset':'TaiwanCashFlowsStatement',
'stock_id':'2330',
'date':'2019-06-01'}
res = requests.post(
url,verify = True,
data = form_data)
temp = res.json()
data = | pd.DataFrame(temp['data']) | pandas.DataFrame |
import ibis
from pandas import read_csv
from pandas.core.frame import DataFrame
import pytest
from sql_to_ibis import register_temp_table, remove_temp_table
from sql_to_ibis.tests.utils import (
DATA_PATH,
MULTI_LOOKUP,
MULTI_MAIN,
MULTI_PROMOTION,
MULTI_PROMOTION_NO_OVERLAP,
MULTI_RELATIONSHIP,
get_all_join_columns_handle_duplicates,
)
scope_fixture = pytest.fixture(scope="session")
@scope_fixture
def pandas_client():
return ibis.pandas.PandasClient({})
@scope_fixture
def digimon_mon_list(pandas_client):
frame = read_csv(DATA_PATH / "DigiDB_digimonlist.csv")
frame["mon_attribute"] = frame["Attribute"]
return ibis.pandas.from_dataframe(
frame,
"DIGIMON_MON_LIST",
pandas_client,
)
@scope_fixture
def digimon_move_list(pandas_client):
frame = read_csv(DATA_PATH / "DigiDB_movelist.csv")
frame["move_attribute"] = frame["Attribute"]
return ibis.pandas.from_dataframe(frame, "DIGIMON_MOVE_LIST", pandas_client)
@scope_fixture
def forest_fires(pandas_client):
return ibis.pandas.from_dataframe(
read_csv(DATA_PATH / "forestfires.csv"), "FOREST_FIRES", pandas_client
)
@scope_fixture
def avocado(pandas_client):
return ibis.pandas.from_dataframe(
read_csv(DATA_PATH / "avocado.csv"), "AVOCADO", pandas_client
)
@scope_fixture
def time_data(pandas_client):
return ibis.pandas.from_dataframe(
| read_csv(DATA_PATH / "time_data.csv") | pandas.read_csv |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import pandas as pd
from datetime import datetime, timedelta
import numpy as np
import matplotlib
#matplotlib.use('Agg')
import matplotlib.pyplot as plt
import matplotlib.ticker as tck
import matplotlib.cm as cm
import matplotlib.font_manager as fm
import math as m
import matplotlib.dates as mdates
import matplotlib.ticker as ticker
import matplotlib.transforms as transforms
import matplotlib.colors as colors
import os
#-----------------------------------------------------------------------------
# Paths for the fonts ---------------------------------------------------------
prop = fm.FontProperties(fname='/home/nacorreasa/SIATA/Cod_Califi/AvenirLTStd-Heavy.otf' )
prop_1 = fm.FontProperties(fname='/home/nacorreasa/SIATA/Cod_Califi/AvenirLTStd-Book.otf')
prop_2 = fm.FontProperties(fname='/home/nacorreasa/SIATA/Cod_Califi/AvenirLTStd-Black.otf')
#------------------------------------------------------------------------------
# Purpose of this script --------------------------------------------------------
"""
Relationship and statistical fit between the power data and the solar radiation data
"""
################################################################################
## ---------- BOUNDING THE DATES BY DAY AND MONTH TO SELECT THE DATA ---------- ##
################################################################################
fi_m = 3
fi_d = 23
ff_m = 12
ff_d = 20
##############################################################################
## ----------------LECTURA DE LOS DATOS DE LOS EXPERIMENTOS---------------- ##
##############################################################################
# df_P975 = pd.read_csv('/home/nacorreasa/Maestria/Datos_Tesis/Experimentos_Panel/Panel975.txt', sep=',', index_col =0)
# df_P350 = pd.read_csv('/home/nacorreasa/Maestria/Datos_Tesis/Experimentos_Panel/Panel350.txt', sep=',', index_col =0)
# df_P348 = pd.read_csv('/home/nacorreasa/Maestria/Datos_Tesis/Experimentos_Panel/Panel348.txt', sep=',', index_col =0)
df_P975 = pd.read_csv('/Users/cmcuervol/Dropbox/Codes/NathyTesis/Panel975.txt', sep=',', index_col =0)
df_P350 = pd.read_csv('/Users/cmcuervol/Dropbox/Codes/NathyTesis/Panel350.txt', sep=',', index_col =0)
df_P348 = | pd.read_csv('/Users/cmcuervol/Dropbox/Codes/NathyTesis/Panel348.txt', sep=',', index_col =0) | pandas.read_csv |
from collections import OrderedDict
import numpy as np
import os
import pandas as pd
import warnings
from tqdm import tqdm
from . import quality_metrics
# from .wrappers import * # Except calculate_pc_metrics and calculate_metrics - they will be replaced below
def calculate_isi_violations(spike_times, spike_clusters, isi_threshold, min_isi):
viol_rates = []
viol_ns = []
for cluster_id in np.unique(spike_clusters):
viol_rate, viol_n = quality_metrics.isi_violations(spike_times[spike_clusters == cluster_id],
min_time=np.min(spike_times),
max_time=np.max(spike_times),
isi_threshold=isi_threshold,
min_isi=min_isi)
viol_rates.append(viol_rate)
viol_ns.append(viol_n)
return np.array(viol_rates), np.array(viol_ns)
def calculate_n_spikes(spike_clusters):
return np.array([sum(spike_clusters == cluster_id) for cluster_id in np.unique(spike_clusters)])
def calculate_presence_ratio(spike_times, spike_clusters):
"""
:param spike_times:
:param spike_clusters:
:param total_units:
:return:
"""
cluster_ids = np.unique(spike_clusters)
ratios = []
for idx, cluster_id in enumerate(cluster_ids):
for_this_cluster = (spike_clusters == cluster_id)
ratios.append(quality_metrics.presence_ratio(spike_times[for_this_cluster],
min_time=np.min(spike_times),
max_time=np.max(spike_times)))
return np.array(ratios)
def calculate_firing_rate(spike_times, spike_clusters):
"""
:param spike_times:
:param spike_clusters:
:param total_units:
:return:
"""
cluster_ids = np.unique(spike_clusters)
firing_rates = []
for idx, cluster_id in enumerate(cluster_ids):
for_this_cluster = (spike_clusters == cluster_id)
firing_rates.append(quality_metrics.firing_rate(spike_times[for_this_cluster],
min_time=np.min(spike_times),
max_time=np.max(spike_times)))
return np.array(firing_rates)
def calculate_amplitude_cutoff(spike_clusters, amplitudes):
"""
:param spike_clusters:
:param amplitudes:
:param total_units:
:return:
"""
cluster_ids = np.unique(spike_clusters)
amplitude_cutoffs = []
for idx, cluster_id in enumerate(cluster_ids):
for_this_cluster = (spike_clusters == cluster_id)
amplitude_cutoffs.append(quality_metrics.amplitude_cutoff(amplitudes[for_this_cluster]))
return np.array(amplitude_cutoffs)
def calculate_pc_metrics(spike_clusters,
spike_templates,
total_units,
pc_features,
pc_feature_ind,
num_channels_to_compare,
max_spikes_for_cluster,
max_spikes_for_nn,
n_neighbors,
do_parallel=True):
"""
:param spike_clusters:
:param total_units:
:param pc_features:
:param pc_feature_ind:
:param num_channels_to_compare:
:param max_spikes_for_cluster:
:param max_spikes_for_nn:
:param n_neighbors:
:return:
"""
assert (num_channels_to_compare % 2 == 1)
half_spread = int((num_channels_to_compare - 1) / 2)
cluster_ids = np.unique(spike_clusters)
template_ids = np.unique(spike_templates)
template_peak_channels = np.zeros((len(template_ids),), dtype='uint16')
cluster_peak_channels = np.zeros((len(cluster_ids),), dtype='uint16')
for idx, template_id in enumerate(template_ids):
for_template = np.squeeze(spike_templates == template_id)
pc_max = np.argmax(np.mean(pc_features[for_template, 0, :], 0))
template_peak_channels[idx] = pc_feature_ind[template_id, pc_max]
for idx, cluster_id in enumerate(cluster_ids):
for_unit = np.squeeze(spike_clusters == cluster_id)
templates_for_unit = np.unique(spike_templates[for_unit])
template_positions = np.where(np.isin(template_ids, templates_for_unit))[0]
cluster_peak_channels[idx] = np.median(template_peak_channels[template_positions])
# Loop over clusters:
if do_parallel:
from joblib import Parallel, delayed
meas = Parallel(n_jobs=-1, verbose=1)( # -1 means use all cores
# delayed(Wrappers.calculate_pc_metrics_one_cluster_old) # Function
# (template_peak_channels, cluster_id, half_spread, pc_features, pc_feature_ind, spike_clusters, # Inputs
# max_spikes_for_cluster, max_spikes_for_nn, n_neighbors)
delayed(calculate_pc_metrics_one_cluster) # Function
(cluster_peak_channels, idx, cluster_id, cluster_ids,
half_spread, pc_features, pc_feature_ind,
spike_clusters, spike_templates,
max_spikes_for_cluster, max_spikes_for_nn, n_neighbors)
for idx, cluster_id in enumerate(cluster_ids)) # Loop
else:
meas = []
        for idx, cluster_id in tqdm(enumerate(cluster_ids), total=len(cluster_ids), desc='PC metrics'): # Loop
# meas.append(Wrappers.calculate_pc_metrics_one_cluster_old(
# template_peak_channels, cluster_id, half_spread, pc_features, pc_feature_ind, spike_clusters,
# max_spikes_for_cluster, max_spikes_for_nn, n_neighbors))
meas.append(calculate_pc_metrics_one_cluster( # Function
cluster_peak_channels, idx, cluster_id, cluster_ids,
half_spread, pc_features, pc_feature_ind,
spike_clusters, spike_templates,
max_spikes_for_cluster, max_spikes_for_nn, n_neighbors))
# Unpack:
isolation_distances = []
l_ratios = []
d_primes = []
nn_hit_rates = []
nn_miss_rates = []
for mea in meas:
isolation_distance, d_prime, nn_miss_rate, nn_hit_rate, l_ratio = mea
isolation_distances.append(isolation_distance)
d_primes.append(d_prime)
nn_miss_rates.append(nn_miss_rate)
nn_hit_rates.append(nn_hit_rate)
l_ratios.append(l_ratio)
return (np.array(isolation_distances), np.array(l_ratios), np.array(d_primes),
np.array(nn_hit_rates), np.array(nn_miss_rates))
def get_unit_pcs(unit_id,
spike_clusters,
spike_templates,
pc_feature_ind,
pc_features,
channels_to_use,
subsample):
""" Return PC features for one unit
Inputs:
-------
unit_id : Int
ID for this unit
spike_clusters : np.ndarray
Cluster labels for each spike
    spike_templates : np.ndarray
Template labels for each spike
pc_feature_ind : np.ndarray
Channels used for PC calculation for each unit
pc_features : np.ndarray
Array of all PC features
channels_to_use : np.ndarray
Channels to use for calculating metrics
subsample : Int
maximum number of spikes to return
Output:
-------
unit_PCs : numpy.ndarray (float)
PCs for one unit (num_spikes x num_PCs x num_channels)
"""
inds_for_unit = np.where(spike_clusters == unit_id)[0]
spikes_to_use = np.random.permutation(inds_for_unit)[:subsample]
unique_template_ids = np.unique(spike_templates[spikes_to_use])
unit_PCs = []
for template_id in unique_template_ids:
index_mask = spikes_to_use[np.squeeze(spike_templates[spikes_to_use]) == template_id]
these_inds = pc_feature_ind[template_id, :]
pc_array = []
for i in channels_to_use:
if np.isin(i, these_inds):
channel_index = np.argwhere(these_inds == i)[0][0]
pc_array.append(pc_features[index_mask, :, channel_index])
else:
return None
unit_PCs.append(np.stack(pc_array, axis=-1))
if len(unit_PCs) > 0:
return np.concatenate(unit_PCs)
else:
return None
def calculate_pc_metrics_one_cluster(cluster_peak_channels, idx, cluster_id, cluster_ids,
half_spread, pc_features, pc_feature_ind,
spike_clusters, spike_templates,
max_spikes_for_cluster, max_spikes_for_nn, n_neighbors):
peak_channel = cluster_peak_channels[idx]
num_spikes_in_cluster = np.sum(spike_clusters == cluster_id)
half_spread_down = peak_channel \
if peak_channel < half_spread \
else half_spread
half_spread_up = np.max(pc_feature_ind) - peak_channel \
if peak_channel + half_spread > np.max(pc_feature_ind) \
else half_spread
channels_to_use = np.arange(peak_channel - half_spread_down, peak_channel + half_spread_up + 1)
units_in_range = cluster_ids[np.isin(cluster_peak_channels, channels_to_use)]
if units_in_range.size < 2:
warnings.warn('Not enough overlap with peak channels. '
'Try increasing "num_channels_to_compare" parameter. Not calculating pc metrics')
return tuple(np.repeat(np.nan, 5))
spike_counts = np.zeros(units_in_range.shape)
for idx2, cluster_id2 in enumerate(units_in_range):
spike_counts[idx2] = np.sum(spike_clusters == cluster_id2)
if num_spikes_in_cluster > max_spikes_for_cluster:
relative_counts = spike_counts / num_spikes_in_cluster * max_spikes_for_cluster
else:
relative_counts = spike_counts
all_pcs = np.zeros((0, pc_features.shape[1], channels_to_use.size))
all_labels = np.zeros((0,))
for idx2, cluster_id2 in enumerate(units_in_range):
subsample = int(relative_counts[idx2])
pcs = get_unit_pcs(cluster_id2, spike_clusters, spike_templates,
pc_feature_ind, pc_features, channels_to_use,
subsample)
if pcs is not None and pcs.ndim == 3:
labels = np.ones((pcs.shape[0],)) * cluster_id2
all_pcs = np.concatenate((all_pcs, pcs), 0)
all_labels = np.concatenate((all_labels, labels), 0)
elif cluster_id2 == cluster_id:
warnings.warn(f'No PCs for Cluster {cluster_id} in channels {channels_to_use}! feature metrics will be nan')
return tuple(np.repeat(np.nan, 5))
# Check no fewer than 20 spikes in this cluster
if sum(all_labels == cluster_id) < 20:
warnings.warn(f'Fewer than 20 spikes in cluster {cluster_id}! feature metrics will be nan')
return tuple(np.repeat(np.nan, 5))
all_pcs = np.reshape(all_pcs, (all_pcs.shape[0], pc_features.shape[1] * channels_to_use.size))
if ((all_pcs.shape[0] > 10)
and (cluster_id in all_labels) # Not all labels are in this cluster
and (len(channels_to_use) > 0)):
isolation_distance, l_ratio = quality_metrics.mahalanobis_metrics(all_pcs, all_labels, cluster_id)
d_prime = quality_metrics.lda_metrics(all_pcs, all_labels, cluster_id)
nn_hit_rate, nn_miss_rate = quality_metrics.nearest_neighbors_metrics(all_pcs, all_labels,
cluster_id,
max_spikes_for_nn,
n_neighbors)
return isolation_distance, d_prime, nn_miss_rate, nn_hit_rate, l_ratio
    else: # Too few spikes or cluster doesn't exist
# Make warnings
if all_pcs.shape[0] < 10:
warnings.warn(f'Less than 10 pcs in {cluster_id}! feature metrics will be nan')
elif not (cluster_id in all_labels):
warnings.warn(f'Not all labels are in cluster {cluster_id}! feature metrics will be nan')
return tuple(np.repeat(np.nan, 5))
def calculate_metrics(spike_times, spike_clusters, spike_templates, amplitudes, pc_features, pc_feature_ind,
output_folder=None,
do_parallel=True, do_pc_features=True, do_silhouette=True, do_drift=True,
isi_threshold=0.0015,
min_isi=0.000166,
num_channels_to_compare=5,
max_spikes_for_unit=1500,
max_spikes_for_nn=20000,
n_neighbors=4,
n_silhouette=20000,
drift_metrics_interval_s=51,
drift_metrics_min_spikes_per_interval=10
):
""" Calculate metrics for all units on one probe
from mmy.input_output import spike_io
ksort_folder = '~/res_ss_full/res_ss/tcloop_train_m022_1553627381_'
spike_times, spike_clusters, spike_templates, amplitudes, templates, channel_map, clusterIDs, cluster_quality, pc_features, pc_feature_ind = \
spike_io.QualityMetrics.load_kilosort_data(ksort_folder, 3e4, False, include_pcs=True)
metrics = QualityMetrics.calculate_metrics(spike_times, spike_clusters, amplitudes, pc_features, pc_feature_ind, ksort_folder)
Inputs:
------
spike_times : numpy.ndarray (num_spikes x 0)
Spike times in seconds (same timebase as epochs)
spike_clusters : numpy.ndarray (num_spikes x 0)
Cluster IDs for each spike time
amplitudes : numpy.ndarray (num_spikes x 0)
Amplitude value for each spike time
channel_map : numpy.ndarray (num_units x 0)
Original data channel for pc_feature_ind array
pc_features : numpy.ndarray (num_spikes x num_pcs x num_channels)
Pre-computed PCs for blocks of channels around each spike
pc_feature_ind : numpy.ndarray (num_units x num_channels)
Channel indices of PCs for each unit
epochs : list of Epoch objects
contains information on Epoch start and stop times
params : dict of parameters
'isi_threshold' : minimum time for isi violations
Outputs:
--------
metrics : pandas.DataFrame
one column for each metric
one row per unit per epoch
"""
# ==========================================================
# MAIN:
# ==========================================================
cluster_ids = np.unique(spike_clusters)
total_units = len(np.unique(spike_clusters))
n_spikes_per_cluster = calculate_n_spikes(spike_clusters)
print(f'Found {total_units} clusters. {sum(n_spikes_per_cluster > 20)} of them have >20 spikes')
isi_viol_rate, isi_viol_n = calculate_isi_violations(spike_times, spike_clusters, isi_threshold, min_isi)
presence_ratio = calculate_presence_ratio(spike_times, spike_clusters, )
firing_rate = calculate_firing_rate(spike_times, spike_clusters, )
amplitude_cutoff = calculate_amplitude_cutoff(spike_clusters, amplitudes, )
metrics = pd.DataFrame(data=OrderedDict((('cluster_id', cluster_ids),
('firing_rate', firing_rate),
('presence_ratio', presence_ratio),
('isi_viol_rate', isi_viol_rate),
('isi_viol_n', isi_viol_n),
('amplitude_cutoff', amplitude_cutoff),)))
if do_pc_features:
print("Calculating PC-based metrics")
try:
(isolation_distance, l_ratio,
d_prime, nn_hit_rate, nn_miss_rate) = calculate_pc_metrics(spike_clusters,
spike_templates,
total_units,
pc_features,
pc_feature_ind,
num_channels_to_compare,
max_spikes_for_unit,
max_spikes_for_nn,
n_neighbors,
do_parallel=do_parallel)
except Exception as e:
num_channels_to_compare += 6
print(f'Hit error {e}.\n Increasing num_channels_to_compare to {num_channels_to_compare} and retrying')
(isolation_distance, l_ratio,
d_prime, nn_hit_rate, nn_miss_rate) = calculate_pc_metrics(spike_clusters,
spike_templates,
total_units,
pc_features,
pc_feature_ind,
num_channels_to_compare,
max_spikes_for_unit,
max_spikes_for_nn,
n_neighbors,
do_parallel=do_parallel)
metrics0 = pd.DataFrame(data=OrderedDict((('isolation_distance', isolation_distance),
('l_ratio', l_ratio),
('d_prime', d_prime),
('nn_hit_rate', nn_hit_rate),
('nn_miss_rate', nn_miss_rate)
)))
metrics = pd.concat([metrics, metrics0], axis=1)
if do_silhouette:
print("Calculating silhouette score")
the_silhouette_score = quality_metrics.calculate_silhouette_score(spike_clusters,
spike_templates,
total_units,
pc_features,
pc_feature_ind,
n_silhouette,
do_parallel=do_parallel)
metrics2 = pd.DataFrame(data=OrderedDict((('silhouette_score', the_silhouette_score),)),
index=range(len(the_silhouette_score)))
metrics = pd.concat([metrics, metrics2], axis=1)
if do_drift:
print("Calculating drift metrics")
max_drift, cumulative_drift = quality_metrics.calculate_drift_metrics(spike_times,
spike_clusters,
spike_templates,
pc_features,
pc_feature_ind,
drift_metrics_interval_s,
drift_metrics_min_spikes_per_interval,
do_parallel=do_parallel)
metrics3 = pd.DataFrame(data=OrderedDict((('max_drift', max_drift),
('cumulative_drift', cumulative_drift),
)))
metrics = | pd.concat([metrics, metrics3], axis=1) | pandas.concat |
import pandas as pd
from sklearn import linear_model
import statsmodels.api as sm
import numpy as np
from scipy import stats
df_all = pd.read_csv("/mnt/nadavrap-students/STS/data/imputed_data2.csv")
# df_all = pd.read_csv("/tmp/pycharm_project_723/new data sum info surg and Hosp numeric values.csv")
# # print(df_all.columns.tolist())
# # print (df_all.count())
# # print (df_all['Mortalty'].isnull().sum())
# # print (df_all['Mortalty'].value_counts())
# def refactor_categorical_values_to_numeric_values(df, col_names):
# # df = df.filter(col_names, axis=1)
# for col in col_names:
# try:
# df = df.replace({col: {False: 0, True: 1}})
# df = df.replace({col: {"No": 0, "Yes": 1}})
# df = df.replace({col: {"Male": 0, "Female": 1}})
# df = df.replace({col: {"Elective": 0, "Urgent": 1}})
# df = df.replace({col: {"Non-Hispanic": 0, "Hispanic": 1}})
# df = df.replace({col: {"Previous Non-CAB": 0, "Previous CAB": 1}})
# df = df.replace({col: {"None/Trivial/Trace/Mild": 0, "Moderate/Severe": 1}})
# df = df.replace({col: {"Unknown": 1, "Alive": 1, "Dead": 0}})
# df = df.replace({col: {"First cardiovascular surgery": 0, "NA - Not a cardiovascular surgery": 0,
# "First re-op cardiovascular surgery": 0, "Second re-op cardiovascular surgery": 1,
# "Third re-op cardiovascular surgery": 1,
# "Fourth or more re-op cardiovascular surgery": 1}})
# df = df.replace({col: {"Never smoker": 0, "Smoker": 1}})
# df = df.replace({col: {"I/II": 0, "III/IV": 1}})
# df = df.replace({col: {"None": 0, "One": 1, "Two": 2, "Three": 3}})
# except:
# x = "none"
# print(df.shape)
# df.to_csv("/tmp/pycharm_project_723/new data sum info surg and Hosp numeric values.csv")
#
df_all = df_all.replace({'STSRCHOSPD':{False:0, True:1}})
df_all = df_all.replace({'Complics':{False:0, True:1}})
df_all = df_all.replace({'Mortality':{False:0, True:1}})
df_all = df_all.replace({'STSRCMM':{False:0, True:1}})
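# The four replace() calls above follow one pattern; an equivalent loop would be (illustrative
# sketch, assuming these are the only boolean columns to recode):
# for col in ['STSRCHOSPD', 'Complics', 'Mortality', 'STSRCMM']:
#     df_all = df_all.replace({col: {False: 0, True: 1}})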
print (df_all['STSRCMM'].unique())
print (df_all['STSRCMM'].isna().sum())
df_all[:50].to_csv("all 50.csv")
# def intersection(lst1, lst2):
# return list(set(lst1) & set(lst2))
#
#
# # list_vals = [ "Reoperation", "BMI", "Age", "Gender", "RaceCaucasian", "RaceBlack", "Ethnicity",
# # "RaceOther", "FHCAD", "Diabetes", "InsulinDiab", "Dyslip", "Dialysis", "Hypertn", "InfEndo",
# # "SmokingStatus", "ChrLungD", "ModSevereLungDis", "ImmSupp", "PVD", "DualAntiPlat", 'RenFail',
# # "CreatLst", 'PreCVAorTIAorCVD', "POCPCI", "PrevMI", "Angina", "UnstableAngina", "HeartFail",
# # "ClassNYHGroup", "Arrhythmia", "ArrhythAtrFibFlutter", "ArrhythOther", "MedACEI", "MedBeta",
# # "MedNitIV", "MedASA", "MedAntiplateltNoASA", "AntiCoag", "MedInotr", "MedSter", "HDEF", "EF<=35%",
# # "NumDisV", 'NumDisV_ordinal', "LeftMain", "VDInsufA", "VDStenA", "VDInsufM", "VDStenM", "VDInsufT",
# # "VDStenT", "Status", 'MedHeparin', 'Mortality', 'PrCVInt']
# # # list_val = ['PrCVInt']
# #
# #
# # # print (intersection(list2,list_vals))
# # test = df_all[:50]
# # refactor_categorical_values_to_numeric_values(test, list_vals)
# # test.rename(columns={"EF<=35%": "EF_less_equal_35"}, inplace=True)
# list2 = [ 'STSRCHOSPD', 'STSRCOM', 'STSRCDSWI', 'STSRCMM', 'STSRCPermStroke', 'STSRCProlvent', 'STSRcRenFail', 'STSRCreop',
# 'PLOS', 'PredMort', 'PredDeep', 'PredReop', 'PredStro', 'PredVent', 'PredRenF', 'PredMM', 'Pred6D', 'Pred14D'
# 'Age', 'Gender', 'RaceCaucasian', 'RaceBlack', 'RaceOther', 'Ethnicity', 'FHCAD', 'Diabetes', 'Hypertn',
# 'Dyslip', 'Dialysis', 'InfEndo', 'ChrLungD', 'ImmSupp', 'PVD', 'CreatLst', 'PrevMI', 'Arrhythmia', 'PrCVInt', 'prcab',
# 'prvalve', 'POCPCI', 'ProthCar', 'MedACEI', 'MedASA', 'MedBeta', 'MedInotr', 'MedNitIV', 'MedSter', 'NumDisV', 'HDEF',
# 'VDInsufA', 'VDStenA', 'VDInsufM', 'VDStenM', 'VDInsufT', 'VDStenT', 'Status', 'PerfusTm', 'XClampTm', 'DistVein', 'NumIMADA',
# 'NumRadDA', 'IABP', 'VentHrsTot', 'Complics', 'COpReBld', 'CPVntLng', 'CRenFail', 'HeartFail', 'Incidenc', 'Reoperation',
# 'SmokingStatus', 'InsulinDiab', 'ModSevereLungDis', 'PreCVAorTIAorCVD', 'RenFail', 'Angina', 'UnstableAngina', 'ClassNYHGroup',
# 'ArrhythAtrFibFlutter', 'ArrhythOther', 'DualAntiPlat', 'MedHeparin', 'AntiCoag', 'MedAntiplateltNoASA', 'NumDisV_ordinal', 'EF<=35%',
# 'CPBUse', 'RadArtUsed', 'IMAGraftUsed', 'DistVeinDone', 'TotalNumberOfGrafts', 'LeftMain', 'CompleteRevas', 'MajorComps', 'PLOS14',
# 'postCVAorTIA', 'IntraPostBloodTrans', 'ICUHrsTotal', 'BMI']
# # list2.to_csv("test for numeric draft model.csv")
# refactor_categorical_values_to_numeric_values(df_all,list2)
# mask_reop = df_all['Reoperation'] == 'Reoperation'
# df_reop = df_all[mask_reop]
# df_all = df_all.replace({'Reoperation':{'First Time':0, 'Reoperation':1}})
mask = df_all['surgyear'] == 2010
df_2010 = df_all[mask]
mask = df_all['surgyear'] == 2011
df_2011 = df_all[mask]
mask = df_all['surgyear'] == 2012
df_2012 = df_all[mask]
mask = df_all['surgyear'] == 2013
df_2013 = df_all[mask]
mask = df_all['surgyear'] == 2014
df_2014 = df_all[mask]
mask = df_all['surgyear'] == 2015
df_2015 = df_all[mask]
mask = df_all['surgyear'] == 2016
df_2016 = df_all[mask]
mask = df_all['surgyear'] == 2017
df_2017 = df_all[mask]
mask = df_all['surgyear'] == 2018
df_2018 = df_all[mask]
mask = df_all['surgyear'] == 2019
df_2019 = df_all[mask]
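# The ten per-year splits above could also be built in one pass (illustrative sketch; it assumes
# the same 2010-2019 range used above):
# dfs_by_year = {year: df_all[df_all['surgyear'] == year] for year in range(2010, 2020)}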
# hospid_2019 = pd.DataFrame()
# mask = df_all['HospID'] == 100427
# df1 = df_all[mask]
# df1.to_csv('100427.csv')
# df2 = df1.groupby(['HospID','surgyear'])['HospID'].count().reset_index(name='total')
# print (df2.head(6))
def create_2019_df(df):
df1 = df.groupby('HospID')['HospID'].count().reset_index(name='total')
df2 = df.groupby('HospID')['Reoperation'].apply(lambda x: (x == 'Reoperation').sum()).reset_index(name='Reop')
df3 = df.groupby('HospID')['Reoperation'].apply(lambda x: (x == 'First Time').sum()).reset_index(name='FirstOperation')
dfmort = df.groupby('HospID')['MtOpD'].apply(lambda x: (x == 1).sum()).reset_index(name='Mortality_all')
mask_reop = df['Reoperation'] == 'Reoperation'
df_reop = df[mask_reop]
df_op = df[~mask_reop]
dfmortf = df_op.groupby('HospID')['MtOpD'].apply(lambda x: (x == 1).sum()).reset_index(name='Mortality_first')
dfmortr = df_reop.groupby('HospID')['MtOpD'].apply(lambda x: (x == 1).sum()).reset_index(name='Mortality_reop')
df_comp = df.groupby('HospID')['Complics'].apply(lambda x: (x == 1).sum()).reset_index(name='Complics_all')
df_compr = df_reop.groupby('HospID')['Complics'].apply(lambda x: (x == 1).sum()).reset_index(name='Complics_reop')
df_compf = df_op.groupby('HospID')['Complics'].apply(lambda x: (x == 1).sum()).reset_index(name='Complics_FirstOperation')
d1 = pd.merge(df1, df3, on='HospID', how='outer')
d2 = pd.merge(d1, df2, on='HospID', how='outer')
d3 = pd.merge(d2, dfmort, on='HospID', how='outer')
d4 = | pd.merge(d3, dfmortf, on='HospID', how='outer') | pandas.merge |
#!/usr/bin/python3
# -*- coding: utf-8 -*-
# *****************************************************************************/
# * Authors: <NAME>
# *****************************************************************************/
"""transformCSV.py
This module contains the basic functions for creating the content of a configuration file from CSV.
Args:
    --inFile: Path to the input CSV file containing the time series data values
    --outFile: Path to the output INI configuration file for the time series data values
--debug: Boolean flag to activate verbose printing for debug use
Example:
Default usage:
$ python transformCSV.py
Specific usage:
$ python transformCSV.py
--inFile C:\raad\src\software\time-series.csv
--outFile C:\raad\src\software\time-series.ini
--debug True
"""
import sys
import datetime
import optparse
import traceback
import pandas
import numpy
import os
import pprint
import csv
if sys.version_info.major > 2:
import configparser as cF
else:
import ConfigParser as cF
class TransformMetaData(object):
debug = False
fileName = None
fileLocation = None
columnsList = None
analysisFrameFormat = None
uniqueLists = None
analysisFrame = None
def __init__(self, inputFileName=None, debug=False, transform=False, sectionName=None, outFolder=None,
outFile='time-series-madness.ini'):
if isinstance(debug, bool):
self.debug = debug
if inputFileName is None:
return
elif os.path.exists(os.path.abspath(inputFileName)):
self.fileName = inputFileName
            self.fileLocation = os.path.abspath(inputFileName)
(analysisFrame, analysisFrameFormat, uniqueLists, columnNamesList) = self.CSVtoFrame(
inputFileName=self.fileName)
self.analysisFrame = analysisFrame
self.columnsList = columnNamesList
self.analysisFrameFormat = analysisFrameFormat
self.uniqueLists = uniqueLists
if transform:
passWrite = self.frameToINI(analysisFrame=analysisFrame, sectionName=sectionName, outFolder=outFolder,
outFile=outFile)
print(f"Pass Status is : {passWrite}")
return
def getColumnList(self):
return self.columnsList
def getAnalysisFrameFormat(self):
return self.analysisFrameFormat
def getuniqueLists(self):
return self.uniqueLists
def getAnalysisFrame(self):
return self.analysisFrame
@staticmethod
def getDateParser(formatString="%Y-%m-%d %H:%M:%S.%f"):
        return (lambda x: datetime.datetime.strptime(x, formatString))  # 2020-06-09 19:14:00.000
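        # Illustrative use with an assumed input string:
        #   TransformMetaData.getDateParser()("2020-06-09 19:14:00.000")
        # returns datetime.datetime(2020, 6, 9, 19, 14), matching the default format string.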
def getHeaderFromFile(self, headerFilePath=None, method=1):
if headerFilePath is None:
return (None, None)
if method == 1:
fieldnames = pandas.read_csv(headerFilePath, index_col=0, nrows=0).columns.tolist()
elif method == 2:
with open(headerFilePath, 'r') as infile:
reader = csv.DictReader(infile)
fieldnames = list(reader.fieldnames)
elif method == 3:
fieldnames = list(pandas.read_csv(headerFilePath, nrows=1).columns)
else:
fieldnames = None
fieldDict = {}
for indexName, valueName in enumerate(fieldnames):
fieldDict[valueName] = pandas.StringDtype()
return (fieldnames, fieldDict)
def CSVtoFrame(self, inputFileName=None):
if inputFileName is None:
return (None, None)
# Load File
print("Processing File: {0}...\n".format(inputFileName))
self.fileLocation = inputFileName
# Create data frame
analysisFrame = pandas.DataFrame()
analysisFrameFormat = self._getDataFormat()
inputDataFrame = pandas.read_csv(filepath_or_buffer=inputFileName,
sep='\t',
names=self._getDataFormat(),
# dtype=self._getDataFormat()
# header=None
# float_precision='round_trip'
# engine='c',
# parse_dates=['date_column'],
# date_parser=True,
# na_values=['NULL']
)
if self.debug: # Preview data.
print(inputDataFrame.head(5))
# analysisFrame.astype(dtype=analysisFrameFormat)
# Cleanup data
analysisFrame = inputDataFrame.copy(deep=True)
analysisFrame.apply(pandas.to_numeric, errors='coerce') # Fill in bad data with Not-a-Number (NaN)
# Create lists of unique strings
uniqueLists = []
columnNamesList = []
for columnName in analysisFrame.columns:
if self.debug:
print('Column Name : ', columnName)
print('Column Contents : ', analysisFrame[columnName].values)
            if pandas.api.types.is_string_dtype(analysisFrame[columnName]):
columnUniqueList = analysisFrame[columnName].unique().tolist()
else:
columnUniqueList = None
columnNamesList.append(columnName)
uniqueLists.append([columnName, columnUniqueList])
if self.debug: # Preview data.
print(analysisFrame.head(5))
return (analysisFrame, analysisFrameFormat, uniqueLists, columnNamesList)
def frameToINI(self, analysisFrame=None, sectionName='Unknown', outFolder=None, outFile='nil.ini'):
if analysisFrame is None:
return False
try:
if outFolder is None:
outFolder = os.getcwd()
configFilePath = os.path.join(outFolder, outFile)
configINI = cF.ConfigParser()
configINI.add_section(sectionName)
            for (columnName, columnData) in analysisFrame.items():
if self.debug:
print('Column Name : ', columnName)
print('Column Contents : ', columnData.values)
print("Column Contents Length:", len(columnData.values))
print("Column Contents Type", type(columnData.values))
writeList = "["
for colIndex, colValue in enumerate(columnData):
writeList = f"{writeList}'{colValue}'"
if colIndex < len(columnData) - 1:
writeList = f"{writeList}, "
writeList = f"{writeList}]"
configINI.set(sectionName, columnName, writeList)
if not os.path.exists(configFilePath) or os.stat(configFilePath).st_size == 0:
with open(configFilePath, 'w') as configWritingFile:
configINI.write(configWritingFile)
noErrors = True
except ValueError as e:
errorString = ("ERROR in {__file__} @{framePrintNo} with {ErrorFound}".format(__file__=str(__file__),
framePrintNo=str(
sys._getframe().f_lineno),
ErrorFound=e))
print(errorString)
noErrors = False
return noErrors
@staticmethod
def _validNumericalFloat(inValue):
"""
Determines if the value is a valid numerical object.
Args:
inValue: floating-point value
Returns: Value in floating-point or Not-A-Number.
"""
try:
return numpy.float128(inValue)
except ValueError:
return numpy.nan
@staticmethod
def _calculateMean(x):
"""
Calculates the mean in a multiplication method since division produces an infinity or NaN
Args:
x: Input data set. We use a data frame.
Returns: Calculated mean for a vector data frame.
"""
try:
mean = numpy.float128(numpy.average(x, weights=numpy.ones_like(numpy.float128(x)) / numpy.float128(x.size)))
except ValueError:
mean = 0
pass
return mean
def _calculateStd(self, data):
"""
        Calculates the standard deviation in a multiplication method since division produces an infinity or NaN
Args:
data: Input data set. We use a data frame.
Returns: Calculated standard deviation for a vector data frame.
"""
sd = 0
try:
n = numpy.float128(data.size)
if n <= 1:
return numpy.float128(0.0)
# Use multiplication version of mean since numpy bug causes infinity.
mean = self._calculateMean(data)
            sd = numpy.float128(0.0)
# Calculate standard deviation
for el in data:
diff = numpy.float128(el) - numpy.float128(mean)
sd += (diff) ** 2
points = numpy.float128(n - 1)
sd = numpy.float128(numpy.sqrt(numpy.float128(sd) / numpy.float128(points)))
except ValueError:
pass
return sd
def _determineQuickStats(self, dataAnalysisFrame, columnName=None, multiplierSigma=3.0):
"""
Determines stats based on a vector to get the data shape.
Args:
dataAnalysisFrame: Dataframe to do analysis on.
columnName: Column name of the data frame.
multiplierSigma: Sigma range for the stats.
Returns: Set of stats.
"""
meanValue = 0
sigmaValue = 0
sigmaRangeValue = 0
topValue = 0
try:
            # Clean out anomaly due to random invalid inputs.
if (columnName is not None):
meanValue = self._calculateMean(dataAnalysisFrame[columnName])
                if numpy.isnan(meanValue):
meanValue = numpy.float128(1)
sigmaValue = self._calculateStd(dataAnalysisFrame[columnName])
                if numpy.isnan(float(sigmaValue)):
sigmaValue = numpy.float128(1)
multiplier = numpy.float128(multiplierSigma) # Stats: 1 sigma = 68%, 2 sigma = 95%, 3 sigma = 99.7
sigmaRangeValue = (sigmaValue * multiplier)
                if numpy.isnan(float(sigmaRangeValue)):
sigmaRangeValue = numpy.float128(1)
topValue = numpy.float128(meanValue + sigmaRangeValue)
print("Name:{} Mean= {}, Sigma= {}, {}*Sigma= {}".format(columnName,
meanValue,
sigmaValue,
multiplier,
sigmaRangeValue))
except ValueError:
pass
return (meanValue, sigmaValue, sigmaRangeValue, topValue)
def _cleanZerosForColumnInFrame(self, dataAnalysisFrame, columnName='cycles'):
"""
Cleans the data frame with data values that are invalid. I.E. inf, NaN
Args:
dataAnalysisFrame: Dataframe to do analysis on.
columnName: Column name of the data frame.
Returns: Cleaned dataframe.
"""
dataAnalysisCleaned = None
try:
            # Clean out anomaly due to random invalid inputs.
(meanValue, sigmaValue, sigmaRangeValue, topValue) = self._determineQuickStats(
dataAnalysisFrame=dataAnalysisFrame, columnName=columnName)
# dataAnalysisCleaned = dataAnalysisFrame[dataAnalysisFrame[columnName] != 0]
# When the cycles are negative or zero we missed cleaning up a row.
# logicVector = (dataAnalysisFrame[columnName] != 0)
# dataAnalysisCleaned = dataAnalysisFrame[logicVector]
            logicVector = (dataAnalysisFrame[columnName] >= 1)
            dataAnalysisCleaned = dataAnalysisFrame[logicVector]
# These timed out mean + 2 * sd
logicVector = (dataAnalysisCleaned[columnName] < topValue) # Data range
dataAnalysisCleaned = dataAnalysisCleaned[logicVector]
except ValueError:
pass
return dataAnalysisCleaned
def _cleanFrame(self, dataAnalysisTemp, cleanColumn=False, columnName='cycles'):
"""
Args:
dataAnalysisTemp: Dataframe to do analysis on.
cleanColumn: Flag to clean the data frame.
columnName: Column name of the data frame.
Returns: cleaned dataframe
"""
try:
replacementList = [pandas.NaT, numpy.Infinity, numpy.NINF, 'NaN', 'inf', '-inf', 'NULL']
if cleanColumn is True:
dataAnalysisTemp = self._cleanZerosForColumnInFrame(dataAnalysisTemp, columnName=columnName)
dataAnalysisTemp = dataAnalysisTemp.replace(to_replace=replacementList,
value=numpy.nan)
dataAnalysisTemp = dataAnalysisTemp.dropna()
except ValueError:
pass
return dataAnalysisTemp
@staticmethod
def _getDataFormat():
"""
Return the dataframe setup for the CSV file generated from server.
Returns: dictionary data format for pandas.
"""
dataFormat = {
"Serial_Number": pandas.StringDtype(),
"LogTime0": pandas.StringDtype(), # @todo force rename
"Id0": pandas.StringDtype(), # @todo force rename
"DriveId": pandas.StringDtype(),
"JobRunId": pandas.StringDtype(),
"LogTime1": pandas.StringDtype(), # @todo force rename
"Comment0": pandas.StringDtype(), # @todo force rename
"CriticalWarning": pandas.StringDtype(),
"Temperature": pandas.StringDtype(),
"AvailableSpare": pandas.StringDtype(),
"AvailableSpareThreshold": pandas.StringDtype(),
"PercentageUsed": pandas.StringDtype(),
"DataUnitsReadL": pandas.StringDtype(),
"DataUnitsReadU": pandas.StringDtype(),
"DataUnitsWrittenL": pandas.StringDtype(),
"DataUnitsWrittenU": pandas.StringDtype(),
"HostReadCommandsL": pandas.StringDtype(),
"HostReadCommandsU": pandas.StringDtype(),
"HostWriteCommandsL": pandas.StringDtype(),
"HostWriteCommandsU": pandas.StringDtype(),
"ControllerBusyTimeL": pandas.StringDtype(),
"ControllerBusyTimeU": pandas.StringDtype(),
"PowerCyclesL": pandas.StringDtype(),
"PowerCyclesU": pandas.StringDtype(),
"PowerOnHoursL": pandas.StringDtype(),
"PowerOnHoursU": pandas.StringDtype(),
"UnsafeShutdownsL": pandas.StringDtype(),
"UnsafeShutdownsU": pandas.StringDtype(),
"MediaErrorsL": pandas.StringDtype(),
"MediaErrorsU": pandas.StringDtype(),
"NumErrorInfoLogsL": pandas.StringDtype(),
"NumErrorInfoLogsU": pandas.StringDtype(),
"ProgramFailCountN": pandas.StringDtype(),
"ProgramFailCountR": pandas.StringDtype(),
"EraseFailCountN": pandas.StringDtype(),
"EraseFailCountR": pandas.StringDtype(),
"WearLevelingCountN": pandas.StringDtype(),
"WearLevelingCountR": pandas.StringDtype(),
"E2EErrorDetectCountN": pandas.StringDtype(),
"E2EErrorDetectCountR": pandas.StringDtype(),
"CRCErrorCountN": pandas.StringDtype(),
"CRCErrorCountR": pandas.StringDtype(),
"MediaWearPercentageN": pandas.StringDtype(),
"MediaWearPercentageR": pandas.StringDtype(),
"HostReadsN": pandas.StringDtype(),
"HostReadsR": pandas.StringDtype(),
"TimedWorkloadN": pandas.StringDtype(),
"TimedWorkloadR": pandas.StringDtype(),
"ThermalThrottleStatusN": pandas.StringDtype(),
"ThermalThrottleStatusR": pandas.StringDtype(),
"RetryBuffOverflowCountN": pandas.StringDtype(),
"RetryBuffOverflowCountR": pandas.StringDtype(),
"PLLLockLossCounterN": pandas.StringDtype(),
"PLLLockLossCounterR": pandas.StringDtype(),
"NandBytesWrittenN": pandas.StringDtype(),
"NandBytesWrittenR": pandas.StringDtype(),
"HostBytesWrittenN": pandas.StringDtype(),
"HostBytesWrittenR": pandas.StringDtype(),
"SystemAreaLifeRemainingN": pandas.StringDtype(),
"SystemAreaLifeRemainingR": pandas.StringDtype(),
"RelocatableSectorCountN": pandas.StringDtype(),
"RelocatableSectorCountR": pandas.StringDtype(),
"SoftECCErrorRateN": pandas.StringDtype(),
"SoftECCErrorRateR": pandas.StringDtype(),
"UnexpectedPowerLossN": pandas.StringDtype(),
"UnexpectedPowerLossR": pandas.StringDtype(),
"MediaErrorCountN": pandas.StringDtype(),
"MediaErrorCountR": pandas.StringDtype(),
"NandBytesReadN": pandas.StringDtype(),
"NandBytesReadR": pandas.StringDtype(),
"WarningCompTempTime": pandas.StringDtype(),
"CriticalCompTempTime": pandas.StringDtype(),
"TempSensor1": pandas.StringDtype(),
"TempSensor2": pandas.StringDtype(),
"TempSensor3": pandas.StringDtype(),
"TempSensor4": pandas.StringDtype(),
"TempSensor5": pandas.StringDtype(),
"TempSensor6": pandas.StringDtype(),
"TempSensor7": pandas.StringDtype(),
"TempSensor8": pandas.StringDtype(),
"ThermalManagementTemp1TransitionCount": pandas.StringDtype(),
"ThermalManagementTemp2TransitionCount": pandas.StringDtype(),
"TotalTimeForThermalManagementTemp1": pandas.StringDtype(),
"TotalTimeForThermalManagementTemp2": pandas.StringDtype(),
"Core_Num": pandas.StringDtype(),
"Id1": pandas.StringDtype(), # @todo force rename
"Job_Run_Id": pandas.StringDtype(),
"Stats_Time": pandas.StringDtype(),
"HostReads": pandas.StringDtype(),
"HostWrites": pandas.StringDtype(),
"NandReads": pandas.StringDtype(),
"NandWrites": pandas.StringDtype(),
"ProgramErrors": pandas.StringDtype(),
"EraseErrors": pandas.StringDtype(),
"ErrorCount": pandas.StringDtype(),
"BitErrorsHost1": pandas.StringDtype(),
"BitErrorsHost2": pandas.StringDtype(),
"BitErrorsHost3": pandas.StringDtype(),
"BitErrorsHost4": pandas.StringDtype(),
"BitErrorsHost5": pandas.StringDtype(),
"BitErrorsHost6": pandas.StringDtype(),
"BitErrorsHost7": pandas.StringDtype(),
"BitErrorsHost8": pandas.StringDtype(),
"BitErrorsHost9": pandas.StringDtype(),
"BitErrorsHost10": pandas.StringDtype(),
"BitErrorsHost11": pandas.StringDtype(),
"BitErrorsHost12": pandas.StringDtype(),
"BitErrorsHost13": pandas.StringDtype(),
"BitErrorsHost14": pandas.StringDtype(),
"BitErrorsHost15": pandas.StringDtype(),
"ECCFail": pandas.StringDtype(),
"GrownDefects": pandas.StringDtype(),
"FreeMemory": pandas.StringDtype(),
"WriteAllowance": pandas.StringDtype(),
"ModelString": pandas.StringDtype(),
"ValidBlocks": pandas.StringDtype(),
"TokenBlocks": pandas.StringDtype(),
"SpuriousPFCount": pandas.StringDtype(),
"SpuriousPFLocations1": pandas.StringDtype(),
"SpuriousPFLocations2": pandas.StringDtype(),
"SpuriousPFLocations3": pandas.StringDtype(),
"SpuriousPFLocations4": pandas.StringDtype(),
"SpuriousPFLocations5": pandas.StringDtype(),
"SpuriousPFLocations6": pandas.StringDtype(),
"SpuriousPFLocations7": pandas.StringDtype(),
"SpuriousPFLocations8": pandas.StringDtype(),
"BitErrorsNonHost1": pandas.StringDtype(),
"BitErrorsNonHost2": pandas.StringDtype(),
"BitErrorsNonHost3": pandas.StringDtype(),
"BitErrorsNonHost4": pandas.StringDtype(),
"BitErrorsNonHost5": pandas.StringDtype(),
"BitErrorsNonHost6": pandas.StringDtype(),
"BitErrorsNonHost7": pandas.StringDtype(),
"BitErrorsNonHost8": pandas.StringDtype(),
"BitErrorsNonHost9": pandas.StringDtype(),
"BitErrorsNonHost10": pandas.StringDtype(),
"BitErrorsNonHost11": pandas.StringDtype(),
"BitErrorsNonHost12": pandas.StringDtype(),
"BitErrorsNonHost13": pandas.StringDtype(),
"BitErrorsNonHost14": pandas.StringDtype(),
"BitErrorsNonHost15": pandas.StringDtype(),
"ECCFailNonHost": pandas.StringDtype(),
"NSversion": pandas.StringDtype(),
"numBands": pandas.StringDtype(),
"minErase": pandas.StringDtype(),
"maxErase": pandas.StringDtype(),
"avgErase": pandas.StringDtype(),
"minMVolt": pandas.StringDtype(),
"maxMVolt": pandas.StringDtype(),
"avgMVolt": pandas.StringDtype(),
"minMAmp": pandas.StringDtype(),
"maxMAmp": pandas.StringDtype(),
"avgMAmp": pandas.StringDtype(),
"comment1": pandas.StringDtype(), # @todo force rename
"minMVolt12v": pandas.StringDtype(),
"maxMVolt12v": pandas.StringDtype(),
"avgMVolt12v": pandas.StringDtype(),
"minMAmp12v": pandas.StringDtype(),
"maxMAmp12v": pandas.StringDtype(),
"avgMAmp12v": pandas.StringDtype(),
"nearMissSector": pandas.StringDtype(),
"nearMissDefect": pandas.StringDtype(),
"nearMissOverflow": pandas.StringDtype(),
"replayUNC": pandas.StringDtype(),
"Drive_Id": pandas.StringDtype(),
"indirectionMisses": pandas.StringDtype(),
"BitErrorsHost16": pandas.StringDtype(),
"BitErrorsHost17": pandas.StringDtype(),
"BitErrorsHost18": pandas.StringDtype(),
"BitErrorsHost19": pandas.StringDtype(),
"BitErrorsHost20": pandas.StringDtype(),
"BitErrorsHost21": pandas.StringDtype(),
"BitErrorsHost22": pandas.StringDtype(),
"BitErrorsHost23": pandas.StringDtype(),
"BitErrorsHost24": pandas.StringDtype(),
"BitErrorsHost25": pandas.StringDtype(),
"BitErrorsHost26": pandas.StringDtype(),
"BitErrorsHost27": pandas.StringDtype(),
"BitErrorsHost28": pandas.StringDtype(),
"BitErrorsHost29": pandas.StringDtype(),
"BitErrorsHost30": pandas.StringDtype(),
"BitErrorsHost31": pandas.StringDtype(),
"BitErrorsHost32": pandas.StringDtype(),
"BitErrorsHost33": pandas.StringDtype(),
"BitErrorsHost34": pandas.StringDtype(),
"BitErrorsHost35": pandas.StringDtype(),
"BitErrorsHost36": pandas.StringDtype(),
"BitErrorsHost37": pandas.StringDtype(),
"BitErrorsHost38": pandas.StringDtype(),
"BitErrorsHost39": pandas.StringDtype(),
"BitErrorsHost40": pandas.StringDtype(),
"XORRebuildSuccess": pandas.StringDtype(),
"XORRebuildFail": pandas.StringDtype(),
"BandReloForError": pandas.StringDtype(),
"mrrSuccess": pandas.StringDtype(),
"mrrFail": pandas.StringDtype(),
"mrrNudgeSuccess": pandas.StringDtype(),
"mrrNudgeHarmless": pandas.StringDtype(),
"mrrNudgeFail": pandas.StringDtype(),
"totalErases": pandas.StringDtype(),
"dieOfflineCount": pandas.StringDtype(),
"curtemp": pandas.StringDtype(),
"mintemp": pandas.StringDtype(),
"maxtemp": pandas.StringDtype(),
"oventemp": pandas.StringDtype(),
"allZeroSectors": pandas.StringDtype(),
"ctxRecoveryEvents": pandas.StringDtype(),
"ctxRecoveryErases": pandas.StringDtype(),
"NSversionMinor": pandas.StringDtype(),
"lifeMinTemp": pandas.StringDtype(),
"lifeMaxTemp": pandas.StringDtype(),
"powerCycles": pandas.StringDtype(),
"systemReads": pandas.StringDtype(),
"systemWrites": pandas.StringDtype(),
"readRetryOverflow": pandas.StringDtype(),
"unplannedPowerCycles": pandas.StringDtype(),
"unsafeShutdowns": pandas.StringDtype(),
"defragForcedReloCount": pandas.StringDtype(),
"bandReloForBDR": pandas.StringDtype(),
"bandReloForDieOffline": pandas.StringDtype(),
"bandReloForPFail": pandas.StringDtype(),
"bandReloForWL": pandas.StringDtype(),
"provisionalDefects": pandas.StringDtype(),
"uncorrectableProgErrors": pandas.StringDtype(),
"powerOnSeconds": pandas.StringDtype(),
"bandReloForChannelTimeout": pandas.StringDtype(),
"fwDowngradeCount": pandas.StringDtype(),
"dramCorrectablesTotal": pandas.StringDtype(),
"hb_id": pandas.StringDtype(),
"dramCorrectables1to1": pandas.StringDtype(),
"dramCorrectables4to1": pandas.StringDtype(),
"dramCorrectablesSram": pandas.StringDtype(),
"dramCorrectablesUnknown": pandas.StringDtype(),
"pliCapTestInterval": pandas.StringDtype(),
"pliCapTestCount": pandas.StringDtype(),
"pliCapTestResult": pandas.StringDtype(),
"pliCapTestTimeStamp": pandas.StringDtype(),
"channelHangSuccess": pandas.StringDtype(),
"channelHangFail": pandas.StringDtype(),
"BitErrorsHost41": pandas.StringDtype(),
"BitErrorsHost42": pandas.StringDtype(),
"BitErrorsHost43": pandas.StringDtype(),
"BitErrorsHost44": pandas.StringDtype(),
"BitErrorsHost45": pandas.StringDtype(),
"BitErrorsHost46": pandas.StringDtype(),
"BitErrorsHost47": pandas.StringDtype(),
"BitErrorsHost48": pandas.StringDtype(),
"BitErrorsHost49": pandas.StringDtype(),
"BitErrorsHost50": pandas.StringDtype(),
"BitErrorsHost51": pandas.StringDtype(),
"BitErrorsHost52": pandas.StringDtype(),
"BitErrorsHost53": pandas.StringDtype(),
"BitErrorsHost54": pandas.StringDtype(),
"BitErrorsHost55": pandas.StringDtype(),
"BitErrorsHost56": pandas.StringDtype(),
"mrrNearMiss": pandas.StringDtype(),
"mrrRereadAvg": pandas.StringDtype(),
"readDisturbEvictions": pandas.StringDtype(),
"L1L2ParityError": pandas.StringDtype(),
"pageDefects": pandas.StringDtype(),
"pageProvisionalTotal": pandas.StringDtype(),
"ASICTemp": pandas.StringDtype(),
"PMICTemp": pandas.StringDtype(),
"size": pandas.StringDtype(),
"lastWrite": pandas.StringDtype(),
"timesWritten": pandas.StringDtype(),
"maxNumContextBands": pandas.StringDtype(),
"blankCount": pandas.StringDtype(),
"cleanBands": pandas.StringDtype(),
"avgTprog": pandas.StringDtype(),
"avgEraseCount": pandas.StringDtype(),
"edtcHandledBandCnt": pandas.StringDtype(),
"bandReloForNLBA": pandas.StringDtype(),
"bandCrossingDuringPliCount": pandas.StringDtype(),
"bitErrBucketNum": pandas.StringDtype(),
"sramCorrectablesTotal": pandas.StringDtype(),
"l1SramCorrErrCnt": pandas.StringDtype(),
"l2SramCorrErrCnt": pandas.StringDtype(),
"parityErrorValue": pandas.StringDtype(),
"parityErrorType": pandas.StringDtype(),
"mrr_LutValidDataSize": pandas.StringDtype(),
"pageProvisionalDefects": pandas.StringDtype(),
"plisWithErasesInProgress": pandas.StringDtype(),
"lastReplayDebug": pandas.StringDtype(),
"externalPreReadFatals": pandas.StringDtype(),
"hostReadCmd": pandas.StringDtype(),
"hostWriteCmd": pandas.StringDtype(),
"trimmedSectors": pandas.StringDtype(),
"trimTokens": pandas.StringDtype(),
"mrrEventsInCodewords": pandas.StringDtype(),
"mrrEventsInSectors": pandas.StringDtype(),
"powerOnMicroseconds": pandas.StringDtype(),
"mrrInXorRecEvents": pandas.StringDtype(),
"mrrFailInXorRecEvents": pandas.StringDtype(),
"mrrUpperpageEvents": pandas.StringDtype(),
"mrrLowerpageEvents": pandas.StringDtype(),
"mrrSlcpageEvents": pandas.StringDtype(),
"mrrReReadTotal": pandas.StringDtype(),
"powerOnResets": pandas.StringDtype(),
"powerOnMinutes": pandas.StringDtype(),
"throttleOnMilliseconds": pandas.StringDtype(),
"ctxTailMagic": pandas.StringDtype(),
"contextDropCount": pandas.StringDtype(),
"lastCtxSequenceId": pandas.StringDtype(),
"currCtxSequenceId": pandas.StringDtype(),
"mbliEraseCount": pandas.StringDtype(),
"pageAverageProgramCount": pandas.StringDtype(),
"bandAverageEraseCount": pandas.StringDtype(),
"bandTotalEraseCount": pandas.StringDtype(),
"bandReloForXorRebuildFail": pandas.StringDtype(),
"defragSpeculativeMiss": pandas.StringDtype(),
"uncorrectableBackgroundScan": pandas.StringDtype(),
"BitErrorsHost57": pandas.StringDtype(),
"BitErrorsHost58": pandas.StringDtype(),
"BitErrorsHost59": pandas.StringDtype(),
"BitErrorsHost60": pandas.StringDtype(),
"BitErrorsHost61": pandas.StringDtype(),
"BitErrorsHost62": pandas.StringDtype(),
"BitErrorsHost63": pandas.StringDtype(),
"BitErrorsHost64": pandas.StringDtype(),
"BitErrorsHost65": pandas.StringDtype(),
"BitErrorsHost66": pandas.StringDtype(),
"BitErrorsHost67": pandas.StringDtype(),
"BitErrorsHost68": pandas.StringDtype(),
"BitErrorsHost69": pandas.StringDtype(),
"BitErrorsHost70": pandas.StringDtype(),
"BitErrorsHost71": pandas.StringDtype(),
"BitErrorsHost72": pandas.StringDtype(),
"BitErrorsHost73": pandas.StringDtype(),
"BitErrorsHost74": pandas.StringDtype(),
"BitErrorsHost75": pandas.StringDtype(),
"BitErrorsHost76": pandas.StringDtype(),
"BitErrorsHost77": pandas.StringDtype(),
"BitErrorsHost78": pandas.StringDtype(),
"BitErrorsHost79": pandas.StringDtype(),
"BitErrorsHost80": pandas.StringDtype(),
"bitErrBucketArray1": pandas.StringDtype(),
"bitErrBucketArray2": pandas.StringDtype(),
"bitErrBucketArray3": pandas.StringDtype(),
"bitErrBucketArray4": pandas.StringDtype(),
"bitErrBucketArray5": pandas.StringDtype(),
"bitErrBucketArray6": pandas.StringDtype(),
"bitErrBucketArray7": pandas.StringDtype(),
"bitErrBucketArray8": pandas.StringDtype(),
"bitErrBucketArray9": pandas.StringDtype(),
"bitErrBucketArray10": pandas.StringDtype(),
"bitErrBucketArray11": pandas.StringDtype(),
"bitErrBucketArray12": pandas.StringDtype(),
"bitErrBucketArray13": pandas.StringDtype(),
"bitErrBucketArray14": pandas.StringDtype(),
"bitErrBucketArray15": pandas.StringDtype(),
"bitErrBucketArray16": pandas.StringDtype(),
"bitErrBucketArray17": pandas.StringDtype(),
"bitErrBucketArray18": pandas.StringDtype(),
"bitErrBucketArray19": pandas.StringDtype(),
"bitErrBucketArray20": pandas.StringDtype(),
"bitErrBucketArray21": pandas.StringDtype(),
"bitErrBucketArray22": pandas.StringDtype(),
"bitErrBucketArray23": pandas.StringDtype(),
"bitErrBucketArray24": pandas.StringDtype(),
"bitErrBucketArray25": pandas.StringDtype(),
"bitErrBucketArray26": pandas.StringDtype(),
"bitErrBucketArray27": pandas.StringDtype(),
"bitErrBucketArray28": pandas.StringDtype(),
"bitErrBucketArray29": pandas.StringDtype(),
"bitErrBucketArray30": pandas.StringDtype(),
"bitErrBucketArray31": pandas.StringDtype(),
"bitErrBucketArray32": pandas.StringDtype(),
"bitErrBucketArray33": pandas.StringDtype(),
"bitErrBucketArray34": pandas.StringDtype(),
"bitErrBucketArray35": pandas.StringDtype(),
"bitErrBucketArray36": pandas.StringDtype(),
"bitErrBucketArray37": pandas.StringDtype(),
"bitErrBucketArray38": pandas.StringDtype(),
"bitErrBucketArray39": pandas.StringDtype(),
"bitErrBucketArray40": pandas.StringDtype(),
"bitErrBucketArray41": pandas.StringDtype(),
"bitErrBucketArray42": pandas.StringDtype(),
"bitErrBucketArray43": pandas.StringDtype(),
"bitErrBucketArray44": pandas.StringDtype(),
"bitErrBucketArray45": pandas.StringDtype(),
"bitErrBucketArray46": pandas.StringDtype(),
"bitErrBucketArray47": pandas.StringDtype(),
"bitErrBucketArray48": pandas.StringDtype(),
"bitErrBucketArray49": pandas.StringDtype(),
"bitErrBucketArray50": pandas.StringDtype(),
"bitErrBucketArray51": pandas.StringDtype(),
"bitErrBucketArray52": pandas.StringDtype(),
"bitErrBucketArray53": pandas.StringDtype(),
"bitErrBucketArray54": pandas.StringDtype(),
"bitErrBucketArray55": pandas.StringDtype(),
"bitErrBucketArray56": pandas.StringDtype(),
"bitErrBucketArray57": pandas.StringDtype(),
"bitErrBucketArray58": pandas.StringDtype(),
"bitErrBucketArray59": pandas.StringDtype(),
"bitErrBucketArray60": pandas.StringDtype(),
"bitErrBucketArray61": pandas.StringDtype(),
"bitErrBucketArray62": pandas.StringDtype(),
"bitErrBucketArray63": pandas.StringDtype(),
"bitErrBucketArray64": pandas.StringDtype(),
"bitErrBucketArray65": pandas.StringDtype(),
"bitErrBucketArray66": pandas.StringDtype(),
"bitErrBucketArray67": pandas.StringDtype(),
"bitErrBucketArray68": pandas.StringDtype(),
"bitErrBucketArray69": pandas.StringDtype(),
"bitErrBucketArray70": pandas.StringDtype(),
"bitErrBucketArray71": pandas.StringDtype(),
"bitErrBucketArray72": pandas.StringDtype(),
"bitErrBucketArray73": pandas.StringDtype(),
"bitErrBucketArray74": pandas.StringDtype(),
"bitErrBucketArray75": pandas.StringDtype(),
"bitErrBucketArray76": pandas.StringDtype(),
"bitErrBucketArray77": pandas.StringDtype(),
"bitErrBucketArray78": pandas.StringDtype(),
"bitErrBucketArray79": pandas.StringDtype(),
"bitErrBucketArray80": pandas.StringDtype(),
"mrr_successDistribution1": pandas.StringDtype(),
"mrr_successDistribution2": pandas.StringDtype(),
"mrr_successDistribution3": pandas.StringDtype(),
"mrr_successDistribution4": pandas.StringDtype(),
"mrr_successDistribution5": pandas.StringDtype(),
"mrr_successDistribution6": pandas.StringDtype(),
"mrr_successDistribution7": pandas.StringDtype(),
"mrr_successDistribution8": pandas.StringDtype(),
"mrr_successDistribution9": pandas.StringDtype(),
"mrr_successDistribution10": pandas.StringDtype(),
"mrr_successDistribution11": pandas.StringDtype(),
"mrr_successDistribution12": pandas.StringDtype(),
"mrr_successDistribution13": pandas.StringDtype(),
"mrr_successDistribution14": pandas.StringDtype(),
"mrr_successDistribution15": pandas.StringDtype(),
"mrr_successDistribution16": pandas.StringDtype(),
"mrr_successDistribution17": pandas.StringDtype(),
"mrr_successDistribution18": pandas.StringDtype(),
"mrr_successDistribution19": pandas.StringDtype(),
"mrr_successDistribution20": pandas.StringDtype(),
"mrr_successDistribution21": pandas.StringDtype(),
"mrr_successDistribution22": pandas.StringDtype(),
"mrr_successDistribution23": pandas.StringDtype(),
"mrr_successDistribution24": pandas.StringDtype(),
"mrr_successDistribution25": pandas.StringDtype(),
"mrr_successDistribution26": pandas.StringDtype(),
"mrr_successDistribution27": pandas.StringDtype(),
"mrr_successDistribution28": pandas.StringDtype(),
"mrr_successDistribution29": pandas.StringDtype(),
"mrr_successDistribution30": pandas.StringDtype(),
"mrr_successDistribution31": pandas.StringDtype(),
"mrr_successDistribution32": pandas.StringDtype(),
"mrr_successDistribution33": pandas.StringDtype(),
"mrr_successDistribution34": pandas.StringDtype(),
"mrr_successDistribution35": pandas.StringDtype(),
"mrr_successDistribution36": pandas.StringDtype(),
"mrr_successDistribution37": pandas.StringDtype(),
"mrr_successDistribution38": pandas.StringDtype(),
"mrr_successDistribution39": pandas.StringDtype(),
"mrr_successDistribution40": pandas.StringDtype(),
"mrr_successDistribution41": pandas.StringDtype(),
"mrr_successDistribution42": pandas.StringDtype(),
"mrr_successDistribution43": pandas.StringDtype(),
"mrr_successDistribution44": pandas.StringDtype(),
"mrr_successDistribution45": pandas.StringDtype(),
"mrr_successDistribution46": pandas.StringDtype(),
"mrr_successDistribution47": pandas.StringDtype(),
"mrr_successDistribution48": pandas.StringDtype(),
"mrr_successDistribution49": pandas.StringDtype(),
"mrr_successDistribution50": pandas.StringDtype(),
"mrr_successDistribution51": pandas.StringDtype(),
"mrr_successDistribution52": pandas.StringDtype(),
"mrr_successDistribution53": pandas.StringDtype(),
"mrr_successDistribution54": pandas.StringDtype(),
"mrr_successDistribution55": pandas.StringDtype(),
"mrr_successDistribution56": pandas.StringDtype(),
"mrr_successDistribution57": pandas.StringDtype(),
"mrr_successDistribution58": pandas.StringDtype(),
"mrr_successDistribution59": pandas.StringDtype(),
"mrr_successDistribution60": pandas.StringDtype(),
"mrr_successDistribution61": pandas.StringDtype(),
"mrr_successDistribution62": pandas.StringDtype(),
"mrr_successDistribution63": pandas.StringDtype(),
"mrr_successDistribution64": pandas.StringDtype(),
"blDowngradeCount": pandas.StringDtype(),
"snapReads": pandas.StringDtype(),
"pliCapTestTime": pandas.StringDtype(),
"currentTimeToFreeSpaceRecovery": pandas.StringDtype(),
"worstTimeToFreeSpaceRecovery": pandas.StringDtype(),
"rspnandReads": pandas.StringDtype(),
"cachednandReads": pandas.StringDtype(),
"spnandReads": pandas.StringDtype(),
"dpnandReads": pandas.StringDtype(),
"qpnandReads": pandas.StringDtype(),
"verifynandReads": pandas.StringDtype(),
"softnandReads": pandas.StringDtype(),
"spnandWrites": pandas.StringDtype(),
"dpnandWrites": | pandas.StringDtype() | pandas.StringDtype |
import numpy as np
import pandas as pd
import sys
import pickle
import matplotlib.pyplot as plt
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas
import pyqtgraph
from PyQt5.QtWidgets import *
from PyQt5.QtGui import *
from PyQt5.QtCore import *
from PyQt5.QtTest import *
from Model_module import Model_module
from Data_module import Data_module
# from Sub_widget import another_result_explain
class Worker(QObject):
    # Signals used to push results from the worker to the GUI thread #############
train_value = pyqtSignal(object)
# nor_ab_value = pyqtSignal(object)
procedure_value = pyqtSignal(object)
verif_value = pyqtSignal(object)
timer = pyqtSignal(object)
symptom_db = pyqtSignal(object)
shap = pyqtSignal(object)
plot_db = pyqtSignal(object)
display_ex = pyqtSignal(object, object, object)
another_shap = pyqtSignal(object, object, object)
another_shap_table = pyqtSignal(object)
##########################################
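    # Payloads emitted by generate_db():
    #   train_value      -> reconstruction error of the trained/untrained classifier
    #   procedure_value  -> argmax index (0~15) of the abnormal-procedure prediction
    #   verif_value      -> [verification reconstruction error, verification threshold]
    #   timer            -> [current row index, check_parameter]
    #   symptom_db       -> [predicted procedure index, check_parameter]
    #   shap, plot_db, display_ex, another_shap, another_shap_table -> XAI tables and plotting data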
@pyqtSlot(object)
def generate_db(self):
        test_db = input('Enter the scenario to run : ')
        print(f'Running the entered scenario : {test_db}')
        model_module = Model_module()  # initialize the empty buffers inside the model module
        data_module = Data_module()
        db, check_db = data_module.load_data(file_name=test_db)  # load the selected scenario data
        data_module.data_processing()  # Min-Max scaling applied, reshaped to 2-D
liner = []
plot_data = []
normal_data = []
compare_data = {'Normal':[], 'Ab21-01':[], 'Ab21-02':[], 'Ab20-04':[], 'Ab15-07':[], 'Ab15-08':[], 'Ab63-04':[], 'Ab63-02':[], 'Ab21-12':[], 'Ab19-02':[], 'Ab21-11':[], 'Ab23-03':[], 'Ab60-02':[], 'Ab59-02':[], 'Ab23-01':[], 'Ab23-06':[]}
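        # Per-scenario reference trajectories for the comparison plots. normal_db, ab21_01, ..., ab23_06
        # are expected to be loaded elsewhere (e.g. at module level); the try/except blocks below simply
        # skip any reference that is not available.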
for line in range(np.shape(db)[0]):
QTest.qWait(0.01)
print(np.shape(db)[0], line)
data = np.array([data_module.load_real_data(row=line)])
liner.append(line)
check_data, check_parameter = data_module.load_real_check_data(row=line)
plot_data.append(check_data[0])
try: normal_data.append(normal_db.iloc[line])
except: pass
try: compare_data['Normal'].append(normal_db.iloc[line])
except: pass
try: compare_data['Ab21-01'].append(ab21_01.iloc[line])
except: pass
try: compare_data['Ab21-02'].append(ab21_02.iloc[line])
except: pass
try: compare_data['Ab20-04'].append(ab20_04.iloc[line])
except: pass
try: compare_data['Ab15-07'].append(ab15_07.iloc[line])
except: pass
try: compare_data['Ab15-08'].append(ab15_08.iloc[line])
except: pass
try: compare_data['Ab63-04'].append(ab63_04.iloc[line])
except: pass
try: compare_data['Ab63-02'].append(ab63_02.iloc[line])
except: pass
try: compare_data['Ab21-12'].append(ab21_12.iloc[line])
except: pass
try: compare_data['Ab19-02'].append(ab19_02.iloc[line])
except: pass
try: compare_data['Ab21-11'].append(ab21_11.iloc[line])
except: pass
try: compare_data['Ab23-03'].append(ab23_03.iloc[line])
except: pass
try: compare_data['Ab60-02'].append(ab60_02.iloc[line])
except: pass
try: compare_data['Ab59-02'].append(ab59_02.iloc[line])
except: pass
try: compare_data['Ab23-01'].append(ab23_01.iloc[line])
except: pass
try: compare_data['Ab23-06'].append(ab23_06.iloc[line])
except: pass
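            # Inference runs only once the stacked input reaches a full window of 10 time steps x 46 parameters.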
if np.shape(data) == (1, 10, 46):
                dim2 = np.array(data_module.load_scaled_data(row=line - 9))  # scaled 2-D window covering the last 10 time steps
# check_data, check_parameter = data_module.load_real_check_data(row=line - 8)
# plot_data.append(check_data[0])
train_untrain_reconstruction_error, train_untrain_error = model_module.train_untrain_classifier(data=data)
# normal_abnormal_reconstruction_error = model_module.normal_abnormal_classifier(data=data)
abnormal_procedure_result, abnormal_procedure_prediction, shap_add_des, shap_value = model_module.abnormal_procedure_classifier(data=dim2)
abnormal_verif_reconstruction_error, verif_threshold, abnormal_verif_error = model_module.abnormal_procedure_verification(data=data)
self.train_value.emit(train_untrain_error)
# self.nor_ab_value.emit(np.argmax(abnormal_procedure_result[line-9], axis=1)[0])
self.procedure_value.emit(np.argmax(abnormal_procedure_prediction, axis=1)[0])
self.verif_value.emit([abnormal_verif_error, verif_threshold])
self.timer.emit([line, check_parameter])
self.symptom_db.emit([np.argmax(abnormal_procedure_prediction, axis=1)[0], check_parameter])
self.shap.emit(shap_add_des)
self.plot_db.emit([liner, plot_data])
self.display_ex.emit(shap_add_des, [liner, plot_data], normal_data)
self.another_shap.emit(shap_value, [liner, plot_data], compare_data)
self.another_shap_table.emit(shap_value)
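# Delegate that centers the cell text of any view it is applied to (used for the SHAP result tables).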
class AlignDelegate(QStyledItemDelegate):
def initStyleOption(self, option, index):
super(AlignDelegate, self).initStyleOption(option, index)
option.displayAlignment = Qt.AlignCenter
class Mainwindow(QWidget):
def __init__(self):
super().__init__()
self.setWindowTitle("Real-Time Abnormal Diagnosis for NPP")
self.setGeometry(150, 50, 1700, 800)
# 그래프 초기조건
pyqtgraph.setConfigOption("background", "w")
pyqtgraph.setConfigOption("foreground", "k")
#############################################
self.selected_para = pd.read_csv('./DataBase/Final_parameter.csv')
# GUI part 1 Layout (진단 부분 통합)
layout_left = QVBoxLayout()
        # Group 0 (Time and Power)
        gb_0 = QGroupBox('Time and Power')  # title of group 0
        layout_left.addWidget(gb_0)  # add group 0 to the overall frame
        gb_0_layout = QBoxLayout(QBoxLayout.LeftToRight)  # layout holding the contents of group 0
# 첫 번째 그룹 설정
gb_1 = QGroupBox("Training Status") # 첫 번째 그룹 이름 설정
layout_left.addWidget(gb_1) # 전체 틀에 첫 번째 그룹 넣기
gb_1_layout = QBoxLayout(QBoxLayout.LeftToRight) # 첫 번째 그룹 내용을 넣을 레이아웃 설정
# 두 번째 그룹 설정
gb_2 = QGroupBox('NPP Status')
layout_left.addWidget(gb_2)
gb_2_layout = QBoxLayout(QBoxLayout.LeftToRight)
# 세 번째 그룹 설정
gb_3 = QGroupBox(self)
layout_left.addWidget(gb_3)
gb_3_layout = QBoxLayout(QBoxLayout.LeftToRight)
# 네 번째 그룹 설정
gb_4 = QGroupBox('Predicted Result Verification')
layout_left.addWidget(gb_4)
gb_4_layout = QBoxLayout(QBoxLayout.LeftToRight)
# 다섯 번째 그룹 설정
gb_5 = QGroupBox('Symptom check in scenario')
layout_left.addWidget(gb_5)
gb_5_layout = QBoxLayout(QBoxLayout.TopToBottom)
# Spacer 추가
# layout_part1.addItem(QSpacerItem(20, 40, QSizePolicy.Minimum, QSizePolicy.Expanding))
# 영 번째 그룹 내용
self.time_label = QLabel(self)
self.power_label = QPushButton(self)
# 첫 번째 그룹 내용
# Trained / Untrained condition label
self.trained_label = QPushButton('Trained')
self.Untrained_label = QPushButton('Untrained')
# 두 번째 그룹 내용
self.normal_label = QPushButton('Normal')
self.abnormal_label = QPushButton('Abnormal')
# 세 번째 그룹 내용
self.name_procedure = QLabel('Number of Procedure: ')
self.num_procedure = QLineEdit(self)
self.num_procedure.setAlignment(Qt.AlignCenter)
self.name_scnario = QLabel('Name of Procedure: ')
self.num_scnario = QLineEdit(self)
self.num_scnario.setAlignment(Qt.AlignCenter)
# 네 번째 그룹 내용
self.success_label = QPushButton('Diagnosis Success')
self.failure_label = QPushButton('Diagnosis Failure')
# 다섯 번째 그룹 내용
self.symptom_name = QLabel(self)
self.symptom1 = QCheckBox(self)
self.symptom2 = QCheckBox(self)
self.symptom3 = QCheckBox(self)
self.symptom4 = QCheckBox(self)
self.symptom5 = QCheckBox(self)
self.symptom6 = QCheckBox(self)
# 영 번째 그룹 내용 입력
gb_0_layout.addWidget(self.time_label)
gb_0_layout.addWidget(self.power_label)
gb_0.setLayout(gb_0_layout)
# 첫 번째 그룹 내용 입력
gb_1_layout.addWidget(self.trained_label)
gb_1_layout.addWidget(self.Untrained_label)
gb_1.setLayout(gb_1_layout) # 첫 번째 레이아웃 내용을 첫 번째 그룹 틀로 넣기
# 두 번째 그룹 내용 입력
gb_2_layout.addWidget(self.normal_label)
gb_2_layout.addWidget(self.abnormal_label)
gb_2.setLayout(gb_2_layout)
# 세 번째 그룹 내용 입력
gb_3_layout.addWidget(self.name_procedure)
gb_3_layout.addWidget(self.num_procedure)
gb_3_layout.addWidget(self.name_scnario)
gb_3_layout.addWidget(self.num_scnario)
gb_3.setLayout(gb_3_layout)
# 네 번째 그룹 내용 입력
gb_4_layout.addWidget(self.success_label)
gb_4_layout.addWidget(self.failure_label)
gb_4.setLayout(gb_4_layout)
# 다섯 번째 그룹 내용 입력
gb_5_layout.addWidget(self.symptom_name)
gb_5_layout.addWidget(self.symptom1)
gb_5_layout.addWidget(self.symptom2)
gb_5_layout.addWidget(self.symptom3)
gb_5_layout.addWidget(self.symptom4)
gb_5_layout.addWidget(self.symptom5)
gb_5_layout.addWidget(self.symptom6)
gb_5.setLayout(gb_5_layout)
# Start 버튼 맨 아래에 위치
self.start_btn = QPushButton('Start')
# layout_part1.addWidget(self.start_btn)
self.tableWidget = QTableWidget(0, 0)
self.tableWidget.setFixedHeight(500)
self.tableWidget.setFixedWidth(800)
# Plot 구현
self.plot_1 = pyqtgraph.PlotWidget(title=self)
self.plot_2 = pyqtgraph.PlotWidget(title=self)
self.plot_3 = pyqtgraph.PlotWidget(title=self)
self.plot_4 = pyqtgraph.PlotWidget(title=self)
# Explanation Alarm 구현
red_alarm = QGroupBox('Main basis for diagnosis')
red_alarm_layout = QGridLayout()
orange_alarm = QGroupBox('Sub basis for diagnosis')
orange_alarm_layout = QGridLayout()
# Display Button 생성
self.red1 = QPushButton(self)
self.red2 = QPushButton(self)
self.red3 = QPushButton(self)
self.red4 = QPushButton(self)
self.orange1 = QPushButton(self)
self.orange2 = QPushButton(self)
self.orange3 = QPushButton(self)
self.orange4 = QPushButton(self)
self.orange5 = QPushButton(self)
self.orange6 = QPushButton(self)
self.orange7 = QPushButton(self)
self.orange8 = QPushButton(self)
self.orange9 = QPushButton(self)
self.orange10 = QPushButton(self)
self.orange11 = QPushButton(self)
self.orange12 = QPushButton(self)
# Layout에 widget 삽입
red_alarm_layout.addWidget(self.red1, 0, 0)
red_alarm_layout.addWidget(self.red2, 0, 1)
red_alarm_layout.addWidget(self.red3, 1, 0)
red_alarm_layout.addWidget(self.red4, 1, 1)
orange_alarm_layout.addWidget(self.orange1, 0, 0)
orange_alarm_layout.addWidget(self.orange2, 0, 1)
orange_alarm_layout.addWidget(self.orange3, 1, 0)
orange_alarm_layout.addWidget(self.orange4, 1, 1)
orange_alarm_layout.addWidget(self.orange5, 2, 0)
orange_alarm_layout.addWidget(self.orange6, 2, 1)
orange_alarm_layout.addWidget(self.orange7, 3, 0)
orange_alarm_layout.addWidget(self.orange8, 3, 1)
orange_alarm_layout.addWidget(self.orange9, 4, 0)
orange_alarm_layout.addWidget(self.orange10, 4, 1)
orange_alarm_layout.addWidget(self.orange11, 5, 0)
orange_alarm_layout.addWidget(self.orange12, 5, 1)
# Group Box에 Layout 삽입
red_alarm.setLayout(red_alarm_layout)
orange_alarm.setLayout(orange_alarm_layout)
# 각 Group Box를 상위 Layout에 삽입
layout_part1 = QVBoxLayout()
detail_part = QHBoxLayout()
detailed_table = QPushButton('Detail Explanation [Table]')
self.another_classification = QPushButton('Why other scenarios were not chosen')
detail_part.addWidget(detailed_table)
detail_part.addWidget(self.another_classification)
alarm_main = QVBoxLayout()
alarm_main.addWidget(red_alarm)
alarm_main.addWidget(orange_alarm)
layout_part1.addLayout(layout_left)
layout_part1.addLayout(alarm_main)
layout_part1.addLayout(detail_part)
layout_part1.addItem(QSpacerItem(20, 40, QSizePolicy.Minimum, QSizePolicy.Expanding))
# GUI part2 Layout (XAI 구현)
layout_part2 = QVBoxLayout()
layout_part2.addWidget(self.plot_1)
layout_part2.addWidget(self.plot_2)
layout_part2.addWidget(self.plot_3)
layout_part2.addWidget(self.plot_4)
# layout_part2.addItem(QSpacerItem(20, 40, QSizePolicy.Minimum, QSizePolicy.Expanding))
# layout_part2.addWidget(self.tableWidget)
# GUI part1 and part2 통합
layout_base = QHBoxLayout()
layout_base.addLayout(layout_part1)
layout_base.addLayout(layout_part2)
# GUI 최종 통합 (start button을 하단에 배치시키기 위함)
total_layout = QVBoxLayout()
total_layout.addLayout(layout_base)
total_layout.addWidget(self.start_btn)
self.setLayout(total_layout) # setLayout : 최종 출력될 GUI 화면을 결정
# Threading Part##############################################################################################################
        # Move the data-crunching part onto a worker thread
self.worker = Worker()
self.worker_thread = QThread()
        # Connect the worker signals to slots in the main (GUI) thread
self.worker.train_value.connect(self.Determine_train)
self.worker.procedure_value.connect(self.Determine_abnormal)
self.worker.procedure_value.connect(self.Determine_procedure)
self.worker.verif_value.connect(self.verifit_result)
self.worker.timer.connect(self.time_display)
self.worker.symptom_db.connect(self.procedure_satisfaction)
# self.worker.shap.connect(self.explain_result)
self.worker.plot_db.connect(self.plotting)
self.worker.display_ex.connect(self.display_explain)
        self.worker.moveToThread(self.worker_thread)  # move the Worker instance onto the worker thread
# self.worker_thread.started.connect(lambda: self.worker.generate_db())
        self.start_btn.clicked.connect(lambda: self.worker.generate_db())  # clicking Start launches the replay loop
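        # Note: calling generate_db() directly from the lambda executes it in the GUI thread even though the
        # Worker object lives on worker_thread; the QTest.qWait() inside its loop is what keeps the UI responsive.
        # Emitting a signal connected to generate_db would instead run it on the worker thread.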
self.worker_thread.start()
# Threading Part##############################################################################################################
        # Event handling ----------------------------------------------------------------------------------------------------
detailed_table.clicked.connect(self.show_table)
self.another_classification.clicked.connect(self.show_another_result)
        # Wiring of button-click events
convert_red_btn = {0: self.red1, 1: self.red2, 2: self.red3, 3: self.red4} # Red Button
convert_red_plot = {0: self.red1_plot, 1: self.red2_plot, 2: self.red3_plot, 3: self.red4_plot} #
convert_orange_btn = {0: self.orange1, 1: self.orange2, 2: self.orange3, 3: self.orange4, 4: self.orange5,
5: self.orange6, 6: self.orange7, 7: self.orange8, 8: self.orange9, 9: self.orange10,
10: self.orange11, 11: self.orange12} # Orange Button
convert_orange_plot = {0: self.orange1_plot, 1: self.orange2_plot, 2: self.orange3_plot, 3: self.orange4_plot, 4: self.orange5_plot,
5: self.orange6_plot, 6: self.orange7_plot, 7: self.orange8_plot, 8: self.orange9_plot, 9: self.orange10_plot,
10: self.orange11_plot, 11: self.orange12_plot}
        # Declare the detail plot widgets up front so they persist across clicks instead of being re-created.
# Red Button
[convert_red_btn[i].clicked.connect(convert_red_plot[i]) for i in range(4)]
self.red_plot_1 = pyqtgraph.PlotWidget(title=self)
self.red_plot_2 = pyqtgraph.PlotWidget(title=self)
self.red_plot_3 = pyqtgraph.PlotWidget(title=self)
self.red_plot_4 = pyqtgraph.PlotWidget(title=self)
# Grid setting
self.red_plot_1.showGrid(x=True, y=True, alpha=0.3)
self.red_plot_2.showGrid(x=True, y=True, alpha=0.3)
self.red_plot_3.showGrid(x=True, y=True, alpha=0.3)
self.red_plot_4.showGrid(x=True, y=True, alpha=0.3)
# Orange Button
[convert_orange_btn[i].clicked.connect(convert_orange_plot[i]) for i in range(12)]
self.orange_plot_1 = pyqtgraph.PlotWidget(title=self)
self.orange_plot_2 = pyqtgraph.PlotWidget(title=self)
self.orange_plot_3 = pyqtgraph.PlotWidget(title=self)
self.orange_plot_4 = pyqtgraph.PlotWidget(title=self)
self.orange_plot_5 = pyqtgraph.PlotWidget(title=self)
self.orange_plot_6 = pyqtgraph.PlotWidget(title=self)
self.orange_plot_7 = pyqtgraph.PlotWidget(title=self)
self.orange_plot_8 = pyqtgraph.PlotWidget(title=self)
self.orange_plot_9 = pyqtgraph.PlotWidget(title=self)
self.orange_plot_10 = pyqtgraph.PlotWidget(title=self)
self.orange_plot_11 = pyqtgraph.PlotWidget(title=self)
self.orange_plot_12 = pyqtgraph.PlotWidget(title=self)
# Grid setting
self.orange_plot_1.showGrid(x=True, y=True, alpha=0.3)
self.orange_plot_2.showGrid(x=True, y=True, alpha=0.3)
self.orange_plot_3.showGrid(x=True, y=True, alpha=0.3)
self.orange_plot_4.showGrid(x=True, y=True, alpha=0.3)
self.orange_plot_5.showGrid(x=True, y=True, alpha=0.3)
self.orange_plot_6.showGrid(x=True, y=True, alpha=0.3)
self.orange_plot_7.showGrid(x=True, y=True, alpha=0.3)
self.orange_plot_8.showGrid(x=True, y=True, alpha=0.3)
self.orange_plot_9.showGrid(x=True, y=True, alpha=0.3)
self.orange_plot_10.showGrid(x=True, y=True, alpha=0.3)
self.orange_plot_11.showGrid(x=True, y=True, alpha=0.3)
self.orange_plot_12.showGrid(x=True, y=True, alpha=0.3)
self.show() # UI show command
def time_display(self, display_variable):
# display_variable[0] : time, display_variable[1].iloc[1]
        self.time_label.setText(f'<b>Time :</b> {display_variable[0]} sec')
self.time_label.setFont(QFont('Times new roman', 15))
self.time_label.setAlignment(Qt.AlignCenter)
self.power_label.setText(f'Power : {round(display_variable[1].iloc[1]["QPROREL"]*100, 2)}%')
if round(display_variable[1].iloc[1]["QPROREL"]*100, 2) < 95:
self.power_label.setStyleSheet('color : white;' 'font-weight: bold;' 'background-color: red;')
else:
self.power_label.setStyleSheet('color : black;' 'background-color: light gray;')
def Determine_train(self, train_untrain_reconstruction_error):
if train_untrain_reconstruction_error[0] <= 0.00225299: # Trained Data
self.trained_label.setStyleSheet('color : white;' 'font-weight: bold;' 'background-color: green;')
self.Untrained_label.setStyleSheet('color : black;' 'background-color: light gray;')
        else:  # Untrained data
self.Untrained_label.setStyleSheet('color : white;' 'font-weight: bold;' 'background-color: red;')
self.trained_label.setStyleSheet('color : black;' 'background-color: light gray;')
def Determine_abnormal(self, abnormal_diagnosis):
        if abnormal_diagnosis == 0:  # normal state
self.normal_label.setStyleSheet('color : white;' 'font-weight: bold;' 'background-color: green;')
self.abnormal_label.setStyleSheet('color : black;' 'background-color: light gray;')
        else:  # abnormal state
self.abnormal_label.setStyleSheet('color : white;' 'font-weight: bold;' 'background-color: red;')
self.normal_label.setStyleSheet('color : black;' 'background-color: light gray;')
def Determine_procedure(self, abnormal_procedure_result):
if abnormal_procedure_result == 0:
self.num_procedure.setText('Normal')
self.num_scnario.setText('Normal')
        elif abnormal_procedure_result == 1:
            self.num_procedure.setText('Ab21-01')
            self.num_scnario.setText('Pressurizer pressure channel failure "High"')
        elif abnormal_procedure_result == 2:
            self.num_procedure.setText('Ab21-02')
            self.num_scnario.setText('Pressurizer pressure channel failure "Low"')
        elif abnormal_procedure_result == 3:
            self.num_procedure.setText('Ab20-04')
            self.num_scnario.setText('Pressurizer level channel failure "Low"')
        elif abnormal_procedure_result == 4:
            self.num_procedure.setText('Ab15-07')
            self.num_scnario.setText('Steam generator level channel failure "Low"')
        elif abnormal_procedure_result == 5:
            self.num_procedure.setText('Ab15-08')
            self.num_scnario.setText('Steam generator level channel failure "High"')
        elif abnormal_procedure_result == 6:
            self.num_procedure.setText('Ab63-04')
            self.num_scnario.setText('Control rod fall')
        elif abnormal_procedure_result == 7:
            self.num_procedure.setText('Ab63-02')
            self.num_scnario.setText('Continuous insertion of control rod')
        elif abnormal_procedure_result == 8:
            self.num_procedure.setText('Ab21-12')
            self.num_scnario.setText('Pressurizer PORV opening')
        elif abnormal_procedure_result == 9:
            self.num_procedure.setText('Ab19-02')
            self.num_scnario.setText('Pressurizer safety valve failure')
        elif abnormal_procedure_result == 10:
            self.num_procedure.setText('Ab21-11')
            self.num_scnario.setText('Pressurizer spray valve failure "Open"')
        elif abnormal_procedure_result == 11:
            self.num_procedure.setText('Ab23-03')
            self.num_scnario.setText('Leakage to the component cooling water system "CVCS->CCW"')
        elif abnormal_procedure_result == 12:
            self.num_procedure.setText('Ab60-02')
            self.num_scnario.setText('Rupture at the front end of the regenerative heat exchanger')
        elif abnormal_procedure_result == 13:
            self.num_procedure.setText('Ab59-02')
            self.num_scnario.setText('Leakage downstream of the charging flow control valve')
        elif abnormal_procedure_result == 14:
            self.num_procedure.setText('Ab23-01')
            self.num_scnario.setText('Leakage to the component cooling water system "RCS->CCW"')
        elif abnormal_procedure_result == 15:
            self.num_procedure.setText('Ab23-06')
            self.num_scnario.setText('Steam generator u-tube leakage')
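    # Note: the ladder above amounts to a lookup table keyed by abnormal_procedure_result,
    # e.g. {1: ('Ab21-01', 'Pressurizer pressure channel failure "High"'), 2: ('Ab21-02', ...), ...}.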
def verifit_result(self, verif_value):
        if verif_value[0] <= verif_value[1]:  # diagnosis success
self.success_label.setStyleSheet('color : white;' 'font-weight: bold;' 'background-color: green;')
self.failure_label.setStyleSheet('color : black;' 'background-color: light gray;')
        else:  # diagnosis failure
self.failure_label.setStyleSheet('color : white;' 'font-weight: bold;' 'background-color: red;')
self.success_label.setStyleSheet('color : black;' 'background-color: light gray;')
def procedure_satisfaction(self, symptom_db):
        # symptom_db[0] : classification result [0~15]
        # symptom_db[1] : check_db (2, 2222) -> previous and current time step, kept for comparison.
        # symptom_db[1].iloc[0] : previous time step / symptom_db[1].iloc[1] : current time step
        if symptom_db[0] == 0:  # normal state
self.symptom_name.setText('Diagnosis Result : Normal → Symptoms : 0')
self.symptom1.setText('')
self.symptom1.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : white;""}")
self.symptom2.setText('')
self.symptom2.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : white;""}")
self.symptom3.setText('')
self.symptom3.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : white;""}")
self.symptom4.setText('')
self.symptom4.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : white;""}")
self.symptom5.setText('')
self.symptom5.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : white;""}")
self.symptom6.setText('')
self.symptom6.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : white;""}")
elif symptom_db[0] == 1:
self.symptom_name.setText('Diagnosis Result : Ab21-01 Pressurizer pressure channel failure "High" → Symptoms : 6')
self.symptom1.setText("채널 고장으로 인한 가압기 '고' 압력 지시")
if symptom_db[1].iloc[1]['PPRZN'] > symptom_db[1].iloc[1]['CPPRZH']:
self.symptom1.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : red;""}")
else:
self.symptom1.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : white;""}")
self.symptom2.setText("가압기 살수밸브 '열림' 지시")
if symptom_db[1].iloc[1]['BPRZSP'] > 0:
self.symptom2.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : red;""}")
else:
self.symptom2.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : white;""}")
self.symptom3.setText("가압기 비례전열기 꺼짐")
if symptom_db[1].iloc[1]['QPRZP'] == 0:
self.symptom3.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : red;""}")
else:
self.symptom3.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : white;""}")
self.symptom4.setText("가압기 보조전열기 꺼짐")
if symptom_db[1].iloc[1]['QPRZB'] == 0:
self.symptom4.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : red;""}")
else:
self.symptom4.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : white;""}")
self.symptom5.setText("실제 가압기 '저' 압력 지시")
if symptom_db[1].iloc[1]['PPRZ'] < symptom_db[1].iloc[1]['CPPRZL']:
self.symptom5.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : red;""}")
else:
self.symptom5.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : white;""}")
self.symptom6.setText("가압기 PORV 차단밸브 닫힘")
if symptom_db[1].iloc[1]['BHV6'] == 0:
self.symptom6.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : red;""}")
else:
self.symptom6.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : white;""}")
elif symptom_db[0] == 2:
            self.symptom_name.setText('Diagnosis Result : Ab21-02 Pressurizer pressure channel failure "Low" → Symptoms : 5')
            self.symptom1.setText("Pressurizer 'low' pressure indication due to channel failure")
if symptom_db[1].iloc[1]['PPRZN'] < symptom_db[1].iloc[1]['CPPRZL']:
self.symptom1.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : red;""}")
else:
self.symptom1.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : white;""}")
            self.symptom2.setText('Backup heater on indication and alarm due to pressurizer low pressure')
if (symptom_db[1].iloc[1]['PPRZN'] < symptom_db[1].iloc[1]['CQPRZB']) and (symptom_db[1].iloc[1]['KBHON'] == 1):
self.symptom2.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : red;""}")
else:
self.symptom2.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : white;""}")
self.symptom3.setText("실제 가압기 '고' 압력 지시")
if symptom_db[1].iloc[1]['PPRZ'] > symptom_db[1].iloc[1]['CPPRZH']:
self.symptom3.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : red;""}")
else:
self.symptom3.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : white;""}")
            self.symptom4.setText('Pressurizer PORV open indication and alarm')
if symptom_db[1].iloc[1]['BPORV'] > 0:
self.symptom4.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : red;""}")
else:
self.symptom4.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : white;""}")
            self.symptom5.setText('Pressurizer PORV closed by the actual pressurizer pressure decrease')  # TODO: the check on the pressurizer pressure decrease still needs work.
if symptom_db[1].iloc[1]['BPORV'] == 0 and (symptom_db[1].iloc[0]['PPRZ'] > symptom_db[1].iloc[1]['PPRZ']):
self.symptom5.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : red;""}")
else:
self.symptom5.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : white;""}")
elif symptom_db[0] == 3:
            self.symptom_name.setText('Diagnosis Result : Ab20-04 Pressurizer level channel failure "Low" → Symptoms : 5')
            self.symptom1.setText("Pressurizer 'low' level indication due to channel failure")
            if symptom_db[1].iloc[1]['ZINST63'] < 17:  # TODO: re-check this threshold later.
self.symptom1.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : red;""}")
# else:
# self.symptom1.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : white;""}")
self.symptom2.setText('"LETDN HX OUTLET FLOW LOW" 경보 발생')
if symptom_db[1].iloc[1]['UNRHXUT'] > symptom_db[1].iloc[1]['CULDHX']:
self.symptom2.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : red;""}")
# else:
# self.symptom2.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : white;""}")
self.symptom3.setText('"CHARGING LINE FLOW HI/LO" 경보 발생')
if (symptom_db[1].iloc[1]['WCHGNO'] < symptom_db[1].iloc[1]['CWCHGL']) or (symptom_db[1].iloc[1]['WCHGNO'] > symptom_db[1].iloc[1]['CWCHGH']):
self.symptom3.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : red;""}")
# else:
# self.symptom3.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : white;""}")
            self.symptom4.setText('Charging flow increase')
if symptom_db[1].iloc[0]['WCHGNO'] < symptom_db[1].iloc[1]['WCHGNO']:
self.symptom4.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : red;""}")
# else:
# self.symptom4.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : white;""}")
            self.symptom5.setText('Level indication increase on the healthy level channels')
if symptom_db[1].iloc[0]['ZPRZNO'] < symptom_db[1].iloc[1]['ZPRZNO']:
self.symptom5.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : red;""}")
# else:
# self.symptom5.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : white;""}")
elif symptom_db[0] == 4:
            self.symptom_name.setText('Diagnosis Result : Ab15-07 Steam generator level channel failure "Low" → Symptoms : ')
            self.symptom1.setText('Steam generator level "low" alarm')
if symptom_db[1].iloc[1]['ZINST78']*0.01 < symptom_db[1].iloc[1]['CZSGW'] or symptom_db[1].iloc[1]['ZINST77']*0.01 < symptom_db[1].iloc[1]['CZSGW'] or symptom_db[1].iloc[1]['ZINST76']*0.01 < symptom_db[1].iloc[1]['CZSGW']:
self.symptom1.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : red;""}")
else:
self.symptom1.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : white;""}")
            self.symptom2.setText('The affected SG MFCV moves toward open and the actual feedwater flow of that SG increases')
elif symptom_db[0] == 8:
# self.symptom_name.setText('진단 : Ab21-12 가압기 PORV 열림 → 증상 : 5')
self.symptom_name.setText('Diagnosis result : Ab21-12 Pressurizer PORV opening → Symptoms : 5')
# self.symptom1.setText('가압기 PORV 열림 지시 및 경보 발생')
self.symptom1.setText('Pressurizer PORV open indication and alarm')
if symptom_db[1].iloc[1]['BPORV'] > 0:
self.symptom1.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : red;""}")
else:
self.symptom1.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : white;""}")
# self.symptom2.setText('가압기 저압력으로 인한 보조 전열기 켜짐 지시 및 경보 발생')
self.symptom2.setText('Aux. heater turn on instruction and alarm due to pressurizer low pressure')
if (symptom_db[1].iloc[1]['PPRZN'] < symptom_db[1].iloc[1]['CQPRZB']) and (symptom_db[1].iloc[1]['KBHON'] == 1):
self.symptom2.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : red;""}")
else:
self.symptom2.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : white;""}")
# self.symptom3.setText("가압기 '저' 압력 지시 및 경보 발생")
self.symptom3.setText("pressurizer 'low' pressure indication and alarm")
if symptom_db[1].iloc[1]['PPRZ'] < symptom_db[1].iloc[1]['CPPRZL'] :
self.symptom3.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : red;""}")
else:
self.symptom3.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : white;""}")
# self.symptom4.setText("PRT 고온 지시 및 경보 발생")
self.symptom4.setText("PRT high temperature indication and alarm")
if symptom_db[1].iloc[1]['UPRT'] > symptom_db[1].iloc[1]['CUPRT'] :
self.symptom4.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : red;""}")
else:
self.symptom4.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : white;""}")
# self.symptom5.setText("PRT 고압 지시 및 경보 발생")
self.symptom5.setText("PRT high pressure indication and alarm")
if (symptom_db[1].iloc[1]['PPRT'] - 0.98E5) > symptom_db[1].iloc[1]['CPPRT']:
self.symptom5.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : red;""}")
else:
self.symptom5.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : white;""}")
self.symptom6.setText("Blank")
self.symptom6.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : white;""}")
elif symptom_db[0] == 10:
self.symptom_name.setText("진단 : Ab21-11 가압기 살수밸브 고장 '열림' → 증상 : 4")
self.symptom1.setText("가압기 살수밸브 '열림' 지시 및 상태 표시등 점등")
if symptom_db[1].iloc[1]['BPRZSP'] > 0:
self.symptom1.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : red;""}")
else:
self.symptom1.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : white;""}")
self.symptom2.setText("가압기 보조전열기 켜짐 지시 및 경보 발생")
if (symptom_db[1].iloc[1]['PPRZN'] < symptom_db[1].iloc[1]['CQPRZB']) and (symptom_db[1].iloc[1]['KBHON'] == 1):
self.symptom2.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : red;""}")
else:
self.symptom2.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : white;""}")
self.symptom3.setText("가압기 '저' 압력 지시 및 경보 발생")
if symptom_db[1].iloc[1]['PPRZ'] < symptom_db[1].iloc[1]['CPPRZL']:
self.symptom3.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : red;""}")
else:
self.symptom3.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : white;""}")
self.symptom4.setText("가압기 수위 급격한 증가") # 급격한 증가에 대한 수정은 필요함 -> 추후 수정
if symptom_db[1].iloc[0]['ZINST63'] < symptom_db[1].iloc[1]['ZINST63']:
self.symptom4.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : red;""}")
else:
self.symptom4.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : white;""}")
def explain_result(self, shap_add_des):
        '''
        # shap_add_des['index'] : variable name / shap_add_des[0] : shap value
        # shap_add_des['describe'] : description of the variable / shap_add_des['probability'] : shap value converted to a probability (%)
        '''
self.tableWidget.setRowCount(len(shap_add_des))
self.tableWidget.setColumnCount(4)
self.tableWidget.setHorizontalHeaderLabels(["value_name", 'probability', 'describe', 'system'])
header = self.tableWidget.horizontalHeader()
header.setSectionResizeMode(QHeaderView.ResizeToContents)
header.setSectionResizeMode(0, QHeaderView.Stretch)
header.setSectionResizeMode(1, QHeaderView.Stretch)
header.setSectionResizeMode(2, QHeaderView.ResizeToContents)
header.setSectionResizeMode(3, QHeaderView.Stretch)
[self.tableWidget.setItem(i, 0, QTableWidgetItem(f"{shap_add_des['index'][i]}")) for i in range(len(shap_add_des['index']))]
[self.tableWidget.setItem(i, 1, QTableWidgetItem(f"{round(shap_add_des['probability'][i],2)}%")) for i in range(len(shap_add_des['probability']))]
[self.tableWidget.setItem(i, 2, QTableWidgetItem(f"{shap_add_des['describe'][i]}")) for i in range(len(shap_add_des['describe']))]
[self.tableWidget.setItem(i, 3, QTableWidgetItem(f"{shap_add_des['system'][i]}")) for i in range(len(shap_add_des['system']))]
delegate = AlignDelegate(self.tableWidget)
self.tableWidget.setItemDelegate(delegate)
def show_table(self):
self.worker.shap.connect(self.explain_result)
        # Because the signal is routed through the thread on click, there is a buffering delay of roughly 2 seconds; consider making this connection once at start-up instead.
self.tableWidget.show()
def plotting(self, symptom_db):
# symptom_db[0] : liner : appended time (axis-x) / symptom_db[1].iloc[1] : check_db (:line,2222)[1]
# -- scatter --
# time = []
# value1, value2, value3 = [], [], []
# time.append(symptom_db[0])
# value1.append(round(symptom_db[1].iloc[1]['ZVCT'],2))
# value2.append(round(symptom_db[1].iloc[1]['BPORV'],2))
# value3.append(round(symptom_db[1].iloc[1]['UPRZ'],2))
# self.plotting_1 = self.plot_1.plot(pen=None, symbol='o', symbolBrush='w', symbolPen='w', symbolSize=5)
# self.plotting_2 = self.plot_2.plot(pen=None, symbol='o', symbolBrush='w', symbolPen='w', symbolSize=5)
# self.plotting_3 = self.plot_3.plot(pen=None, symbol='o', symbolBrush='w', symbolPen='w', symbolSize=5)
# -- Line plotting --
# self.plotting_1 = self.plot_1.plot(pen='w')
# self.plotting_2 = self.plot_2.plot(pen='w')
# self.plotting_3 = self.plot_3.plot(pen='w')
# self.plotting_4 = self.plot_4.plot(pen='w')
self.plot_1.showGrid(x=True, y=True, alpha=0.3)
self.plot_2.showGrid(x=True, y=True, alpha=0.3)
self.plot_3.showGrid(x=True, y=True, alpha=0.3)
self.plot_4.showGrid(x=True, y=True, alpha=0.3)
self.plotting_1 = self.plot_1.plot(pen=pyqtgraph.mkPen('k',width=3))
self.plotting_2 = self.plot_2.plot(pen=pyqtgraph.mkPen('k',width=3))
self.plotting_3 = self.plot_3.plot(pen=pyqtgraph.mkPen('k',width=3))
self.plotting_4 = self.plot_4.plot(pen=pyqtgraph.mkPen('k',width=3))
self.plotting_1.setData(x=symptom_db[0], y=pd.DataFrame(symptom_db[1])['BPORV'])
self.plot_1.setTitle('PORV open state')
self.plotting_2.setData(x=symptom_db[0], y=pd.DataFrame(symptom_db[1])['PPRZN'])
self.plot_2.setTitle('Pressurizer pressure')
self.plotting_3.setData(x=symptom_db[0], y=pd.DataFrame(symptom_db[1])['UPRT'])
self.plot_3.setTitle('PRT temperature')
self.plotting_4.setData(x=symptom_db[0], y=pd.DataFrame(symptom_db[1])['PPRT'])
self.plot_4.setTitle('PRT pressure')
        # red_range = display_db[display_db['probability'] >= 10]  # variables with a probability of 10% or more
#
# print(bool(red_range["describe"].iloc[3]))
# try :
# self.plotting_1.setData(x=symptom_db[0], y=pd.DataFrame(symptom_db[1])[red_range['index'].iloc[0]])
# if red_range["describe"].iloc[0] == None:
# self.plot_1.setTitle(self)
# else:
# self.plot_1.setTitle(f'{red_range["describe"].iloc[0]}')
# # self.plot_1.clear()
# except:
# print('plot1 fail')
# try:
# self.plotting_2.setData(x=symptom_db[0], y=pd.DataFrame(symptom_db[1])[red_range['index'].iloc[1]])
# if red_range["describe"].iloc[1] == None:
# self.plot_2.setTitle(self)
# else:
# self.plot_2.setTitle(f'{red_range["describe"].iloc[1]}')
# # self.plot_2.clear()
# except:
# print('plot2 fail')
# try:
# self.plotting_3.setData(x=symptom_db[0], y=pd.DataFrame(symptom_db[1])[red_range['index'].iloc[2]])
# if red_range["describe"].iloc[2] == None:
# self.plot_3.setTitle(self)
# else:
# self.plot_3.setTitle(f'{red_range["describe"].iloc[2]}')
# # self.plot_3.clear()
# except:
# print('plot3 fail')
# try:
# self.plotting_4.setData(x=symptom_db[0], y=pd.DataFrame(symptom_db[1])[red_range['index'].iloc[3]])
# if red_range["describe"].iloc[3] == None:
# self.plot_4.setTitle(self)
# else:
# self.plot_4.setTitle(f'{red_range["describe"].iloc[3]}')
# # self.plot_4.clear()
# except:
# print('plot4 fail')
def display_explain(self, display_db, symptom_db, normal_db):
        '''
        # display_db['index'] : variable name / display_db[0] : shap value
        # display_db['describe'] : description of the variable / display_db['probability'] : shap value converted to a probability (%)
        # symptom_db[0] : liner : appended time (axis-x) / symptom_db[1].iloc[1] : check_db (:line,2222)[1]
        '''
red_range = display_db[display_db['probability'] >=10]
orange_range = display_db[[display_db['probability'].iloc[i]<10 and display_db['probability'].iloc[i]>1 for i in range(len(display_db['probability']))]]
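        # Equivalent, more idiomatic mask (pandas >= 1.3):
        # orange_range = display_db[display_db['probability'].between(1, 10, inclusive='neither')]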
convert_red = {0: self.red1, 1: self.red2, 2: self.red3, 3: self.red4}
convert_orange = {0: self.orange1, 1: self.orange2, 2: self.orange3, 3: self.orange4, 4: self.orange5, 5: self.orange6, 6: self.orange7, 7: self.orange8, 8: self.orange9, 9: self.orange10, 10: self.orange11, 11: self.orange12}
if 4-len(red_range) == 0:
red_del = []
elif 4-len(red_range) == 1:
red_del = [3]
elif 4-len(red_range) == 2:
red_del = [2,3]
elif 4-len(red_range) == 3:
red_del = [1,2,3]
elif 4-len(red_range) == 4:
red_del = [0,1,2,3]
if 12-len(orange_range) == 0:
orange_del = []
elif 12-len(orange_range) == 1:
orange_del = [11]
elif 12-len(orange_range) == 2:
orange_del = [10,11]
elif 12-len(orange_range) == 3:
orange_del = [9,10,11]
elif 12-len(orange_range) == 4:
orange_del = [8,9,10,11]
elif 12-len(orange_range) == 5:
orange_del = [7,8,9,10,11]
elif 12-len(orange_range) == 6:
orange_del = [6,7,8,9,10,11]
elif 12-len(orange_range) == 7:
orange_del = [5,6,7,8,9,10,11]
elif 12-len(orange_range) == 8:
orange_del = [4,5,6,7,8,9,10,11]
elif 12-len(orange_range) == 9:
orange_del = [3,4,5,6,7,8,9,10,11]
elif 12-len(orange_range) == 10:
orange_del = [2,3,4,5,6,7,8,9,10,11]
elif 12-len(orange_range) == 11:
orange_del = [1,2,3,4,5,6,7,8,9,10,11]
elif 12-len(orange_range) == 12:
orange_del = [0,1,2,3,4,5,6,7,8,9,10,11]
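        # The two ladders above just list the unused button slots, i.e.
        # red_del = list(range(len(red_range), 4)) and orange_del = list(range(len(orange_range), 12)).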
[convert_red[i].setText(f'{red_range["describe"].iloc[i]} \n[{round(red_range["probability"].iloc[i],2)}%]') for i in range(len(red_range))]
[convert_red[i].setText('None\nParameter') for i in red_del]
[convert_red[i].setStyleSheet('color : white;' 'font-weight: bold;' 'background-color: blue;') for i in range(len(red_range))]
[convert_red[i].setStyleSheet('color : black;' 'background-color: light gray;') for i in red_del]
[convert_orange[i].setText(f'{orange_range["describe"].iloc[i]} \n[{round(orange_range["probability"].iloc[i],2)}%]') for i in range(len(orange_range))]
[convert_orange[i].setText('None\nParameter') for i in orange_del]
# [convert_orange[i].setStyleSheet('color : white;' 'font-weight: bold;' 'background-color: orange;') for i in range(len(orange_range))]
# [convert_orange[i].setStyleSheet('color : black;' 'background-color: light gray;') for i in orange_del]
        # Build the plotting data associated with each button
# Red1 Button
if self.red1.text().split()[0] != 'None':
self.red_plot_1.clear()
self.red_plot_1.setTitle(red_range['describe'].iloc[0])
self.red_plot_1.addLegend(offset=(-30,20))
self.red_plot_1.plot(x=symptom_db[0], y=pd.DataFrame(symptom_db[1])[red_range['index'].iloc[0]], pen=pyqtgraph.mkPen('b', width=3), name = 'Real Data')
self.red_plot_1.plot(x=symptom_db[0], y=pd.DataFrame(normal_db)[red_range['index'].iloc[0]], pen=pyqtgraph.mkPen('k', width=3), name = 'Normal Data')
# Red2 Button
if self.red2.text().split()[0] != 'None':
self.red_plot_2.clear()
self.red_plot_2.setTitle(red_range['describe'].iloc[1])
self.red_plot_2.addLegend(offset=(-30, 20))
self.red_plot_2.plot(x=symptom_db[0], y=pd.DataFrame(symptom_db[1])[red_range['index'].iloc[1]], pen=pyqtgraph.mkPen('b', width=3), name='Real Data')
self.red_plot_2.plot(x=symptom_db[0], y=pd.DataFrame(normal_db)[red_range['index'].iloc[1]], pen=pyqtgraph.mkPen('k', width=3), name='Normal Data')
# Red3 Button
if self.red3.text().split()[0] != 'None':
self.red_plot_3.clear()
self.red_plot_3.setTitle(red_range['describe'].iloc[2])
self.red_plot_3.addLegend(offset=(-30, 20))
self.red_plot_3.plot(x=symptom_db[0], y=pd.DataFrame(symptom_db[1])[red_range['index'].iloc[2]], pen=pyqtgraph.mkPen('b', width=3), name='Real Data')
self.red_plot_3.plot(x=symptom_db[0], y=pd.DataFrame(normal_db)[red_range['index'].iloc[2]], pen=pyqtgraph.mkPen('k', width=3), name='Normal Data')
# Red4 Button
if self.red4.text().split()[0] != 'None':
self.red_plot_4.clear()
self.red_plot_4.setTitle(red_range['describe'].iloc[3])
self.red_plot_4.addLegend(offset=(-30, 20))
self.red_plot_4.plot(x=symptom_db[0], y=pd.DataFrame(symptom_db[1])[red_range['index'].iloc[3]], pen=pyqtgraph.mkPen('b', width=3), name='Real Data')
self.red_plot_4.plot(x=symptom_db[0], y=pd.DataFrame(normal_db)[red_range['index'].iloc[3]], pen=pyqtgraph.mkPen('k', width=3), name='Normal Data')
# Orange1 Button
if self.orange1.text().split()[0] != 'None':
self.orange_plot_1.clear()
self.orange_plot_1.setTitle(orange_range['describe'].iloc[0])
self.orange_plot_1.addLegend(offset=(-30, 20))
self.orange_plot_1.plot(x=symptom_db[0], y=pd.DataFrame(symptom_db[1])[orange_range['index'].iloc[0]], pen=pyqtgraph.mkPen('b', width=3), name='Real Data')
self.orange_plot_1.plot(x=symptom_db[0], y=pd.DataFrame(normal_db)[orange_range['index'].iloc[0]], pen=pyqtgraph.mkPen('k', width=3), name='Normal Data')
# Orange2 Button
if self.orange2.text().split()[0] != 'None':
self.orange_plot_2.clear()
self.orange_plot_2.setTitle(orange_range['describe'].iloc[1])
self.orange_plot_2.addLegend(offset=(-30, 20))
self.orange_plot_2.plot(x=symptom_db[0], y=pd.DataFrame(symptom_db[1])[orange_range['index'].iloc[1]], pen=pyqtgraph.mkPen('b', width=3), name='Real Data')
self.orange_plot_2.plot(x=symptom_db[0], y=pd.DataFrame(normal_db)[orange_range['index'].iloc[1]], pen=pyqtgraph.mkPen('k', width=3), name='Normal Data')
# Orange3 Button
if self.orange3.text().split()[0] != 'None':
self.orange_plot_3.clear()
self.orange_plot_3.setTitle(orange_range['describe'].iloc[2])
self.orange_plot_3.addLegend(offset=(-30, 20))
self.orange_plot_3.plot(x=symptom_db[0], y=pd.DataFrame(symptom_db[1])[orange_range['index'].iloc[2]], pen=pyqtgraph.mkPen('b', width=3), name='Real Data')
self.orange_plot_3.plot(x=symptom_db[0], y=pd.DataFrame(normal_db)[orange_range['index'].iloc[2]], pen=pyqtgraph.mkPen('k', width=3), name='Normal Data')
# Orange4 Button
if self.orange4.text().split()[0] != 'None':
self.orange_plot_4.clear()
self.orange_plot_4.setTitle(orange_range['describe'].iloc[3])
self.orange_plot_4.addLegend(offset=(-30, 20))
self.orange_plot_4.plot(x=symptom_db[0], y=pd.DataFrame(symptom_db[1])[orange_range['index'].iloc[3]], pen=pyqtgraph.mkPen('b', width=3), name='Real Data')
self.orange_plot_4.plot(x=symptom_db[0], y=pd.DataFrame(normal_db)[orange_range['index'].iloc[3]], pen=pyqtgraph.mkPen('k', width=3), name='Normal Data')
# Orange5 Button
if self.orange5.text().split()[0] != 'None':
self.orange_plot_5.clear()
self.orange_plot_5.setTitle(orange_range['describe'].iloc[4])
self.orange_plot_5.addLegend(offset=(-30, 20))
self.orange_plot_5.plot(x=symptom_db[0], y=pd.DataFrame(symptom_db[1])[orange_range['index'].iloc[4]], pen=pyqtgraph.mkPen('b', width=3), name='Real Data')
self.orange_plot_5.plot(x=symptom_db[0], y=pd.DataFrame(normal_db)[orange_range['index'].iloc[4]], pen=pyqtgraph.mkPen('k', width=3), name='Normal Data')
# Orange6 Button
if self.orange6.text().split()[0] != 'None':
self.orange_plot_6.clear()
self.orange_plot_6.setTitle(orange_range['describe'].iloc[5])
self.orange_plot_6.addLegend(offset=(-30, 20))
self.orange_plot_6.plot(x=symptom_db[0], y=pd.DataFrame(symptom_db[1])[orange_range['index'].iloc[5]], pen=pyqtgraph.mkPen('b', width=3), name='Real Data')
self.orange_plot_6.plot(x=symptom_db[0], y=pd.DataFrame(normal_db)[orange_range['index'].iloc[5]], pen=pyqtgraph.mkPen('k', width=3), name='Normal Data')
# Orange7 Button
if self.orange7.text().split()[0] != 'None':
self.orange_plot_7.clear()
self.orange_plot_7.setTitle(orange_range['describe'].iloc[6])
self.orange_plot_7.addLegend(offset=(-30, 20))
self.orange_plot_7.plot(x=symptom_db[0], y=pd.DataFrame(symptom_db[1])[orange_range['index'].iloc[6]], pen=pyqtgraph.mkPen('b', width=3), name='Real Data')
self.orange_plot_7.plot(x=symptom_db[0], y=pd.DataFrame(normal_db)[orange_range['index'].iloc[6]], pen=pyqtgraph.mkPen('k', width=3), name='Normal Data')
# Orange8 Button
if self.orange8.text().split()[0] != 'None':
self.orange_plot_8.clear()
self.orange_plot_8.setTitle(orange_range['describe'].iloc[7])
self.orange_plot_8.addLegend(offset=(-30, 20))
self.orange_plot_8.plot(x=symptom_db[0], y=pd.DataFrame(symptom_db[1])[orange_range['index'].iloc[7]], pen=pyqtgraph.mkPen('b', width=3), name='Real Data')
self.orange_plot_8.plot(x=symptom_db[0], y=pd.DataFrame(normal_db)[orange_range['index'].iloc[7]], pen=pyqtgraph.mkPen('k', width=3), name='Normal Data')
# Orange9 Button
if self.orange9.text().split()[0] != 'None':
self.orange_plot_9.clear()
self.orange_plot_9.setTitle(orange_range['describe'].iloc[8])
self.orange_plot_9.addLegend(offset=(-30, 20))
self.orange_plot_9.plot(x=symptom_db[0], y=pd.DataFrame(symptom_db[1])[orange_range['index'].iloc[8]], pen=pyqtgraph.mkPen('b', width=3), name='Real Data')
self.orange_plot_9.plot(x=symptom_db[0], y=pd.DataFrame(normal_db)[orange_range['index'].iloc[8]], pen=pyqtgraph.mkPen('k', width=3), name='Normal Data')
# Orange10 Button
if self.orange10.text().split()[0] != 'None':
self.orange_plot_10.clear()
self.orange_plot_10.setTitle(orange_range['describe'].iloc[9])
self.orange_plot_10.addLegend(offset=(-30, 20))
self.orange_plot_10.plot(x=symptom_db[0], y=pd.DataFrame(symptom_db[1])[orange_range['index'].iloc[9]], pen=pyqtgraph.mkPen('b', width=3), name='Real Data')
self.orange_plot_10.plot(x=symptom_db[0], y=pd.DataFrame(normal_db)[orange_range['index'].iloc[9]], pen=pyqtgraph.mkPen('k', width=3), name='Normal Data')
# Orange11 Button
if self.orange11.text().split()[0] != 'None':
self.orange_plot_11.clear()
self.orange_plot_11.setTitle(orange_range['describe'].iloc[10])
self.orange_plot_11.addLegend(offset=(-30, 20))
self.orange_plot_11.plot(x=symptom_db[0], y=pd.DataFrame(symptom_db[1])[orange_range['index'].iloc[10]], pen=pyqtgraph.mkPen('b', width=3), name='Real Data')
self.orange_plot_11.plot(x=symptom_db[0], y=pd.DataFrame(normal_db)[orange_range['index'].iloc[10]], pen=pyqtgraph.mkPen('k', width=3), name='Normal Data')
# Orange12 Button
if self.orange12.text().split()[0] != 'None':
self.orange_plot_12.clear()
self.orange_plot_12.setTitle(orange_range['describe'].iloc[11])
self.orange_plot_12.addLegend(offset=(-30, 20))
self.orange_plot_12.plot(x=symptom_db[0], y=pd.DataFrame(symptom_db[1])[orange_range['index'].iloc[11]], pen=pyqtgraph.mkPen('b', width=3), name='Real Data')
self.orange_plot_12.plot(x=symptom_db[0], y=pd.DataFrame(normal_db)[orange_range['index'].iloc[11]], pen=pyqtgraph.mkPen('k', width=3), name='Normal Data')
[convert_red[i].setCheckable(True) for i in range(4)]
[convert_orange[i].setCheckable(True) for i in range(12)]
def red1_plot(self):
if self.red1.isChecked():
if self.red1.text().split()[0] != 'None':
self.red_plot_1.show()
self.red1.setCheckable(False)
def red2_plot(self):
if self.red2.isChecked():
if self.red2.text().split()[0] != 'None':
self.red_plot_2.show()
self.red2.setCheckable(False)
def red3_plot(self):
if self.red3.isChecked():
if self.red3.text().split()[0] != 'None':
self.red_plot_3.show()
self.red3.setCheckable(False)
def red4_plot(self):
if self.red4.isChecked():
if self.red4.text().split()[0] != 'None':
self.red_plot_4.show()
self.red4.setCheckable(False)
def orange1_plot(self):
if self.orange1.isChecked():
if self.orange1.text().split()[0] != 'None':
self.orange_plot_1.show()
self.orange1.setCheckable(False)
def orange2_plot(self):
if self.orange2.isChecked():
if self.orange2.text().split()[0] != 'None':
self.orange_plot_2.show()
self.orange2.setCheckable(False)
def orange3_plot(self):
if self.orange3.isChecked():
if self.orange3.text().split()[0] != 'None':
self.orange_plot_3.show()
self.orange3.setCheckable(False)
def orange4_plot(self):
if self.orange4.isChecked():
if self.orange4.text().split()[0] != 'None':
self.orange_plot_4.show()
self.orange4.setCheckable(False)
def orange5_plot(self):
if self.orange5.isChecked():
if self.orange5.text().split()[0] != 'None':
self.orange_plot_5.show()
self.orange5.setCheckable(False)
def orange6_plot(self):
if self.orange6.isChecked():
if self.orange6.text().split()[0] != 'None':
self.orange_plot_6.show()
self.orange6.setCheckable(False)
def orange7_plot(self):
if self.orange7.isChecked():
if self.orange7.text().split()[0] != 'None':
self.orange_plot_7.show()
self.orange7.setCheckable(False)
def orange8_plot(self):
if self.orange8.isChecked():
if self.orange8.text().split()[0] != 'None':
self.orange_plot_8.show()
self.orange8.setCheckable(False)
def orange9_plot(self):
if self.orange9.isChecked():
if self.orange9.text().split()[0] != 'None':
self.orange_plot_9.show()
self.orange9.setCheckable(False)
def orange10_plot(self):
if self.orange10.isChecked():
if self.orange10.text().split()[0] != 'None':
self.orange_plot_10.show()
self.orange10.setCheckable(False)
def orange11_plot(self):
if self.orange11.isChecked():
if self.orange11.text().split()[0] != 'None':
self.orange_plot_11.show()
self.orange11.setCheckable(False)
def orange12_plot(self):
if self.orange12.isChecked():
if self.orange12.text().split()[0] != 'None':
self.orange_plot_12.show()
self.orange12.setCheckable(False)
def show_another_result(self):
self.other = another_result_explain()
self.worker.another_shap_table.connect(self.other.show_another_result_table)
self.worker.another_shap.connect(self.other.show_shap)
self.other.show()
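# Minimal sketch (assumption, not referenced by the classes above or below): the repeated
# "QCheckBox::indicator {background-color : ...;}" stylesheet lines could be factored into a
# helper such as this one; the function name is illustrative only.
def set_indicator_color(checkbox, color):
    # Paint the indicator square of a QCheckBox with the given background colour.
    checkbox.setStyleSheet("QCheckBox::indicator {background-color : %s;}" % color)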
class another_result_explain(QWidget):
def __init__(self):
super().__init__()
        # Initial setup of the sub interface
self.setWindowTitle('Another Result Explanation')
self.setGeometry(300, 300, 800, 500)
self.selected_para = pd.read_csv('./DataBase/Final_parameter_200825.csv')
        # Layout composition
combo_layout = QVBoxLayout()
        self.title_label = QLabel("<b>Interpretation of results for the scenarios that were not selected</b>")
self.title_label.setAlignment(Qt.AlignCenter)
        self.blank = QLabel(self)  # label used as a blank spacer line
self.show_table = QPushButton("Show Table")
self.cb = QComboBox(self)
self.cb.addItem('Normal')
self.cb.addItem('Ab21-01: Pressurizer pressure channel failure (High)')
self.cb.addItem('Ab21-02: Pressurizer pressure channel failure (Low)')
self.cb.addItem('Ab20-04: Pressurizer level channel failure (Low)')
self.cb.addItem('Ab15-07: Steam generator level channel failure (High)')
self.cb.addItem('Ab15-08: Steam generator level channel failure (Low)')
self.cb.addItem('Ab63-04: Control rod fall')
self.cb.addItem('Ab63-02: Continuous insertion of control rod')
self.cb.addItem('Ab21-12: Pressurizer PORV opening')
self.cb.addItem('Ab19-02: Pressurizer safety valve failure')
self.cb.addItem('Ab21-11: Pressurizer spray valve failed opening')
self.cb.addItem('Ab23-03: Leakage from CVCS to RCS')
self.cb.addItem('Ab60-02: Rupture of the front end of the regenerative heat exchanger')
self.cb.addItem('Ab59-02: Leakage at the rear end of the charging flow control valve')
self.cb.addItem('Ab23-01: Leakage from CVCS to CCW')
self.cb.addItem('Ab23-06: Steam generator u-tube leakage')
# Explanation Alarm 구현
cb_red_alarm = QGroupBox('Main basis for diagnosis')
cb_red_alarm_layout = QGridLayout()
cb_orange_alarm = QGroupBox('Sub basis for diagnosis')
cb_orange_alarm_layout = QGridLayout()
# Display Button 생성
self.cb_red1 = QPushButton(self)
self.cb_red2 = QPushButton(self)
self.cb_red3 = QPushButton(self)
self.cb_red4 = QPushButton(self)
self.cb_orange1 = QPushButton(self)
self.cb_orange2 = QPushButton(self)
self.cb_orange3 = QPushButton(self)
self.cb_orange4 = QPushButton(self)
self.cb_orange5 = QPushButton(self)
self.cb_orange6 = QPushButton(self)
self.cb_orange7 = QPushButton(self)
self.cb_orange8 = QPushButton(self)
self.cb_orange9 = QPushButton(self)
self.cb_orange10 = QPushButton(self)
self.cb_orange11 = QPushButton(self)
self.cb_orange12 = QPushButton(self)
# Layout에 widget 삽입
cb_red_alarm_layout.addWidget(self.cb_red1, 0, 0)
cb_red_alarm_layout.addWidget(self.cb_red2, 0, 1)
cb_red_alarm_layout.addWidget(self.cb_red3, 1, 0)
cb_red_alarm_layout.addWidget(self.cb_red4, 1, 1)
cb_orange_alarm_layout.addWidget(self.cb_orange1, 0, 0)
cb_orange_alarm_layout.addWidget(self.cb_orange2, 0, 1)
cb_orange_alarm_layout.addWidget(self.cb_orange3, 1, 0)
cb_orange_alarm_layout.addWidget(self.cb_orange4, 1, 1)
cb_orange_alarm_layout.addWidget(self.cb_orange5, 2, 0)
cb_orange_alarm_layout.addWidget(self.cb_orange6, 2, 1)
cb_orange_alarm_layout.addWidget(self.cb_orange7, 3, 0)
cb_orange_alarm_layout.addWidget(self.cb_orange8, 3, 1)
cb_orange_alarm_layout.addWidget(self.cb_orange9, 4, 0)
cb_orange_alarm_layout.addWidget(self.cb_orange10, 4, 1)
cb_orange_alarm_layout.addWidget(self.cb_orange11, 5, 0)
cb_orange_alarm_layout.addWidget(self.cb_orange12, 5, 1)
cb_red_alarm.setLayout(cb_red_alarm_layout)
cb_orange_alarm.setLayout(cb_orange_alarm_layout)
combo_layout.addWidget(self.title_label)
combo_layout.addWidget(self.blank)
combo_layout.addWidget(self.cb)
combo_layout.addWidget(self.blank)
# combo_layout.addItem(QSpacerItem(20, 40, QSizePolicy.Minimum, QSizePolicy.Expanding))
combo_layout.addWidget(cb_red_alarm)
combo_layout.addWidget(cb_orange_alarm)
combo_layout.addWidget(self.blank)
combo_layout.addWidget(self.show_table)
combo_layout.addItem(QSpacerItem(20, 40, QSizePolicy.Minimum, QSizePolicy.Expanding))
self.setLayout(combo_layout)
self.combo_tableWidget = QTableWidget(0, 0)
self.combo_tableWidget.setFixedHeight(500)
self.combo_tableWidget.setFixedWidth(800)
# self.combo_tableWidget = QTableWidget(0, 0)
# 이벤트 처리 부분 ########################################################
self.show_table.clicked.connect(self.show_anoter_table)
self.cb.activated[str].connect(self.show_another_result_table)
self.cb.activated[str].connect(self.show_shap)
##########################################################################
# Button 클릭 연동 이벤트 처리
convert_cb_red_btn = {0: self.cb_red1, 1: self.cb_red2, 2: self.cb_red3, 3: self.cb_red4} # Red Button
convert_cb_red_plot = {0: self.cb_red1_plot, 1: self.cb_red2_plot, 2: self.cb_red3_plot, 3: self.cb_red4_plot}
convert_cb_orange_btn = {0: self.cb_orange1, 1: self.cb_orange2, 2: self.cb_orange3, 3: self.cb_orange4, 4: self.cb_orange5,
5: self.cb_orange6, 6: self.cb_orange7, 7: self.cb_orange8, 8: self.cb_orange9, 9: self.cb_orange10,
10: self.cb_orange11, 11: self.cb_orange12} # Orange Button
convert_cb_orange_plot = {0: self.cb_orange1_plot, 1: self.cb_orange2_plot, 2: self.cb_orange3_plot, 3: self.cb_orange4_plot,
4: self.cb_orange5_plot, 5: self.cb_orange6_plot, 6: self.cb_orange7_plot, 7: self.cb_orange8_plot,
8: self.cb_orange9_plot, 9: self.cb_orange10_plot, 10: self.cb_orange11_plot, 11: self.cb_orange12_plot}
################################################################################################################
# 초기 Button 위젯 선언 -> 초기에 선언해야 끊기지않고 유지됨.
# Red Button
[convert_cb_red_btn[i].clicked.connect(convert_cb_red_plot[i]) for i in range(4)]
self.cb_red_plot_1 = pyqtgraph.PlotWidget(title=self)
self.cb_red_plot_2 = pyqtgraph.PlotWidget(title=self)
self.cb_red_plot_3 = pyqtgraph.PlotWidget(title=self)
self.cb_red_plot_4 = pyqtgraph.PlotWidget(title=self)
# Grid setting
self.cb_red_plot_1.showGrid(x=True, y=True, alpha=0.3)
self.cb_red_plot_2.showGrid(x=True, y=True, alpha=0.3)
self.cb_red_plot_3.showGrid(x=True, y=True, alpha=0.3)
self.cb_red_plot_4.showGrid(x=True, y=True, alpha=0.3)
# Orange Button
[convert_cb_orange_btn[i].clicked.connect(convert_cb_orange_plot[i]) for i in range(12)]
self.cb_orange_plot_1 = pyqtgraph.PlotWidget(title=self)
self.cb_orange_plot_2 = pyqtgraph.PlotWidget(title=self)
self.cb_orange_plot_3 = pyqtgraph.PlotWidget(title=self)
self.cb_orange_plot_4 = pyqtgraph.PlotWidget(title=self)
self.cb_orange_plot_5 = pyqtgraph.PlotWidget(title=self)
self.cb_orange_plot_6 = pyqtgraph.PlotWidget(title=self)
self.cb_orange_plot_7 = pyqtgraph.PlotWidget(title=self)
self.cb_orange_plot_8 = pyqtgraph.PlotWidget(title=self)
self.cb_orange_plot_9 = pyqtgraph.PlotWidget(title=self)
self.cb_orange_plot_10 = pyqtgraph.PlotWidget(title=self)
self.cb_orange_plot_11 = pyqtgraph.PlotWidget(title=self)
self.cb_orange_plot_12 = pyqtgraph.PlotWidget(title=self)
# Grid setting
self.cb_orange_plot_1.showGrid(x=True, y=True, alpha=0.3)
self.cb_orange_plot_2.showGrid(x=True, y=True, alpha=0.3)
self.cb_orange_plot_3.showGrid(x=True, y=True, alpha=0.3)
self.cb_orange_plot_4.showGrid(x=True, y=True, alpha=0.3)
self.cb_orange_plot_5.showGrid(x=True, y=True, alpha=0.3)
self.cb_orange_plot_6.showGrid(x=True, y=True, alpha=0.3)
self.cb_orange_plot_7.showGrid(x=True, y=True, alpha=0.3)
self.cb_orange_plot_8.showGrid(x=True, y=True, alpha=0.3)
self.cb_orange_plot_9.showGrid(x=True, y=True, alpha=0.3)
self.cb_orange_plot_10.showGrid(x=True, y=True, alpha=0.3)
self.cb_orange_plot_11.showGrid(x=True, y=True, alpha=0.3)
self.cb_orange_plot_12.showGrid(x=True, y=True, alpha=0.3)
################################################################################################################
self.show() # Sub UI show command
def show_shap(self, all_shap, symptom_db, compare_data):
# all_shap : holds the shap_values for every scenario.
# symptom_db[0] : liner : appended time (x-axis) / symptom_db[1].iloc[1] : check_db (:line,2222)[1]
if self.cb.currentText() == 'Normal':
step1 = pd.DataFrame(all_shap[0], columns=self.selected_para['0'].tolist())
compared_db = compare_data[self.cb.currentText()]
elif self.cb.currentText() == 'Ab21-01: Pressurizer pressure channel failure (High)':
step1 = pd.DataFrame(all_shap[1], columns=self.selected_para['0'].tolist())
compared_db = compare_data[self.cb.currentText()[:7]]
elif self.cb.currentText() == 'Ab21-02: Pressurizer pressure channel failure (Low)':
step1 = pd.DataFrame(all_shap[2], columns=self.selected_para['0'].tolist())
compared_db = compare_data[self.cb.currentText()[:7]]
elif self.cb.currentText() == 'Ab20-04: Pressurizer level channel failure (Low)':
step1 = pd.DataFrame(all_shap[3], columns=self.selected_para['0'].tolist())
compared_db = compare_data[self.cb.currentText()[:7]]
elif self.cb.currentText() == 'Ab15-07: Steam generator level channel failure (High)':
step1 = pd.DataFrame(all_shap[4], columns=self.selected_para['0'].tolist())
compared_db = compare_data[self.cb.currentText()[:7]]
elif self.cb.currentText() == 'Ab15-08: Steam generator level channel failure (Low)':
step1 = pd.DataFrame(all_shap[5], columns=self.selected_para['0'].tolist())
compared_db = compare_data[self.cb.currentText()[:7]]
elif self.cb.currentText() == 'Ab63-04: Control rod fall':
step1 = pd.DataFrame(all_shap[6], columns=self.selected_para['0'].tolist())
compared_db = compare_data[self.cb.currentText()[:7]]
elif self.cb.currentText() == 'Ab63-02: Continuous insertion of control rod':
step1 = pd.DataFrame(all_shap[7], columns=self.selected_para['0'].tolist())
compared_db = compare_data[self.cb.currentText()[:7]]
elif self.cb.currentText() == 'Ab21-12: Pressurizer PORV opening':
step1 = pd.DataFrame(all_shap[8], columns=self.selected_para['0'].tolist())
compared_db = compare_data[self.cb.currentText()[:7]]
elif self.cb.currentText() == 'Ab19-02: Pressurizer safety valve failure':
step1 = pd.DataFrame(all_shap[9], columns=self.selected_para['0'].tolist())
compared_db = compare_data[self.cb.currentText()[:7]]
elif self.cb.currentText() == 'Ab21-11: Pressurizer spray valve failed opening':
step1 = pd.DataFrame(all_shap[10], columns=self.selected_para['0'].tolist())
compared_db = compare_data[self.cb.currentText()[:7]]
elif self.cb.currentText() == 'Ab23-03: Leakage from CVCS to RCS':
step1 = pd.DataFrame(all_shap[11], columns=self.selected_para['0'].tolist())
compared_db = compare_data[self.cb.currentText()[:7]]
elif self.cb.currentText() == 'Ab60-02: Rupture of the front end of the regenerative heat exchanger':
step1 = pd.DataFrame(all_shap[12], columns=self.selected_para['0'].tolist())
compared_db = compare_data[self.cb.currentText()[:7]]
elif self.cb.currentText() == 'Ab59-02: Leakage at the rear end of the charging flow control valve':
step1 = pd.DataFrame(all_shap[13], columns=self.selected_para['0'].tolist())
compared_db = compare_data[self.cb.currentText()[:7]]
elif self.cb.currentText() == 'Ab23-01: Leakage from CVCS to CCW':
step1 = pd.DataFrame(all_shap[14], columns=self.selected_para['0'].tolist())
compared_db = compare_data[self.cb.currentText()[:7]]
elif self.cb.currentText() == 'Ab23-06: Steam generator u-tube leakage':
step1 = pd.DataFrame(all_shap[15], columns=self.selected_para['0'].tolist())
compared_db = compare_data[self.cb.currentText()[:7]]
step2 = step1.sort_values(by=0, ascending=True, axis=1)
step3 = step2[step2.iloc[:] < 0].dropna(axis=1).T
self.step4 = step3.reset_index()
col = self.step4['index']
var = [self.selected_para['0'][self.selected_para['0'] == col_].index for col_ in col]
val_col = [self.selected_para['1'][var_].iloc[0] for var_ in var]
proba = [(self.step4[0][val_num] / sum(self.step4[0])) * 100 for val_num in range(len(self.step4[0]))]
val_system = [self.selected_para['2'][var_].iloc[0] for var_ in var]
self.step4['describe'] = val_col
self.step4['probability'] = proba
self.step4['system'] = val_system
red_range = self.step4[self.step4['probability'] >= 10]
orange_range = self.step4[(self.step4['probability'] < 10) & (self.step4['probability'] > 1)]
convert_red = {0: self.cb_red1, 1: self.cb_red2, 2: self.cb_red3, 3: self.cb_red4}
convert_orange = {0: self.cb_orange1, 1: self.cb_orange2, 2: self.cb_orange3, 3: self.cb_orange4, 4: self.cb_orange5,
5: self.cb_orange6, 6: self.cb_orange7, 7: self.cb_orange8, 8: self.cb_orange9, 9: self.cb_orange10,
10: self.cb_orange11, 11: self.cb_orange12}
# Blank out any button slots beyond the number of flagged parameters.
red_del = list(range(len(red_range), 4))
orange_del = list(range(len(orange_range), 12))
[convert_red[i].setText(f'{red_range["describe"].iloc[i]} \n[{round(red_range["probability"].iloc[i], 2)}%]') for i in range(len(red_range))]
[convert_red[i].setText('None\nParameter') for i in red_del]
[convert_red[i].setStyleSheet('color : white;' 'font-weight: bold;' 'background-color: blue;') for i in range(len(red_range))]
[convert_red[i].setStyleSheet('color : black;' 'background-color: light gray;') for i in red_del]
[convert_orange[i].setText(f'{orange_range["describe"].iloc[i]} \n[{round(orange_range["probability"].iloc[i], 2)}%]') for i in range(len(orange_range))]
[convert_orange[i].setText('None\nParameter') for i in orange_del]
#####################################################################################################################################
# Build the plotting data associated with each button
# Red1 Button
if self.cb_red1.text().split()[0] != 'None':
self.cb_red_plot_1.clear()
self.cb_red_plot_1.setTitle(red_range['describe'].iloc[0])
self.cb_red_plot_1.addLegend(offset=(-30,20))
self.cb_red_plot_1.plot(x=symptom_db[0], y=pd.DataFrame(symptom_db[1])[red_range['index'].iloc[0]], pen=pyqtgraph.mkPen('b', width=3), name='Real Data')
self.cb_red_plot_1.plot(x=symptom_db[0], y=pd.DataFrame(compared_db)[red_range['index'].iloc[0]], pen=pyqtgraph.mkPen('k', width=3), name=self.cb.currentText()[:7])
# Red2 Button
if self.cb_red2.text().split()[0] != 'None':
self.cb_red_plot_2.clear()
self.cb_red_plot_2.setTitle(red_range['describe'].iloc[1])
self.cb_red_plot_2.addLegend(offset=(-30, 20))
self.cb_red_plot_2.plot(x=symptom_db[0], y=pd.DataFrame(symptom_db[1])[red_range['index'].iloc[1]], pen=pyqtgraph.mkPen('b', width=3), name='Real Data')
self.cb_red_plot_2.plot(x=symptom_db[0], y=pd.DataFrame(compared_db)[red_range['index'].iloc[1]], pen=pyqtgraph.mkPen('k', width=3), name=self.cb.currentText()[:7])
# Red3 Button
if self.cb_red3.text().split()[0] != 'None':
self.cb_red_plot_3.clear()
self.cb_red_plot_3.setTitle(red_range['describe'].iloc[2])
self.cb_red_plot_3.addLegend(offset=(-30, 20))
self.cb_red_plot_3.plot(x=symptom_db[0], y=pd.DataFrame(symptom_db[1])[red_range['index'].iloc[2]], pen=pyqtgraph.mkPen('b', width=3), name='Real Data')
self.cb_red_plot_3.plot(x=symptom_db[0], y=pd.DataFrame(compared_db)[red_range['index'].iloc[2]], pen=pyqtgraph.mkPen('k', width=3), name=self.cb.currentText()[:7])
# Red4 Button
if self.cb_red4.text().split()[0] != 'None':
self.cb_red_plot_4.clear()
self.cb_red_plot_4.setTitle(red_range['describe'].iloc[3])
self.cb_red_plot_4.addLegend(offset=(-30, 20))
self.cb_red_plot_4.plot(x=symptom_db[0], y=pd.DataFrame(symptom_db[1])[red_range['index'].iloc[3]], pen=pyqtgraph.mkPen('b', width=3), name='Real Data')
self.cb_red_plot_4.plot(x=symptom_db[0], y=pd.DataFrame(compared_db)[red_range['index'].iloc[3]], pen=pyqtgraph.mkPen('k', width=3), name=self.cb.currentText()[:7])
# Orange1 Button
if self.cb_orange1.text().split()[0] != 'None':
self.cb_orange_plot_1.clear()
self.cb_orange_plot_1.setTitle(orange_range['describe'].iloc[0])
self.cb_orange_plot_1.addLegend(offset=(-30, 20))
self.cb_orange_plot_1.plot(x=symptom_db[0], y=pd.DataFrame(symptom_db[1])[orange_range['index'].iloc[0]], pen=pyqtgraph.mkPen('b', width=3), name='Real Data')
self.cb_orange_plot_1.plot(x=symptom_db[0], y= | pd.DataFrame(compared_db) | pandas.DataFrame |
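# --- Illustrative sketch (editor's addition, not part of the GUI above) ---
# show_shap() keeps only the negative SHAP contributions, converts them to percentages
# of the total, and buckets them into "red" (>= 10%) and "orange" (1-10%) groups.
# The toy values and parameter names below are invented for illustration.
import pandas as pd
shap_row = pd.DataFrame([[-0.4, -0.05, 0.2, -0.01]],
                        columns=['PZR pressure', 'SG level', 'RCS flow', 'CVCS flow'])
negatives = shap_row.sort_values(by=0, ascending=True, axis=1)
negatives = negatives[negatives.iloc[:] < 0].dropna(axis=1).T.reset_index()
negatives['probability'] = negatives[0] / negatives[0].sum() * 100
red_range_demo = negatives[negatives['probability'] >= 10]
orange_range_demo = negatives[(negatives['probability'] < 10) & (negatives['probability'] > 1)]
print(red_range_demo[['index', 'probability']])
print(orange_range_demo[['index', 'probability']])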
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
This file implements the Bayesian Online Changepoint Detection
algorithm as a DetectorModel, to provide a common interface.
"""
import json
from typing import Any, Optional
import numpy as np
import pandas as pd
from kats.consts import TimeSeriesData
from kats.detectors.bocpd import (
BOCPDetector,
BOCPDModelType,
)
from kats.detectors.detector import DetectorModel
from kats.detectors.detector_consts import (
AnomalyResponse,
ConfidenceBand,
)
from statsmodels.tsa.holtwinters import ExponentialSmoothing
class BocpdDetectorModel(DetectorModel):
"""Implements the Bayesian Online Changepoint Detection as a DetectorModel.
This provides a unified interface, which is common to all detection algorithms.
Attributes:
serialized_model: json containing information about stored model.
slow_drift: Boolean. True indicates we are trying to detect trend changes.
False indicates we are trying to detect level changes.
Typical Usage:
level_ts is an instance of TimeSeriesData
>>> bocpd_detector = BocpdDetectorModel()
>>> anom = bocpd_detector.fit_predict(data=level_ts)
"""
def __init__(
self,
serialized_model: Optional[bytes] = None,
slow_drift: bool = False,
threshold: Optional[float] = None,
) -> None:
self.slow_drift: bool = False
self.threshold: Optional[float] = None
self.response: Optional[AnomalyResponse] = None
self.last_N: int = 0
if serialized_model is None:
self.slow_drift = slow_drift
self.threshold = threshold
else:
model_dict = json.loads(serialized_model)
if "slow_drift" in model_dict:
self.slow_drift = model_dict["slow_drift"]
else:
self.slow_drift = slow_drift
if "threshold" in model_dict:
self.threshold = model_dict["threshold"]
else:
self.threshold = threshold
def serialize(self) -> bytes:
"""Returns the serialzed model.
Args:
None.
Returns:
json containing information about serialized model.
"""
model_dict = {"slow_drift": self.slow_drift}
return json.dumps(model_dict).encode("utf-8")
def _handle_missing_data_extend(
self, data: TimeSeriesData, historical_data: TimeSeriesData
) -> TimeSeriesData:
# extend() works only when there is no missing data
# hence, we will interpolate if there is missing data
# but we will remove the interpolated data when we
# evaluate, to make sure that the anomaly score is
# the same length as data
original_time_list = list(historical_data.time) + list(data.time)
if historical_data.is_data_missing():
historical_data = historical_data.interpolate()
if data.is_data_missing():
data = data.interpolate()
historical_data.extend(data)
# extend has been done, now remove the interpolated data
data = TimeSeriesData(
pd.DataFrame(
{
"time": [
historical_data.time.iloc[i]
for i in range(len(historical_data))
if historical_data.time.iloc[i] in original_time_list
],
"value": [
historical_data.value.iloc[i]
for i in range(len(historical_data))
if historical_data.time.iloc[i] in original_time_list
],
}
),
use_unix_time=True,
unix_time_units="s",
tz="US/Pacific",
)
return data
# pyre-fixme[14]: `fit_predict` overrides method defined in `DetectorModel`
# inconsistently.
def fit_predict(
self,
data: TimeSeriesData,
historical_data: Optional[TimeSeriesData] = None,
**kwargs: Any,
) -> AnomalyResponse:
"""Finds changepoints and returns score.
Uses the current data and historical data to find the changepoints, and
returns an AnomalyResponse object, the scores corresponding to probability
of changepoints.
Args:
data: TimeSeriesData object representing the data
historical_data: TimeSeriesData object representing the history. Data
should start exactly where the historical_data ends.
Returns:
AnomalyResponse object, representing the changepoint probabilities. The
score property contains the changepoint probabilities. The length of
the object is the same as the length of the data.
"""
self.last_N = len(data)
# if there is historical data
# we prepend it to data, and run
# the detector as if we only saw data
if historical_data is not None:
data = self._handle_missing_data_extend(data, historical_data)
bocpd_model = BOCPDetector(data=data)
if not self.slow_drift:
if self.threshold is not None:
_ = bocpd_model.detector(
model=BOCPDModelType.NORMAL_KNOWN_MODEL,
choose_priors=True,
agg_cp=True,
threshold=self.threshold,
)
else:
_ = bocpd_model.detector(
model=BOCPDModelType.NORMAL_KNOWN_MODEL,
choose_priors=True,
agg_cp=True,
)
else:
if self.threshold is not None:
_ = bocpd_model.detector(
model=BOCPDModelType.TREND_CHANGE_MODEL,
choose_priors=False,
agg_cp=True,
threshold=self.threshold,
)
else:
_ = bocpd_model.detector(
model=BOCPDModelType.TREND_CHANGE_MODEL,
choose_priors=False,
agg_cp=True,
)
change_prob_dict = bocpd_model.get_change_prob()
change_prob = list(change_prob_dict.values())[0]
# construct the object
N = len(data)
default_ts = TimeSeriesData(time=data.time, value=pd.Series(N * [0.0]))
score_ts = TimeSeriesData(time=data.time, value= | pd.Series(change_prob) | pandas.Series |
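# --- Illustrative usage sketch (editor's addition; assumes kats and numpy are installed) ---
# Minimal end-to-end call of the BocpdDetectorModel defined above on synthetic data with
# a level shift. The attribute name `scores` on the returned AnomalyResponse is assumed
# from the docstring's description of the score property.
import numpy as np
import pandas as pd
from kats.consts import TimeSeriesData

def bocpd_demo():
    times = pd.date_range("2021-01-01", periods=100, freq="D")
    values = np.concatenate([np.random.normal(0, 1, 50), np.random.normal(5, 1, 50)])
    level_ts = TimeSeriesData(time=pd.Series(times), value=pd.Series(values))
    bocpd_detector = BocpdDetectorModel()   # slow_drift=False -> level-change detection
    anom = bocpd_detector.fit_predict(data=level_ts)
    return anom.scores                      # changepoint probabilities, same length as data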
# -*- coding: utf-8 -*-
"""DataFrame client for InfluxDB."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import math
from collections import defaultdict
import pandas as pd
from .client import InfluxDBClient
from .line_protocol import _escape_tag
def _pandas_time_unit(time_precision):
unit = time_precision
if time_precision == 'm':
unit = 'ms'
elif time_precision == 'u':
unit = 'us'
elif time_precision == 'n':
unit = 'ns'
assert unit in ('s', 'ms', 'us', 'ns')
return unit
def _escape_pandas_series(s):
return s.apply(lambda v: _escape_tag(v))
class DataFrameClient(InfluxDBClient):
"""DataFrameClient instantiates InfluxDBClient to connect to the backend.
The ``DataFrameClient`` object holds information necessary to connect
to InfluxDB. Requests can be made to InfluxDB directly through the client.
The client reads and writes from pandas DataFrames.
"""
EPOCH = pd.Timestamp('1970-01-01 00:00:00.000+00:00')
def write_points(self,
dataframe,
measurement,
tags=None,
tag_columns=None,
field_columns=None,
time_precision=None,
database=None,
retention_policy=None,
batch_size=None,
protocol='line',
numeric_precision=None):
"""Write to multiple time series names.
:param dataframe: data points in a DataFrame
:param measurement: name of measurement
:param tags: dictionary of tags, with string key-values
:param time_precision: [Optional, default None] Either 's', 'ms', 'u'
or 'n'.
:param batch_size: [Optional] Value to write the points in batches
instead of all at one time. Useful for when doing data dumps from
one database to another or when doing a massive write operation
:type batch_size: int
:param protocol: Protocol for writing data. Either 'line' or 'json'.
:param numeric_precision: Precision for floating point values.
Either None, 'full' or some int, where int is the desired decimal
precision. 'full' preserves full precision for int and float
datatypes. Defaults to None, which preserves 14-15 significant
figures for float and all significant figures for int datatypes.
"""
if tag_columns is None:
tag_columns = []
if field_columns is None:
field_columns = []
if batch_size:
number_batches = int(math.ceil(len(dataframe) / float(batch_size)))
for batch in range(number_batches):
start_index = batch * batch_size
end_index = (batch + 1) * batch_size
if protocol == 'line':
points = self._convert_dataframe_to_lines(
dataframe.iloc[start_index:end_index].copy(),
measurement=measurement,
global_tags=tags,
time_precision=time_precision,
tag_columns=tag_columns,
field_columns=field_columns,
numeric_precision=numeric_precision)
else:
points = self._convert_dataframe_to_json(
dataframe.iloc[start_index:end_index].copy(),
measurement=measurement,
tags=tags,
time_precision=time_precision,
tag_columns=tag_columns,
field_columns=field_columns)
super(DataFrameClient, self).write_points(
points,
time_precision,
database,
retention_policy,
protocol=protocol)
return True
if protocol == 'line':
points = self._convert_dataframe_to_lines(
dataframe,
measurement=measurement,
global_tags=tags,
tag_columns=tag_columns,
field_columns=field_columns,
time_precision=time_precision,
numeric_precision=numeric_precision)
else:
points = self._convert_dataframe_to_json(
dataframe,
measurement=measurement,
tags=tags,
time_precision=time_precision,
tag_columns=tag_columns,
field_columns=field_columns)
super(DataFrameClient, self).write_points(
points,
time_precision,
database,
retention_policy,
protocol=protocol)
return True
def query(self,
query,
params=None,
epoch=None,
expected_response_code=200,
database=None,
raise_errors=True,
chunked=False,
chunk_size=0,
dropna=True):
"""
Query data into a DataFrame.
:param query: the actual query string
:param params: additional parameters for the request, defaults to {}
:param epoch: response timestamps to be in epoch format either 'h',
'm', 's', 'ms', 'u', or 'ns',defaults to `None` which is
RFC3339 UTC format with nanosecond precision
:param expected_response_code: the expected status code of response,
defaults to 200
:param database: database to query, defaults to None
:param raise_errors: Whether or not to raise exceptions when InfluxDB
returns errors, defaults to True
:param chunked: Enable to use chunked responses from InfluxDB.
With ``chunked`` enabled, one ResultSet is returned per chunk
containing all results within that chunk
:param chunk_size: Size of each chunk to tell InfluxDB to use.
:param dropna: drop columns where all values are missing
:returns: the queried data
:rtype: :class:`~.ResultSet`
"""
query_args = dict(params=params,
epoch=epoch,
expected_response_code=expected_response_code,
raise_errors=raise_errors,
chunked=chunked,
chunk_size=chunk_size)
results = super(DataFrameClient, self).query(query, **query_args)
if query.strip().upper().startswith("SELECT"):
if len(results) > 0:
return self._to_dataframe(results, dropna)
else:
return {}
else:
return results
def _to_dataframe(self, rs, dropna=True):
result = defaultdict(list)
if isinstance(rs, list):
return map(self._to_dataframe, rs)
for key, data in rs.items():
name, tags = key
if tags is None:
key = name
else:
key = (name, tuple(sorted(tags.items())))
df = pd.DataFrame(data)
df.time = pd.to_datetime(df.time)
df.set_index('time', inplace=True)
df.index = df.index.tz_localize('UTC')
df.index.name = None
result[key].append(df)
for key, data in result.items():
df = | pd.concat(data) | pandas.concat |
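# --- Illustrative usage sketch (editor's addition) ---
# Round trip through the DataFrameClient defined above. The connection settings, database
# and measurement names are placeholders and assume a reachable InfluxDB server.
import pandas as pd

def dataframe_client_demo():
    client = DataFrameClient(host='localhost', port=8086, database='example_db')
    df = pd.DataFrame({'value': [0.1, 0.2, 0.3]},
                      index=pd.date_range('2020-01-01', periods=3, freq='H', tz='UTC'))
    client.write_points(df, 'cpu_load', tags={'host': 'server01'}, protocol='line')
    result = client.query('SELECT * FROM "cpu_load"')   # dict: measurement -> DataFrame
    return result.get('cpu_load')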
# -*- coding: utf-8 -*-
"""
Created on Sat Mar 5 02:12:12 2022
@author: Kraken
Project: MHP Hackathon
"""
import os
import pandas as pd
import matplotlib.pyplot as plt
plt.rcParams.update({'font.size': 16})
WORKING_DIR = "model_14"
WORKING_DIR2 = "model_12"
# "model_8": dqn with fixed weights
# "model_4": dqn
MVG_AVG_WINDOW = 5
# =============================================================================
# Queue Plots - Combined
# =============================================================================
QUEUE = "plot_queue_data.txt"
# rl agent
with open(os.path.join(WORKING_DIR, QUEUE), "r") as txtfile:
data = txtfile.readlines()
data = [float(x.rstrip("\n")) for x in data][:250]
# ctl
with open(os.path.join(WORKING_DIR2, QUEUE), "r") as txtfile:
data2 = txtfile.readlines()
data2 = [float(x.rstrip("\n")) for x in data2]
fig = plt.figure(figsize=(12, 8))
plt.plot(data2, "blue", label="Conventional Traffic Lights")
plt.plot(data, "orange", label="RL Agent")
plt.xlabel("# Episodes")
plt.ylabel("Average queue length (vehicles)")
plt.title("Conventional Traffic Lights & RL Optimized Smart Traffic Lights")
plt.grid()
plt.legend(loc="upper right")
plt.savefig(QUEUE.replace("_data.txt", "_combined.png"))
# =============================================================================
# Delay Plots - Combined
# =============================================================================
QUEUE = "plot_delay_data.txt"
# rl agent
with open(os.path.join(WORKING_DIR, QUEUE), "r") as txtfile:
data = txtfile.readlines()
data = [float(x.rstrip("\n")) for x in data][:250]
# ctl
with open(os.path.join(WORKING_DIR2, QUEUE), "r") as txtfile:
data2 = txtfile.readlines()
data2 = [float(x.rstrip("\n")) for x in data2]
fig = plt.figure(figsize=(12, 8))
plt.plot(data, "orange", label="RL Agent")
plt.plot(data2, "blue", label="Conventional Traffic Lights")
plt.xlabel("# Episodes")
plt.ylabel("Cumulative Delay (s)")
plt.title("Conventional Traffic Lights & RL Optimized Smart Traffic Lights")
plt.grid()
plt.legend(loc="upper right")
plt.savefig(QUEUE.replace("_data.txt", "_combined.png"))
# =============================================================================
# Reward Plots - Combined
# =============================================================================
QUEUE = "plot_reward_data.txt"
# rl agent
with open(os.path.join(WORKING_DIR, QUEUE), "r") as txtfile:
data = txtfile.readlines()
data = [float(x.rstrip("\n")) for x in data][:250]
# ctl
with open(os.path.join(WORKING_DIR2, QUEUE), "r") as txtfile:
data2 = txtfile.readlines()
data2 = [float(x.rstrip("\n")) for x in data2]
fig = plt.figure(figsize=(12, 8))
plt.plot(data, "orange", label="RL Agent")
plt.plot(data2, "blue", label="Conventional Traffic Lights")
plt.xlabel("# Episodes")
plt.ylabel("Cumulative Negative Reward")
plt.title("Conventional Traffic Lights & RL Optimized Smart Traffic Lights")
plt.grid()
plt.legend(loc="best")
plt.savefig(QUEUE.replace("_data.txt", "_combined.png"))
WORKING_DIR = "model_14"
MVG_AVG_WINDOW = 5
# =============================================================================
# Queue Plots
# =============================================================================
QUEUE = "plot_queue_data.txt"
with open(os.path.join(WORKING_DIR, QUEUE), "r") as txtfile:
data = txtfile.readlines()
data = [float(x.rstrip("\n")) for x in data]
data_series = pd.Series(data).rolling(MVG_AVG_WINDOW).mean().tolist()
first_value = data_series[MVG_AVG_WINDOW - 1]
last_value = data_series[-1]
perc_decrease = (first_value - last_value) / first_value * 100
fig = plt.figure(figsize=(12, 8))
plt.plot(data)
plt.plot(data_series, "r")
plt.xlabel("# Episodes")
plt.ylabel("Average queue length (vehicles)")
plt.title(f"Decrease: {first_value:.2f} -> {last_value:.2f} = {perc_decrease:.2f}%")
plt.savefig(os.path.join(WORKING_DIR, QUEUE.replace("_data.txt", "_new.png")))
# =============================================================================
# Delay Plots
# =============================================================================
DELAY = "plot_delay_data.txt"
with open(os.path.join(WORKING_DIR, DELAY), "r") as txtfile:
data = txtfile.readlines()
data = [float(x.rstrip("\n")) for x in data]
data_series = | pd.Series(data) | pandas.Series |
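# --- Illustrative sketch (editor's addition) ---
# The plots above smooth each metric with a trailing rolling mean (window MVG_AVG_WINDOW,
# defined above) and report the percentage decrease between the first fully-populated
# smoothed value and the last one. Toy data, same calculation:
raw = [10.0, 9.5, 9.0, 8.0, 7.5, 6.0, 5.5, 5.0]
smoothed = pd.Series(raw).rolling(MVG_AVG_WINDOW).mean().tolist()
demo_first = smoothed[MVG_AVG_WINDOW - 1]
demo_last = smoothed[-1]
demo_decrease = (demo_first - demo_last) / demo_first * 100
print(f"Decrease: {demo_first:.2f} -> {demo_last:.2f} = {demo_decrease:.2f}%")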
# jupyter nbconvert ouxml/OU_XML2md_Converter.ipynb --to script --template cleaner_py.tpl
# black ouxml/*.py
#!/usr/bin/env python
# coding: utf-8
# #!pip3 install markdownify
from bs4 import BeautifulSoup
from markdownify import markdownify as md
from pkg_resources import resource_string
import lxml.html
from lxml import etree
from pathlib import Path
import base64
# ##!pip3 install oyaml
import oyaml as yaml
def checkDirPath(path):
Path(path).mkdir(parents=True, exist_ok=True)
# Do some setup
import os
# Py3.7 supports ordered_dict natively?
import collections
import re
import glob
# If it looks like the file is down a directory path, make sure the path is there
# If it isn't, the XSLT won't work when it tries to write the output files...
def check_outdir(output_path_stub):
path = output_path_stub.split("/")
if len(path) > 1:
dirpath = "/".join(path[:-1])
if not os.path.exists(dirpath):
os.makedirs(dirpath)
import sqlite3
from sqlite_utils import Database
import pandas as pd
from lxml import etree
def get_file(fn):
"""Get file content from local store."""
# This should work locally or in package
try:
txt = resource_string(__name__, fn)
except:
txt = open(fn).read()
return txt
def get_xslt():
"""Return xlst file as text."""
# TO DO - it would be better if the following accepted an XML string or the path to an XML file
def transform_xml2md(xml, xslt="xslt/ouxml2md.xslt", output_path_stub=""):
"""Take an OU-XML document as a string
and transform the document to one or more markdown files."""
if xml.endswith('.xml') and Path(xml).is_file():
with open(xml, 'r') as f:
xml = f.read()
check_outdir(output_path_stub)
_xslt = get_file(xslt)
xslt_doc = etree.fromstring(_xslt)
xslt_transformer = etree.XSLT(xslt_doc)
source_doc = etree.fromstring(xml.encode("utf-8"))
# It would be handy if we could also retrieve what files the transformer generated?
# One way of doing this might be to pop everything into a temporary directory
# and then parse the contents of that directory into a database table?
output_doc = xslt_transformer(
source_doc, filestub=etree.XSLT.strparam("{}".format(output_path_stub))
)
def transformer(conn, key, val, output_path_stub="testout"):
"""Grab XML and trasnform it to individual markdown files and toc file."""
check_outdir(output_path_stub)
# key / val is something like url / 1432311 ie a view resource ID
dummy_xml = pd.read_sql(
"SELECT * FROM htmlxml WHERE {} LIKE '%{}%'".format(key, val), conn
)["xml"]
# If there is more than one XML file returned, just go with the first one for now
# TO DO - improve this behaviour if multiple files are returned
dummy_xml = dummy_xml[0]
# Generate individual markdown files from sessions
transform_xml2md(dummy_xml, xslt="xslt/ouxml2md.xslt", output_path_stub=output_path_stub)
# Generate table of contents as Unit_toc.md
#transform_xml2md(dummy_xml, xslt="xslt/ouxml2toc.xslt", output_path_stub=output_path_stub)
def _post_process(output_dir_path):
# postprocess
if os.path.exists(output_dir_path):
for fn in [f for f in os.listdir(output_dir_path) if re.match(".*\.md$", f)]:
fnp = os.path.join(output_dir_path, fn)
with open(fnp) as f:
txt = _txt = f.read()
# Do postprocess step(s)
# Get rid of excess end of lines
txt = re.sub(r"[\r\n][\r\n]{2,}", "\n\n", txt)
# Get rid of excess end of lines in code blocks
txt = re.sub(r"```python[\r\n]{2,}", "```python\n", txt)
# Optionally rewrite the supplied markdown file with re-referenced image links
if txt != _txt:
print("Rewriting {}".format(fnp))
with open(fnp, "w") as f:
f.write(txt)
# TO DO - we need a better form of pattern matching and rewriting
# to allow more flexibility in the patterned naming of created directories and renamed files
def _directory_processor(srcdir, new_suffix="part_"):
"""Take filenames of the form stub_WW_NN.md in a flat directory and map them to
filenames of form the {new_suffix}_WW/stub_WW_NN.md in the same directory."""
weeks = []
srcdir = srcdir.rstrip("/")
for f in os.listdir(srcdir):
# for example, stub_00_01.md
w = f.split("_")[1]
# gives w as 00
if w not in weeks:
weeks.append(w)
for w in weeks:
newdir = f"{srcdir}/{new_suffix}{w}"
# for example, testdir/week_00
if os.path.isdir(newdir):
print(f"{newdir} already exists...")
else:
os.makedirs(newdir)
for f in os.listdir(srcdir):
if f.endswith(".md"):
# For example stub_00_01.md
w = f.split("_")[1]
# for example w as 00
os.rename(f"{srcdir}/{f}", f"{srcdir}/{new_suffix}{w}/{f}")
# so testdir/stub_00_01.md becomes testdir/week_00/stub_00_01.md
# https://stackoverflow.com/a/29280824/454773
import markdown
from markdown.treeprocessors import Treeprocessor
from markdown.extensions import Extension
# First create the treeprocessor
class ImgExtractor(Treeprocessor):
def run(self, doc):
"Find all images and append to markdown.images. "
self.md.images = []
for image in doc.findall(".//img"):
self.md.images.append(image.get("src"))
# Then tell markdown about it
class ImgExtExtension(Extension):
def extendMarkdown(self, md, md_globals):
img_ext = ImgExtractor(md)
md.treeprocessors.add("imgext", img_ext, ">inline")
# Finally create an instance of the Markdown class with the new extension
md = markdown.Markdown(extensions=[ImgExtExtension()])
def get_imgkeys_from_md(md_raw=None, md_filepath=None):
"""Generate imgkeys set for image paths in markdown file."""
if md_raw is None and md_filepath is not None:
md_raw = open(md_filepath).read()
html = md.convert(md_raw)
# The img URLs may be in one of two forms:
imgkeys = {}
# \\\\DCTM_FSS\\content\\Teaching and curriculum\\Modules\\T Modules\\TM112\\TM112 materials\\Block 1 e1\\Block 1 Part 1\\_Assets\\tm112_intro_table_01.eps
imgkeys.update({u: u.split("\\")[-1] for u in md.images if "\\" in u})
# https://openuniv.sharepoint.com/sites/tmodules/tm112/block2e1/tm112_blk02_pt04_f07.tif
imgkeys.update({u: u.split("/")[-1] for u in md.images if "/" in u})
return imgkeys
def generate_imgdict(imgkeys, DB):
"""Automatically generate an imgdict that maps agains links in a markdown file."""
q = """
SELECT DISTINCT xurl, h.stub as p , b64image
FROM htmlfigures h JOIN xmlfigures x JOIN imagetest i
WHERE x.minstub=h.minstub
AND i.stub=h.stub
AND x.stub in ({});
""".format(
", ".join(['"{}"'.format(imgkeys[k]) for k in imgkeys])
)
tmp_img = pd.read_sql(q, DB.conn)
imgdict = tmp_img.set_index("xurl").to_dict()["p"]
# Return the dataframe from which we can save the images to disk
return tmp_img, imgdict
# OU_Course_Material_Assets.ipynb currently has scraper for getting a database together
def _crossmatch_xml_html_links(imgdict, fn, imgdirpath="", rewrite=False):
""" Try to reconcile XML paths to HTML image paths in supplied markdown file. """
# Open the markdown file
with open(fn) as f:
txt = _txt = f.read()
# Replace image references with actual image links
for k in imgdict:
# print(k.lstrip('\\'))
txt = txt.replace(
k.lstrip("\\"), "{}".format(os.path.join(imgdirpath, imgdict[k]))
)
# Optionally rewrite the supplied markdown file with re-referenced image links
if rewrite and (txt != _txt):
print("Rewriting {}".format(fn))
with open(fn, "w") as f:
f.write(txt)
# Return the rewritten markdown
return txt
def crossmatch_xml_html_links(
imgdict, imgdirpath="", contentdir=".", content_prefix=""
):
""" For markdown files in a content directory, rewrite matched image URLs. """
# Detect markdown files
candidate_files = [
"{}/{}".format(contentdir, f)
for f in os.listdir(contentdir)
if re.match(".*\.md$", f)
]
if content_prefix:
candidate_files = [f for f in candidate_files if f.startswith(content_prefix)]
for fn in candidate_files:
print("Handling {}".format(fn))
_ = _crossmatch_xml_html_links(imgdict, fn, imgdirpath, rewrite=True)
# We could save the md files to a table here, and perhaps also convert to ipynb in same table?
def save_image_from_db(x, imgdir="testimages"):
""" Save image in db to file. """
img_data = x["b64image"] # sql("SELECT * FROM imagetest LIMIT 1;")[0]['b64image']
fn = "{}/{}".format(imgdir, x["p"])
with open(fn, "wb") as f:
f.write(base64.decodebytes(img_data))
return fn
# OpenLearn image map
# For openlearn units, we just have the xml and imagetest tables
# Rewrite imagelinks as stubs
# BUT - we need some sort of secret for the image file dereferencing to work?
# save images
def openlearn_image_mapper(dbname, _basedir="oumd_demo3", _imgdir="testimages"):
def _generate_imgdict(DB, imgkeys):
"""Automatically generate an imgdict that maps agains links in a markdown file."""
q = """
SELECT DISTINCT srcurl, x.stub as p, b64image
FROM xmlfigures x JOIN imagetest i
WHERE x.minstub=i.minstub AND x.minstub in ({});
""".format(
", ".join(['"{}"'.format(imgkeys[k].split('.')[0]) for k in imgkeys])
)
tmp_img = | pd.read_sql(q, DB.conn) | pandas.read_sql |
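# --- Illustrative sketch (editor's addition) ---
# get_imgkeys_from_md() reduces every image URL found in the markdown to its bare file
# name, handling both Windows-style DCTM paths and sharepoint-style URLs. The sample
# paths below are adapted from the comments in that function.
sample_urls = [
    "\\\\DCTM_FSS\\content\\Block 1 Part 1\\_Assets\\tm112_intro_table_01.eps",
    "https://openuniv.sharepoint.com/sites/tmodules/tm112/tm112_blk02_pt04_f07.tif",
]
demo_imgkeys = {}
demo_imgkeys.update({u: u.split("\\")[-1] for u in sample_urls if "\\" in u})
demo_imgkeys.update({u: u.split("/")[-1] for u in sample_urls if "/" in u})
print(demo_imgkeys)   # both URLs map to their trailing file names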
import logging
from datetime import date, timedelta
from typing import Dict
import pandas as pd
from databand import parameters
from dbnd import PipelineTask, output, parameter, task
from dbnd.testing.helpers_pytest import assert_run_task
from dbnd_test_scenarios.test_common.task.factories import TTask
class DummyTask(TTask):
float_param = parameters.FloatParameter()
expected_param = parameters.FloatParameter()
timedelta_param = parameters.TimeDeltaParameter()
expected_timedelta_param = parameters.TimeDeltaParameter()
def run(self):
assert isinstance(self.float_param, float)
assert isinstance(self.timedelta_param, timedelta)
assert self.float_param == self.expected_param
assert self.timedelta_param == self.expected_timedelta_param
super(DummyTask, self).run()
class DummyWrapper(PipelineTask):
defaults = {DummyTask.float_param: "0.1", DummyTask.timedelta_param: "4d"}
output = output
def band(self):
self.output = DummyTask().t_output
@task
def inline_df_without_typing(df_a, df_b):
return df_a.head(2), df_b.head(2)
@task
def parent_ab_without_typing(iterations=1):
a = pd.DataFrame(data=[[1, 1]] * 5, columns=["c1", "c2"])
b = | pd.DataFrame(data=[[1, 1]] * 5, columns=["c1", "c2"]) | pandas.DataFrame |
import pytest
import os
from mapping import util
from pandas.util.testing import assert_frame_equal, assert_series_equal
import pandas as pd
from pandas import Timestamp as TS
import numpy as np
@pytest.fixture
def price_files():
cdir = os.path.dirname(__file__)
path = os.path.join(cdir, 'data/')
files = ["CME-FVU2014.csv", "CME-FVZ2014.csv"]
return [os.path.join(path, f) for f in files]
def assert_dict_of_frames(dict1, dict2):
assert dict1.keys() == dict2.keys()
for key in dict1:
assert_frame_equal(dict1[key], dict2[key])
def test_read_price_data(price_files):
# using default name_func in read_price_data()
df = util.read_price_data(price_files)
dt1 = TS("2014-09-30")
dt2 = TS("2014-10-01")
idx = pd.MultiIndex.from_tuples([(dt1, "CME-FVU2014"),
(dt1, "CME-FVZ2014"),
(dt2, "CME-FVZ2014")],
names=["date", "contract"])
df_exp = pd.DataFrame([119.27344, 118.35938, 118.35938],
index=idx, columns=["Open"])
assert_frame_equal(df, df_exp)
def name_func(fstr):
file_name = os.path.split(fstr)[-1]
name = file_name.split('-')[1].split('.')[0]
return name[-4:] + name[:3]
df = util.read_price_data(price_files, name_func)
dt1 = TS("2014-09-30")
dt2 = TS("2014-10-01")
idx = pd.MultiIndex.from_tuples([(dt1, "2014FVU"), (dt1, "2014FVZ"),
(dt2, "2014FVZ")],
names=["date", "contract"])
df_exp = pd.DataFrame([119.27344, 118.35938, 118.35938],
index=idx, columns=["Open"])
assert_frame_equal(df, df_exp)
def test_calc_rets_one_generic():
idx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-05'), 'CLG5')])
rets = pd.Series([0.1, 0.05, 0.1, 0.8], index=idx)
vals = [1, 0.5, 0.5, 1]
widx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-05'), 'CLG5')
])
weights = pd.DataFrame(vals, index=widx, columns=['CL1'])
wrets = util.calc_rets(rets, weights)
wrets_exp = pd.DataFrame([0.1, 0.075, 0.8],
index=weights.index.levels[0],
columns=['CL1'])
assert_frame_equal(wrets, wrets_exp)
def test_calc_rets_two_generics():
idx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-03'), 'CLG5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-04'), 'CLH5'),
(TS('2015-01-05'), 'CLG5'),
(TS('2015-01-05'), 'CLH5')])
rets = pd.Series([0.1, 0.15, 0.05, 0.1, 0.8, -0.5, 0.2], index=idx)
vals = [[1, 0], [0, 1],
[0.5, 0], [0.5, 0.5], [0, 0.5],
[1, 0], [0, 1]]
widx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-03'), 'CLG5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-04'), 'CLH5'),
(TS('2015-01-05'), 'CLG5'),
(TS('2015-01-05'), 'CLH5')
])
weights = pd.DataFrame(vals, index=widx, columns=['CL1', 'CL2'])
wrets = util.calc_rets(rets, weights)
wrets_exp = pd.DataFrame([[0.1, 0.15], [0.075, 0.45], [-0.5, 0.2]],
index=weights.index.levels[0],
columns=['CL1', 'CL2'])
assert_frame_equal(wrets, wrets_exp)
def test_calc_rets_two_generics_nans_in_second_generic():
idx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-03'), 'CLG5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-04'), 'CLH5'),
(TS('2015-01-05'), 'CLG5'),
(TS('2015-01-05'), 'CLH5')])
rets = pd.Series([0.1, np.NaN, 0.05, 0.1, np.NaN, -0.5, 0.2],
index=idx)
vals = [[1, 0], [0, 1],
[0.5, 0], [0.5, 0.5], [0, 0.5],
[1, 0], [0, 1]]
widx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-03'), 'CLG5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-04'), 'CLH5'),
(TS('2015-01-05'), 'CLG5'),
(TS('2015-01-05'), 'CLH5')
])
weights = pd.DataFrame(vals, index=widx, columns=['CL1', 'CL2'])
wrets = util.calc_rets(rets, weights)
wrets_exp = pd.DataFrame([[0.1, np.NaN], [0.075, np.NaN], [-0.5, 0.2]],
index=weights.index.levels[0],
columns=['CL1', 'CL2'])
assert_frame_equal(wrets, wrets_exp)
def test_calc_rets_two_generics_non_unique_columns():
idx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-03'), 'CLG5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-04'), 'CLH5'),
(TS('2015-01-05'), 'CLG5'),
(TS('2015-01-05'), 'CLH5')])
rets = pd.Series([0.1, 0.15, 0.05, 0.1, 0.8, -0.5, 0.2], index=idx)
vals = [[1, 0], [0, 1],
[0.5, 0], [0.5, 0.5], [0, 0.5],
[1, 0], [0, 1]]
widx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-03'), 'CLG5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-04'), 'CLH5'),
(TS('2015-01-05'), 'CLG5'),
(TS('2015-01-05'), 'CLH5')
])
weights = pd.DataFrame(vals, index=widx, columns=['CL1', 'CL1'])
with pytest.raises(ValueError):
util.calc_rets(rets, weights)
def test_calc_rets_two_generics_two_asts():
idx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-03'), 'CLG5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-04'), 'CLH5'),
(TS('2015-01-05'), 'CLG5'),
(TS('2015-01-05'), 'CLH5')])
rets1 = pd.Series([0.1, 0.15, 0.05, 0.1, 0.8, -0.5, 0.2], index=idx)
idx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'COF5'),
(TS('2015-01-03'), 'COG5'),
(TS('2015-01-04'), 'COF5'),
(TS('2015-01-04'), 'COG5'),
(TS('2015-01-04'), 'COH5')])
rets2 = pd.Series([0.1, 0.15, 0.05, 0.1, 0.4], index=idx)
rets = {"CL": rets1, "CO": rets2}
vals = [[1, 0], [0, 1],
[0.5, 0], [0.5, 0.5], [0, 0.5],
[1, 0], [0, 1]]
widx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-03'), 'CLG5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-04'), 'CLH5'),
(TS('2015-01-05'), 'CLG5'),
(TS('2015-01-05'), 'CLH5')
])
weights1 = pd.DataFrame(vals, index=widx, columns=["CL0", "CL1"])
vals = [[1, 0], [0, 1],
[0.5, 0], [0.5, 0.5], [0, 0.5]]
widx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'COF5'),
(TS('2015-01-03'), 'COG5'),
(TS('2015-01-04'), 'COF5'),
(TS('2015-01-04'), 'COG5'),
(TS('2015-01-04'), 'COH5')
])
weights2 = pd.DataFrame(vals, index=widx, columns=["CO0", "CO1"])
weights = {"CL": weights1, "CO": weights2}
wrets = util.calc_rets(rets, weights)
wrets_exp = pd.DataFrame([[0.1, 0.15, 0.1, 0.15],
[0.075, 0.45, 0.075, 0.25],
[-0.5, 0.2, pd.np.NaN, pd.np.NaN]],
index=weights["CL"].index.levels[0],
columns=['CL0', 'CL1', 'CO0', 'CO1'])
assert_frame_equal(wrets, wrets_exp)
def test_calc_rets_missing_instr_rets_key_error():
idx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLG5')])
irets = pd.Series([0.02, 0.01, 0.012], index=idx)
vals = [1, 1/2, 1/2, 1]
widx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLG5'),
( | TS('2015-01-05') | pandas.Timestamp |
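# --- Illustrative sketch (editor's addition, independent of the mapping package) ---
# calc_rets() aggregates instrument returns into generic returns using per-date weights:
# on a roll date the generic return is the weighted sum of the two contracts' returns.
# The toy numbers mirror the 2015-01-04 row of the one-generic test above.
def toy_generic_return():
    idx = pd.MultiIndex.from_tuples([(TS('2015-01-04'), 'CLF5'),
                                     (TS('2015-01-04'), 'CLG5')])
    rets = pd.Series([0.05, 0.1], index=idx)
    wts = pd.Series([0.5, 0.5], index=idx)
    return (rets * wts).groupby(level=0).sum()   # -> 0.075 for 2015-01-04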
import csv
import sys
import plotly.express as px
import plotly.graph_objects as go
from plotly.subplots import make_subplots
import pandas as pd
import json
from os import listdir
from os.path import isfile, join
import re
monnomdistances={'C':0,'I':0,'D':1,'J':1,'K':2,'L':1,'M':2,'S':1,'T':2}
markersize=8
linewidth=3
markerstyles = {'MonNom':{'color':'#000000', 'symbol':'x','size':markersize+2},
'Nom':{'color':'#00B050', 'symbol':'cross','size':markersize},
'Proxied Grift':{'color':'#ED7D31', 'symbol':'arrow-up','size':markersize},
'Monotonic Grift':{'color':'#ED7D31', 'symbol':'diamond-open','size':markersize},
'Racket':{'color':'#4472C4', 'symbol':'circle-open','size':markersize},
'C#':{'color':'#264478', 'symbol':'diamond','size':markersize},
'Java':{'color':'#7030A0', 'symbol':'diamond-wide','size':markersize+3},
'NodeJS':{'color':'#9E480E', 'symbol':'circle','size':markersize},
'HiggsCheck':{'color':'#C00000', 'symbol':'arrow-up','size':markersize},
'Reticulated':{'color':'#B21E6F', 'symbol':'circle-open','size':markersize}}
linestyles = {'MonNom':{'color':'#000000', 'width':linewidth},
'Nom':{'color':'#00aa00', 'dash':'dash', 'width':linewidth},
'Proxied Grift':{'color':'#ED7D31', 'dash':'longdash', 'width':linewidth},
'Monotonic Grift':{'color':'#ED7D31', 'dash':'dashdot', 'width':linewidth},
'Racket':{'color':'#4472C4', 'dash':'dot', 'width':linewidth},
'C#':{'color':'#264478', 'dash':'dot', 'width':linewidth},
'Java':{'color':'#7030A0', 'dash':'dot', 'width':linewidth},
'NodeJS':{'color':'#9E480E', 'dash':'dot', 'width':linewidth},
'HiggsCheck':{'color':'#C00000', 'dash':'dot', 'width':linewidth},
'Reticulated':{'color':'#B21E6F', 'dash':'dot', 'width':linewidth}}
def distance_to_fully_typed(config):
ret=0
for c in config:
ret+=monnomdistances[c]
return ret
def combine_funcs(f1,f2):
return lambda x: f2(f1(x))
def cut_dotbm(str):
return str[4:]
def fetch_key(key,data):
try:
if(key.isdigit()):
return data.loc[int(key)][0]
else:
return data.loc[key][0]
except KeyError:
return "REMOVE"
def load_converter(path):
data= | pd.read_csv(path,index_col=0) | pandas.read_csv |
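# --- Illustrative sketch (editor's addition) ---
# distance_to_fully_typed() sums, character by character, how far each component of a
# benchmark configuration string is from being fully typed, using the monnomdistances
# table above. 'CIDK' is an invented configuration string.
example_config = 'CIDK'
# C -> 0, I -> 0, D -> 1, K -> 2, so the total distance is 3.
print(distance_to_fully_typed(example_config))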
import string
import numpy as np
import re
import random
import pandas as pd
import os
from sklearn.preprocessing import OneHotEncoder
from sklearn.preprocessing import LabelEncoder
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
label_encoder = LabelEncoder()
def clean_str_new(s):
"""
Adapted from https://github.com/yoonkim/CNN_sentence/blob/master/process_data.py for Italian
"""
s = re.sub(r"\'s", "", s)
s = re.sub(r"\'ve", "have", s)
s = re.sub(r"n\'t", " not", s)
s = re.sub(r"\'re", " are", s)
s = re.sub(r"\'d", " would", s)
s = re.sub(r"\'ll", " will", s)
s = re.sub(r"\'", " ", s)
punc = re.compile('[%s]' % re.escape(string.punctuation))
s = punc.sub('', s) # removes punctuation, not accents
DIGITS = re.compile("[0-9]", re.UNICODE)
s = DIGITS.sub("#", s)
s = re.sub(r"\s{2,}", " ", s)
s = s.lower()
s = s.strip()
return s
def remove_double_spaces(s):
s = re.sub(r"\s{2,}", " ", s)
return s
s = '<NAME>or, is from 3.2. But'
def clean_str(s):
"""
Tokenization/s cleaning for all datasets except for SST.
Original taken from https://github.com/yoonkim/CNN_sentence/blob/master/process_data.py
"""
s = re.sub(r"[^\\p{L}\\s]", " ", s) # This removes accents, which we want.
s = re.sub(r"[^A-Za-z0-9(),!?\'\`]", " ", s) #This removes accents, which we want.
s = re.sub(r"\'s", "", s)
s = re.sub(r"\'ve", "have", s)
s = re.sub(r"n\'t", " not", s)
s = re.sub(r"\'re", " are", s)
s = re.sub(r"\'d", " would", s)
s = re.sub(r"\'ll", " will", s)
s = re.sub(r",", "", s) #s = re.sub(r",", " ,", s)
s = re.sub(r"!", "", s)
# s = re.sub(r"\(", "\(", s)
# s = re.sub(r"\)", "\) ", s)
s = re.sub(r"\?", "", s)
s = re.sub(r"\s{2,}", " ", s)
s = re.sub(r" ", " ", s)
return s.strip().lower()
def load_output_layers(path_to_dir):
'''These are the output_layers of the Xvalidation set, 100 sentences per 130 categories. Good for RSMs'''
loaded = np.load(path_to_dir+'output_layers.npz')
layers = []
layer1= pd.DataFrame(loaded['a'])
layers.append(layer1)
layer2 = pd.DataFrame(loaded['b'])
layers.append(layer2)
try:
layer3 = pd.DataFrame(loaded['c'])
layers.append(layer3)
except: pass
try:
layer4 = pd.DataFrame(loaded['d'])
layers.append(layer4)
except: pass
try:
layer5 = pd.DataFrame(loaded['e'])
layers.append(layer5)
except: pass
try:
layer6 = pd.DataFrame(loaded['f'])
layers.append(layer6)
except: pass
try:
layer7 = pd.DataFrame(loaded['g'])
layers.append(layer7)
except: pass
try:
layer8 = pd.DataFrame(loaded['h'])
layers.append(layer8)
except: pass
try:
layer9 = | pd.DataFrame(loaded['i']) | pandas.DataFrame |
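# --- Illustrative sketch (editor's addition) ---
# Example of what clean_str_new() above does to a raw sentence: punctuation is stripped,
# digits are masked with '#', accents are preserved and everything is lower-cased.
# The sample sentence is invented; the expected output was worked out by hand.
print(clean_str_new("Perché l'hotel, costruito nel 1987, è così famoso?"))
# -> "perché l hotel costruito nel #### è così famoso"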
from .base import Transformer
import pandas as pd
import numpy as np
import os
ISO_COUNTRY_CODES = os.path.join(os.path.dirname(__file__), 'countrycodes.csv')
class UNTransformer(Transformer):
""" Data source specific transformers """
def __init__(self, source, target):
super().__init__(source, target)
self.iso = pd.read_csv(ISO_COUNTRY_CODES,
usecols=[0, 2, 3],
names=['name', 'iso3', 'country-code'],
header=0)
def read(self):
try:
self.tot_df = pd.read_excel(self.source[0],
usecols="E:BS",
skiprows=16)
self.male_df = pd.read_excel(self.source[1],
usecols="E:BS",
skiprows=16)
self.female_df = pd.read_excel(self.source[2],
usecols="E:BS",
skiprows=16)
self.growth_rate_df = pd.read_excel(self.source[3],
usecols="E:R",
skiprows=16)
except FileNotFoundError as exc:
raise ValueError("Source file {} not found.".format(self.source)) \
from exc
def write(self):
self.df.to_csv(self.target, mode='w', index=False)
def transform(self):
# 1. Total Population
un_df_tot = pd.melt(self.tot_df,
id_vars=['Country code'],
var_name='year')
un_df_tot["Indicator Code"] = "UN.TOTL.POP"
un_df_tot["Indicator Name"] = "UN total population"
# 2. Total Male Population
un_df_male = pd.melt(self.male_df,
id_vars=['Country code'],
var_name='year')
un_df_male["Indicator Code"] = "UN.TOTL.POP_MALE"
un_df_male["Indicator Name"] = "UN Total Male Population"
# 3. Total Female Population
un_df_female = pd.melt(self.female_df,
id_vars=['Country code'],
var_name='year')
un_df_female["Indicator Code"] = "UN.TOTL.POP_FEMALE"
un_df_female["Indicator Name"] = "UN Total Female Population"
un_df = un_df_tot.append(un_df_male).append(un_df_female)
# map country codes
td = pd.merge(un_df, self.iso, how='left', left_on='Country code', right_on='country-code')
td.drop(['country-code'], axis=1, inplace=True)
td.value = td.value*1000 # UN Data in 1000's
self.df = td
# 4. Population Growth Rate
self.growth_rate_df.rename(columns=lambda x: x.split("-")[0], inplace=True)
growth_rate_master = self.growth_rate_df.drop('Country code', axis=1)
# Growth for every five years. Copying out the value for the five years
count = 0
while (count < 4):
count = count + 1
growth_rate_copy = growth_rate_master.copy()
growth_rate_copy.rename(columns=lambda x: str(int(x)+count), inplace=True)
self.growth_rate_df = | pd.concat([self.growth_rate_df, growth_rate_copy], axis=1, sort=False) | pandas.concat |
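# --- Illustrative sketch (editor's addition) ---
# transform() above relies on pd.melt to turn one column per year into long
# (country, year, value) rows before tagging the indicator codes. Minimal example
# with invented numbers:
import pandas as pd

wide = pd.DataFrame({'Country code': [4, 8],
                     '1950': [7752, 1263],
                     '1955': [8270, 1420]})
long_df = pd.melt(wide, id_vars=['Country code'], var_name='year')
# long_df columns: 'Country code', 'year', 'value' -- one row per country-year.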
"""
This script generates a train/test database on the basis of the given percentage.
It takes the images and the annotations stored in the same folder, shuffles them,
then copies them into the upper train/test folders and creates a relative csv file
to manipulate with TensorFlow.
More details come with the code.
"""
import pandas as pd
import os
import xml.etree.ElementTree as ET
from PIL import Image
import pyprind
from random import shuffle
from personal_errors import InputError, OutputError
from dataset_costants import \
TRAINING_PERCENTAGE, \
TABLE_DICT, \
ANNOTATIONS_EXTENSION, \
PATH_TO_ANNOTATIONS, \
TRAIN_CSV_TO_PATH, \
TRAIN_CSV_NAME, \
TEST_CSV_TO_PATH, \
TEST_CSV_NAME, \
PATH_TO_IMAGES, \
IMAGES_EXTENSION, \
MIN_HEIGHT_BOX, \
MIN_WIDTH_BOX
import logging
from logger import TimeHandler
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
logger.addHandler(TimeHandler().handler)
def get_file_list_per_extension(path, ext):
"""
Returns the folder and the list of the files with the 'ext' extension in the 'path' folder
:param path:
:param ext:
:return: path list
"""
ext_list = []
for (gen_path, file_paths, file_names) in os.walk(path):
for file in file_names:
if file.endswith(ext):
ext_list.append(file)
return gen_path, ext_list
def sanitize_coord(coordinates, width, height):
"""
points are: [[xmin, ymin], [xmax, ymin], [xmin, ymax], [xmax, ymax]]
it sanitize the coordinates that are extracted from a xml file. Valid for this dataset,
to be updated in case the dataset changes
Returning as dict: xmin, ymin, xmax, ymax
:param coordinates:[[xmin, ymin], [xmax, ymin], [xmin, ymax], [xmax, ymax]]
:return: dict with xmin, ymin, xmax, ymax coordinates
"""
coordinates = coordinates.split()
points = []
for point in coordinates:
point = point.split(',')
points.append(point)
new_points = {
'xmin': points[0][0],
'ymin': points[0][1],
'xmax': points[3][0],
'ymax': points[3][1]
}
# logger.info(new_points)
# logger.info('width: {w}, height: {h}'.format(w=width, h=height))
# check if coords are inverted
if int(new_points['ymin']) > int(new_points['ymax']):
logger.info('I found you y!')
temp = int(new_points['ymin'])
new_points['ymin'] = int(new_points['ymax'])
new_points['ymax'] = temp
if int(new_points['xmin']) > int(new_points['xmax']):
logger.info('I found you x!')
temp = new_points['xmin']
new_points['xmin'] = int(new_points['xmax'])
new_points['xmax'] = temp
if int(new_points['ymin']) < 0:
logger.info('Found some ymin at zero:')
new_points['ymin'] = 0
if int(new_points['xmin']) < 0:
logger.info('Found some xmin at zero')
new_points['xmin'] = 0
if int(new_points['ymax']) > height:
logger.info('Found some ymax beyond height: \nwidth: {w}, height: {h}\nnew_point["ymax"]: {npyx}' \
.format(w=width, h=height, npyx=new_points['ymax']))
new_points['ymax'] = height
if int(new_points['xmax']) > width:
logger.info('Found some xmax beyond height: \nwidth: {w}, height: {h}\nnew_point["xmax"]: {npxx}' \
.format(w=width, h=height, npxx=new_points['xmax']))
new_points['xmax'] = width
if (int(new_points['xmax']) - int(new_points['xmin'])) < MIN_WIDTH_BOX or \
(int(new_points['ymax']) - int(new_points['ymin'])) < MIN_HEIGHT_BOX:
logger.info('Box {} was too small. Going to delete it'.format(new_points))
new_points = None
return new_points
def xml_to_csv(img_folder, img_list, xml_folder, xml_list):
"""
it takes the file list and create a dedicated csv from the provided images with xml
:param img_folder: path to jpeg folder
:param img_list: list of files in jpeg folder
:param xml_folder: path to xml folder
:param xml_list: list of files in xml folder
:return: csv dataframe with the right informations for tensorflow
"""
logger.info('Generating csv from img list...')
xml = []
for img_file in img_list:
bar.update()
is_table = False
img_name = img_file.replace(IMAGES_EXTENSION, '')
xml_file = (img_name + ANNOTATIONS_EXTENSION)
if xml_file not in xml_list:
logger.warning('XML DESCRIPTION FILE NOT FOUND. PLEASE CHECK DATASET')
tree = ET.parse(os.path.join(xml_folder, xml_file))
root = tree.getroot()
# img_name = img_file
width, height = Image.open(os.path.join(img_folder, img_file)).size
value = None
for child in root.findall('.//tableRegion'):
# if table is present, value will report the correct label and the coordinates of the boxes.
# else, value will report img name, width and height but no other informations.
if not is_table:
is_table = True
coords = child.find('.//Coords')
coordinates = coords.get('points')
points = sanitize_coord(coordinates, width, height) # returning as dict: xmin, ymin, xmax, ymax
if points is None:
value = (img_file, 'no_table', 0, 0, 0, 0)
else:
# setting box as percentage of the image. This can be done in generate_tf_records also.
xmin = int(points['xmin'])
ymin = int(points['ymin'])
xmax = int(points['xmax'])
ymax = int(points['ymax'])
value = (img_file, TABLE_DICT['name'], xmin, ymin, xmax, ymax)
xml.append(value)
if not is_table:
value = (img_file, 'no_table', 0, 0, 0, 0)
xml.append(value)
logger.debug('Added new value: {}'.format(value))
logger.info('CSV successfully generated!')
# column_name columns must be remembered while generating tf records
column_name = ['filename', 'class', 'xmin', 'ymin', 'xmax', 'ymax']
xml_df = | pd.DataFrame(xml, columns=column_name) | pandas.DataFrame |
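# --- Illustrative sketch (editor's addition) ---
# The train/test split described in the module docstring boils down to shuffling the image
# list and slicing it at the training percentage (TRAINING_PERCENTAGE in the real script;
# here a plain default argument).
def toy_split(img_list, training_percentage=0.8):
    from random import shuffle
    shuffle(img_list)
    cut = int(len(img_list) * training_percentage)
    return img_list[:cut], img_list[cut:]   # (train_files, test_files)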
"""
Module to generate learning curves.
"""
import os
import pandas as pd
class Learning_Experiment:
# public
def __init__(self, config_obj, app_obj, util_obj):
self.config_obj = config_obj
self.app_obj = app_obj
self.util_obj = util_obj
def run_experiment(self, test_start=200, test_end=300,
train_start=0, train_end=100,
learn_sizes=[100000], domain='twitter', fold=0,
clf='lr', engine=None, relations=[],
testing_relational=True, super_train=False,
sim_dir=None):
assert train_end > train_start
assert test_start >= train_end
assert test_end > test_start
assert fold >= 0
rel_dir = self.config_obj.rel_dir
out_dir = rel_dir + 'output/' + domain + '/experiments/'
self.util_obj.create_dirs(out_dir)
fold = str(fold)
fn = fold + '_lrn.csv'
train_sizes = [int(x) for x in learn_sizes]
if not testing_relational:
train_sizes = [int(x) for x in learn_sizes]
ranges = self._create_ranges_independent(test_start=test_start,
test_end=test_end,
train_sizes=train_sizes)
else:
val_sizes = [int(x) for x in learn_sizes]
ranges = self._create_ranges_relational(test_start=test_start,
test_end=test_end,
train_start=train_start,
train_end=train_end,
val_sizes=val_sizes)
print(ranges)
rows = []
cols = ['learn_size']
for start, end, train_size, val_size, val_split, lrn_pts in ranges:
row = [lrn_pts]
d = self.app_obj.run(domain=domain, start=start, end=end,
fold=fold, engine=engine, clf=clf,
stacking=0, data='both',
train_size=train_size, val_size=val_size,
val_split=val_split, relations=relations,
sim_dir=sim_dir, super_train=super_train)
if cols == ['learn_size']:
cols.extend(['ind_aupr', 'ind_auroc'])
for model in ['psl', 'mrf']:
for metric in ['aupr', 'auroc']:
if model == engine or engine == 'all':
cols.append(model + '_' + metric)
for model in ['ind', 'psl', 'mrf']:
for metric in ['aupr', 'auroc']:
if d.get(model) is not None:
row.append(d[model][metric])
rows.append(row)
self._write_scores_to_csv(rows, cols=cols, out_dir=out_dir,
fname=fn)
# private
def _clear_data(self, domain='twitter'):
ind_dir = self.config_obj.ind_dir
rel_dir = self.config_obj.rel_dir
fold_dir = ind_dir + '/data/' + domain + '/folds/'
ind_pred_dir = ind_dir + '/output/' + domain + '/predictions/'
rel_pred_dir = rel_dir + '/output/' + domain + '/predictions/'
os.system('rm %s*.csv' % (fold_dir))
os.system('rm %s*.csv' % (ind_pred_dir))
os.system('rm %s*.csv' % (rel_pred_dir))
def _create_ranges_relational(self, test_start=100, test_end=200,
train_start=0, train_end=40,
val_sizes=[10, 20, 30, 60]):
assert train_start >= 0
assert train_start <= train_end
assert test_start >= train_end
assert test_start <= test_end
test_pts = test_end - test_start
train_pts = train_end - train_start
val_pts = test_start - train_end
train_pct = train_pts / (train_pts + val_pts + test_pts)
val_pct = val_pts / (train_pts + val_pts + test_pts)
range_list = []
for i, vp in enumerate(val_sizes):
assert vp <= test_start - train_end
start = train_start
end = test_end
train_size = train_pct
val_size = val_pct
val_split = vp / val_pts
rng = (start, end, train_size, val_size, val_split, vp)
range_list.append(rng)
return range_list
def _create_ranges_independent(self, test_start=100, test_end=200,
train_sizes=[]):
test_size = test_end - test_start
range_list = []
for i, train_size in enumerate(train_sizes):
tp = train_size / (train_size + test_size)
start = test_start - train_size
if start >= 0:
range_list.append((start, test_end, train_size, 0, 0, tp))
return range_list
def _write_scores_to_csv(self, rows, cols=[], out_dir='',
fname='results.csv'):
df = | pd.DataFrame(rows, columns=cols) | pandas.DataFrame |
import pytest
import os
import sys
from typing import List, Tuple
import pandas as pd
import torch
from torch import Tensor
sys.path.append(os.path.join(os.getcwd(), 'phishGNN'))
from dataset import PhishingDataset
def dataframe_mock(rows: List[Tuple[str, List, str]]):
refs = [[{"url": ref, "nb_edges": 1} for ref in row[1]] for row in rows]
urls = [row[0] for row in rows]
features = [
# 'depth',
'is_phishing',
'redirects',
'is_https',
'is_ip_address',
'is_error_page',
'url_length',
'domain_url_depth',
'domain_url_length',
'has_sub_domain',
'has_at_symbol',
'dashes_count',
'path_starts_with_url',
'is_valid_html',
'anchors_count',
'forms_count',
'javascript_count',
'self_anchors_count',
'has_form_with_url',
'has_iframe',
'use_mouseover',
'is_cert_valid',
'has_dns_record',
'has_whois',
'cert_reliability',
'domain_age',
'domain_end_period',
]
features = {feat: [-1 for _ in range(len(rows))] for feat in features}
features["redirects"] = [row[2] for row in rows]
data = {
'url': urls,
**features,
'refs': refs,
}
    df = pd.DataFrame(data=data)
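# Illustrative usage of the mock above (a sketch; URLs and values are placeholders):
#   df = dataframe_mock([
#       ("http://a.example", ["http://b.example"], 0),
#       ("http://b.example", [], 1),
#   ])
#   # -> one row per URL; every feature is filled with -1 except 'redirects',
#   #    which takes the third element of each tuple, and 'refs' which wraps row[1].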
# Calculate parameters from counts
# Draw FD by using the special points of fundamental diagrams
import pandas as pd
import geopandas as gpd
import numpy as np
import pickle
import matplotlib.pyplot as plt
from tqdm import tqdm
from tqdm.contrib import tenumerate
import collections
import time
import copy
from scipy.optimize import curve_fit
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_squared_error, r2_score
from sklearn.preprocessing import PolynomialFeatures
# from statsmodels.graphics.gofplots import qqplot
import operator
import scipy
import geopy.distance as geodist
import leuvenmapmatching.util.dist_latlon as distlatlon
import leuvenmapmatching.util.dist_euclidean as distxy
def vehicle_crossings(gdf_traj, d_gdf, bearing_difference=90, strict_match=True):
tic = time.time()
print('Start: …searching crossings')
assert isinstance(gdf_traj, gpd.GeoDataFrame)
assert {'n_det', 'lonlat'}.issubset(set(d_gdf.attrs.keys()))
assert {'line_length_latlon', 'line_length_yx'}.issubset(set(gdf_traj.columns))
n_det = d_gdf.attrs['n_det']
lonlat = d_gdf.attrs['lonlat']
c1, c2 = 'x', 'y'
if lonlat:
c1, c2 = 'lon', 'lat'
# column multi-index
col_names = [[f'cross_{c1}{i}', f'cross_{c2}{i}', f'rid{i}',
f'd{i}', f't{i}', f'v{i}'] for i in range(1, n_det + 1)]
col_names = [item for sublist in col_names for item in sublist]
feature = False
if 'det_signal1' in d_gdf.columns:
col_names = [[f'cross_{c1}_ts{i}', f'cross_{c2}_ts{i}', f'rid_ts{i}',
f'd_ts{i}', f't_ts{i}', f'v_ts{i}'] for i in range(1, n_det + 1)]
col_names = [item for sublist in col_names for item in sublist]
tuples = [(i[0], i[1], col_names[v]) for i, j in d_gdf.iterrows() for v in range(0, 6)]
col_index = pd.MultiIndex.from_tuples(tuples, names=['edge', 'node', 'detector'])
d_gdf = d_gdf.reset_index()
detector_column = 'det_signal'
feature = True
else:
col_index = pd.MultiIndex.from_product([d_gdf['_id'], col_names], names=['edge', 'detector'])
detector_column = 'det_edge_'
if isinstance(gdf_traj.index, pd.MultiIndex):
assert gdf_traj.index.names.index('track_id') == 0
row_index = set(gdf_traj.index.get_level_values(0))
else:
row_index = set(gdf_traj['track_id'])
gdf_traj.set_index(['track_id', 'rid'], inplace=True)
p1 = list(zip(*(gdf_traj[f'{c2}_1'], gdf_traj[f'{c1}_1'])))
gdf_traj['p1'] = p1
df_result = pd.DataFrame(index=row_index, columns=col_index)
df = gdf_traj
for i, det_link in tqdm(d_gdf.iterrows(), total=d_gdf.shape[0]): # Counting vehicles for every used edge
df_bool_wm = df[['wm1', 'wm2']].values < bearing_difference
df_wm = df[np.logical_and(df_bool_wm[:, 0], df_bool_wm[:, 1])]
if strict_match:
df_bool_edge = df_wm[['u_match', 'v_match']].values == det_link['_id']
else:
df_bool_edge = df_wm[['u_match', 'v_match']].values >= 0
df_u = df_wm[df_bool_edge[:, 0]].index.to_list()
df_v = df_wm[df_bool_edge[:, 1]].index.to_list()
set_index = set(df_u + df_v)
if len(set_index) == 0:
continue
df2 = df_wm.loc[set_index]
for n in range(1, n_det + 1):
df_search = df2['geometry'].values.intersects(det_link[f'{detector_column}{n}'])
df_intersect = df2[df_search].index.to_list()
if not df_intersect:
continue
tid, rid = zip(*df_intersect)
df_search_cross = df2[df_search]
df_cross = df_search_cross['geometry'].values.intersection(det_link[f'{detector_column}{n}'])
df_cross = [(c.y, c.x) for c in df_cross]
if lonlat:
df_dist = np.array([round(distlatlon.distance(*yx), 3) for yx in zip(df_search_cross.p1, df_cross)])
else:
df_dist = np.array([round(distxy.distance(*yx), 3) for yx in zip(df_search_cross.p1, df_cross)])
t, v = interpolate_crossing(df_search_cross, df_dist, lonlat=lonlat)
df_c2, df_c1 = zip(*df_cross)
if not feature:
df_result.loc[list(tid), (det_link["_id"], f'rid{n}')] = list(rid)
df_result.loc[list(tid), (det_link["_id"], f'cross_{c1}{n}')] = df_c1
df_result.loc[list(tid), (det_link["_id"], f'cross_{c2}{n}')] = df_c2
df_result.loc[list(tid), (det_link["_id"], f'd{n}')] = df_dist
df_result.loc[list(tid), (det_link["_id"], f't{n}')] = t
df_result.loc[list(tid), (det_link["_id"], f'v{n}')] = v
else:
df_result.loc[list(tid), (det_link["_id"], det_link["index"], f'rid_ts{n}')] = list(rid)
df_result.loc[list(tid), (det_link["_id"], det_link["index"], f'cross_{c1}_ts{n}')] = df_c1
df_result.loc[list(tid), (det_link["_id"], det_link["index"], f'cross_{c2}_ts{n}')] = df_c2
df_result.loc[list(tid), (det_link["_id"], det_link["index"], f'd_ts{n}')] = df_dist
df_result.loc[list(tid), (det_link["_id"], det_link["index"], f't_ts{n}')] = t
df_result.loc[list(tid), (det_link["_id"], det_link["index"], f'v_ts{n}')] = v
df_result.sort_index(inplace=True)
df_result = df_result.transpose()
toc = time.time()
print(f'Finding crossings done, took {toc - tic} sec')
return df_result
def interpolate_crossing(df, p, lonlat=False):
assert 'time' in df.columns
    assert {'speed_1', 'speed_2'}.issubset(set(df.columns))
assert {'line_length_latlon', 'line_length_yx'}.issubset(set(df.columns))
if lonlat:
dist = df.line_length_latlon.values
else:
dist = df.line_length_yx.values
t = np.round(df.time.values - 1000 + p / dist * 1000)
v = np.round((df.speed_2.values - df.speed_1.values) * (t - df.time.values + 1000) / 1000 + df.speed_1.values, 3)
return t, v
def cleaning_counting(det_c, n_det, double_loops=False):
det_cross = []
indexes_list = []
crossed = {}
for det in range(1, n_det + 1):
for a, b in det_c.iterrows(): # For every time step
if double_loops:
t_cross = [[b[f'times_{det}'][e], b[f'times_lp_{det}'][e]] for e in
range(0, len(b['times_1']))] # Do for every trajectory
det_cross.append(t_cross)
crossed[f'no_cross_{det}'] = []
crossed[f'partly_cross_{det}'] = []
for ind in range(0, len(det_cross[0])): # For every trajectory
cr = False
cr_lp = False
for i in range(len(det_c) * (det - 1), len(det_c) * det): # For every time step for specific detector pair
if bool(det_cross[i][ind][0]):
cr = True
if bool(det_cross[i][ind][1]):
cr_lp = True
if cr and cr_lp:
continue
elif cr != cr_lp:
crossed[f'partly_cross_{det}'].append(ind)
else:
crossed[f'no_cross_{det}'].append(ind)
return crossed
def count_vehicles(d_gdf, gdf_traj, n_det, freq, double_loops, mode_exclusion=(),
vehicle_dim=None):
if vehicle_dim is None:
vehicle_dim = {'Car': [2, 5], 'Motorcycle': [1, 2.5], 'Bus': [4, 12.5], 'Taxi': [2, 5],
'Medium Vehicle': [2.67, 5.83], 'Heavy Vehicle': [3.3, 12.5],
'Bicycle': [0, 0], 'Pedestrian': [0, 0]}
detector_counts = []
vehicle_type = {}
long_distances_dfs = []
# Pre-loading time steps
max_d = 0
max_d = int(max([j['time'].values[len(j) - 1] for j in gdf_traj if j['time'].values[len(j) - 1] > max_d])) + 1
# print(max_d)
# +1 --> To make sure the last trajectory point is included
time_st = []
print('Pre-loading time steps for every trajectory …')
for interval in tqdm(range(freq, max_d + freq, freq)):
pre_interval = interval - freq
steps = [np.logical_and(j['time'].values >= pre_interval, interval > j['time'].values) for j in gdf_traj]
time_st.append(steps)
for i, det_link in d_gdf.iterrows(): # Counting vehicles for every used edge
print(f"Counting on edge: {det_link['index']}")
if double_loops:
loop_dist = round(det_link['loop_distance'], 3)
edge_counts = collections.OrderedDict()
edge_times = collections.OrderedDict()
long_distances = []
index = []
d1 = []
d2 = []
traj_match_values = [] # save values of needed columns for every trajectory
vehicle_type[f"vehicle_type_{det_link['index']}"] = []
vehicle_type[f"vehicle_index_{det_link['index']}"] = []
vehicle_type[f"vehicle_crossing_{det_link['index']}"] = {'index': []}
print('Matched trajectories on edge …')
for k, l in tenumerate(gdf_traj): # Check for every trajectory if it is on the edge
d1.append([tuple(xy) for xy in zip(l['lat_x'], l['lon_x'])]) # Lat-Lon of first datapoint(u) of linestring
d2.append([tuple(xy) for xy in zip(l['lat_y'], l['lon_y'])]) # Lat-Lon of second datapoint(v) of linestring
if np.logical_or(d_gdf.loc[i, 'edge'] in list(l['u_match'].values),
d_gdf.loc[i, 'edge'] in list(l['v_match'].values)):
if l['type'].values[0] in mode_exclusion:
continue
traj_match_values.append(l[['u_match', 'v_match', 'wrong_match', 'time', 'geometry', 'speed_x',
'speed_y', 'bearing_x']].values)
index.append(k)
vehicle_type[f"vehicle_type_{det_link['index']}"].append(l['type'].values[0])
vehicle_type[f"vehicle_index_{det_link['index']}"].append((k, f"ID:{l['track_id'].values[0]}"))
vehicle_type[f"vehicle_crossing_{det_link['index']}"]['index'].append(k)
for det in range(1, n_det + 1):
edge_counts[f'counts_{det}'] = []
edge_times[f'times_{det}'] = []
if double_loops:
edge_counts[f'counts_lp_{det}'] = []
edge_times[f'times_lp_{det}'] = []
# print(index)
print('VKT and VHT for every time step …')
tag = [[0] * n_det for i in range(0, len(index))]
tag_lp = [[0] * n_det for i in range(0, len(index))]
for g, h in tenumerate(time_st): # for every time step
# print('Step: ' + str(g + 1))
cnt = {}
det_time_st = {}
for det in range(1, n_det + 1):
cnt[f'cnt_{det}'] = []
det_time_st[f'time_step_{det}'] = []
if double_loops:
cnt[f'cnt_lp_{det}'] = []
det_time_st[f'time_step_lp_{det}'] = []
for m, n in enumerate(index): # Every trajectory mapped to used edges
cnt_x = {}
traj_t = {}
"""
if vehicle_dim:
veh_l = vehicle_dim[vehicle_type[f"vehicle_type_{det_link['index']}"][m]][1]
else:
veh_l = loop_dist
"""
for det in range(1, n_det + 1):
cnt_x[f'x_{det}'] = 0
traj_t[f't_{det}'] = 0
if double_loops:
cnt_x[f'x_lp_{det}'] = 0
traj_t[f't_lp_{det}'] = 0
for idx, in_timestep in enumerate(time_st[g][n]): # time_st[g][n].iteritems():
if in_timestep:
for det in range(1, n_det + 1):
f = 0 #
if np.logical_or(traj_match_values[m][idx][0] == det_link['edge'],
traj_match_values[m][idx][1] == det_link['edge']):
if np.logical_and(traj_match_values[m][idx][2][0] > 90,
traj_match_values[m][idx][2][1] > 90):
continue
if traj_match_values[m][idx][4].intersects(det_link[f'det_edge_{det}']):
f = 1 # tag to mark index that intersects with detector, prevents errors in traveled
# distance calculation
tag[m][det - 1] = 1
d12 = geodist.distance(d1[n][idx], d2[n][idx]).m
c = traj_match_values[m][idx][4].intersection(det_link[f'det_edge_{det}'])
c = (c.y, c.x)
d1c = round(geodist.distance(d1[n][idx], c).m, 3)
dc2 = round(geodist.distance(c, d2[n][idx]).m, 3)
cnt_x[f'x_{det}'] = 1
if double_loops:
cnt_x[f'x_{det}'] = dc2
traj_t[f't_{det}'] = round(traj_match_values[m][idx][3] - 1000 + d1c / d12 * 1000)
if traj_t[f't_{det}'] < (freq * g):
edge_times[f'times_{det}'][g - 1][m] = traj_t[f't_{det}']
cnt_x[f'x_{det}'] = \
round((traj_match_values[m][idx][3] - freq * g) /
(traj_match_values[m][idx][3] - traj_t[f't_{det}'])
* geodist.distance(c, d2[n][idx]).m, 3)
edge_counts[f'counts_{det}'][g - 1][m] = round(
geodist.distance(c, d2[n][idx]).m -
cnt_x[f'x_{det}'], 3)
if not double_loops:
cnt_x[f'x_{det}'] = 0
edge_counts[f'counts_{det}'][g - 1][m] = 1
traj_t[f't_{det}'] = 0
if double_loops:
if traj_match_values[m][idx][4].intersects(det_link[f'det_edge_{det}bis']):
tag_lp[m][det - 1] = 1
d12 = geodist.distance(d1[n][idx], d2[n][idx]).m
c_lp = traj_match_values[m][idx][4].intersection(det_link[f'det_edge_{det}bis'])
c_lp = (c_lp.y, c_lp.x)
d1c = round(geodist.distance(d1[n][idx], c_lp).m, 3)
cnt_x[f'x_lp_{det}'] = d1c
traj_t[f't_lp_{det}'] = round(
traj_match_values[m][idx][3] - 1000 + d1c / d12 * 1000)
if traj_t[f't_lp_{det}'] < (freq * g): # crossing in previous time step
edge_times[f'times_lp_{det}'][g - 1][m] = traj_t[f't_lp_{det}']
edge_counts[f'counts_lp_{det}'][g - 1][m] = round(
geodist.distance(d1[n][idx],
c_lp).m, 3)
if f > 0:
edge_counts[f'counts_lp_{det}'][g - 1][m] = \
round(geodist.distance(c, c_lp).m, 3)
edge_counts[f'counts_{det}'][g - 1][m] = 0
# round(dist.distance(c, c_lp).m, 3)
cnt_x[f'x_{det}'] = 0
traj_t[f't_lp_{det}'] = 0
cnt_x[f'x_lp_{det}'] = 0
elif f > 0: # same line crosses both detectors
cnt_x[f'x_lp_{det}'] = round(geodist.distance(c, c_lp).m, 3)
# print('Direct crossing ' + str(cnt_x['x_lp_' + str(det)]))
cnt_x[f'x_{det}'] = 0
if g > 0:
if edge_times[f'times_{det}'][g - 1][m]:
cnt_x[f'x_lp_{det}'] = round(geodist.distance(c, c_lp).m -
edge_counts[f'counts_{det}'][
g - 1][m], 3)
elif traj_match_values[m][idx][3] - 1000 < (freq * g):
# first point in previous time step
cnt_x[f'x_lp_{det}'] = round((traj_t[f't_lp_{det}'] - freq * g) /
(traj_t[f't_lp_{det}'] + 1000 -
traj_match_values[m][idx][3]) * d1c, 3)
edge_counts[f'counts_{det}'][g - 1][m] = \
edge_counts[f'counts_{det}'][g - 1][m] + (d1c - cnt_x[f'x_lp_{det}'])
# Add extra distance to existing value
elif tag[m][det - 1] > 0 and tag_lp[m][det - 1] < 1 and f < 1:
d_int = round(geodist.distance(d1[n][idx], d2[n][idx]).m, 3)
if traj_match_values[m][idx][3] - 1000 < (freq * g):
cnt_x[f'x_{det}'] = round((traj_match_values[m][idx][3] - freq * g) /
1000 * d_int, 3)
edge_counts[f'counts_{det}'][g - 1][m] = \
edge_counts[f'counts_{det}'][g - 1][m] + (d_int -
cnt_x[f'x_{det}'])
else:
cnt_x[f'x_{det}'] = round(cnt_x[f'x_{det}'] + d_int, 3)
if g > 0:
if loop_dist < (cnt_x[f'x_{det}'] +
edge_counts[f'counts_{det}'][g - 1][m]):
distance = round(cnt_x[f"x_{det}"] +
edge_counts[f"counts_{det}"][g - 1][m], 3)
long_distances.append([distance, n, idx, traj_match_values[m][idx][2],
g, det])
# If vehicle does not cross both detectors, discarding them can skew the data
# --> the amount of vehicles is not negligible
# after last data point, append traveled distance and time spent
# loop distance is maximum so wrongly matched trajectories have no effect
# elif idx == len(gdf_traj[n])-1 and tag_lp[m][det-1] < 1:
# traj_t['t_lp_' + str(det)] = gdf_traj[n]['time'][idx]
if tag_lp[m][det - 1]: # reset tags --> loops of vehicles are possible
tag[m][det - 1] = 0
tag_lp[m][det - 1] = 0
for det in range(1, n_det + 1):
det_time_st[f'time_step_{det}'].append(traj_t[f't_{det}'])
cnt[f'cnt_{det}'].append(cnt_x[f'x_{det}'])
if double_loops:
det_time_st[f'time_step_lp_{det}'].append(traj_t[f't_lp_{det}'])
cnt[f'cnt_lp_{det}'].append(cnt_x[f'x_lp_{det}'])
for det in range(1, n_det + 1):
edge_counts[f'counts_{det}'].append(cnt[f'cnt_{det}'])
edge_times[f'times_{det}'].append(det_time_st[f'time_step_{det}'])
if double_loops:
edge_counts[f'counts_lp_{det}'].append(cnt[f'cnt_lp_{det}'])
edge_times[f'times_lp_{det}'].append(det_time_st[f'time_step_lp_{det}'])
# print(vehicle_length_traveled)
edge_counts.update(edge_times)
counts = pd.DataFrame(edge_counts)
detector_counts.append(counts)
long_distances = pd.DataFrame(long_distances, columns=['distance', 'trajectory', 'line_index', 'wrong_match',
'time_step', 'detector'])
long_distances_dfs.append(long_distances)
df_veh = pd.DataFrame(vehicle_type[f"vehicle_crossing_{det_link['index']}"])
vehicle_type[f"vehicle_crossing_{det_link['index']}"] = df_veh
return {'counts': detector_counts, 'detectors': d_gdf, 'vehicle_type': vehicle_type,
'long distances': long_distances_dfs}
def save_plot_fd(det_c, parameters_list, parameters_list_mode_sel=None, parameters_list_ao=None, labels=None,
colors=None, veh_area_colorbar=False, name_file=None):
figures = []
if not labels:
labels = ['Original', 'All Modes', 'Mode Selection', 'Adjusted']
if not colors:
colors = ['b', 'r']
for i, j in enumerate(parameters_list):
edge_id = det_c['detectors']['index'].values[i]
loop_distance = det_c['detectors']['loop_distance'].values[i]
if not name_file:
name_file = edge_id
for det in range(1, det_c['info']['number_of_det'][0] + 1):
fig, ax = plt.subplots()
ax.scatter(parameters_list[i][f'density_{det}'], parameters_list[i][f'flow_{det}'],
color=colors[0], label=labels[0])
if veh_area_colorbar:
f = ax.scatter(parameters_list[i]['density_' + str(det)], parameters_list[i]['flow_' + str(det)]
, label=labels[0], c=parameters_list[i][f'vehicle_area_{det}'])
fig.colorbar(f)
ax.legend(loc='upper left')
ax.grid(True)
plt.title(f"FD edge {name_file} (Detector loop: {det} with loop distance {loop_distance} m)")
plt.xlabel('Density k [veh/km]')
plt.ylabel('Flow q [veh/h]')
plt.axis([- 20, int(max(parameters_list[i][f'density_{det}']) + 50),
- 100, 10000])
figures.append((fig, ax))
plt.savefig(f"FD_{det}_{name_file}_{int(det_c['info']['frequency'][0] / 1000)}_lp"
f"{loop_distance}.png")
if parameters_list_mode_sel:
fig, ax_1 = plt.subplots()
ax_1.scatter(parameters_list[i][f'density_{det}'], parameters_list[i][f'flow_{det}'],
color=colors[0], label=labels[1], alpha=0.5)
ax_1.scatter(parameters_list_mode_sel[i][f'density_{det}'],
parameters_list_mode_sel[i][f'flow_{det}'], color=colors[1], label=labels[2])
ax_1.legend(loc='upper left')
ax_1.grid(True)
plt.title(f"FD edge {j['index'].values[0]} (Detector loop: {det})")
plt.xlabel('Density k [veh/km]')
plt.ylabel('Flow q [veh/h]')
figures.append((fig, ax_1))
plt.savefig(f"FD_comparison_modes_loop_{det}_"
f"{j['index'].values[0]}_{int(det_c['info']['frequency'][0] / 1000)}{labels[2]}.png")
if parameters_list_ao:
fig, ax_2 = plt.subplots()
ax_2.scatter(parameters_list[i][f'density_{det}'], parameters_list[i][f'flow_{det}'],
color=colors[0], label=labels[0], alpha=0.5)
ax_2.scatter(parameters_list_ao[i][f'density_{det}'], parameters_list_ao[i][f'flow_{det}'],
color=colors[1], marker='^', label=labels[3])
ax_2.legend(loc='upper left')
ax_2.grid(True)
plt.title(f"FD edge {j['index'].values[0]} (Detector loop: {det})")
plt.xlabel('Density k [veh/km]')
plt.ylabel('Flow q [veh/h]')
figures.append((fig, ax_2))
plt.savefig(f"FD_comparison_loop_{det}_"
f"{j['index'].values[0]}_{int(det_c['info']['frequency'][0] / 1000)}_adjustment.png")
# int(max(max(parameters_list[i]['flow_' + str(det)]),
# max(parameters_list_ao[i]['flow_' + str(det)])) + 500)])
plt.show()
return figures
class TrafficAnalysis:
adjustment_stopped = False, 0
def __init__(self, d_gdf, gdf_traj, gdf_netw, n_det, freq, dfi, loop_distance,
double_loops, mode_exclusion=(),
vehicle_dim=None):
self.IDarterial = list(d_gdf['index'].values)
self.network = gdf_netw
self.numberofdetectors = n_det
self.frequency = freq
self.dfi = dfi
self.double = double_loops
self.loop_distance = None
self.modes_excluded = mode_exclusion
self.vehicle_dimensions = vehicle_dim
if vehicle_dim is None:
vehicle_dim = {'Car': [2, 5], 'Motorcycle': [1, 2.5], 'Bus': [4, 12.5], 'Taxi': [2, 5],
'Medium Vehicle': [2.67, 5.83], 'Heavy Vehicle': [3.3, 12.5],
'Bicycle': [0, 0], 'Pedestrian': [0, 0]}
self.vehicle_dimensions = vehicle_dim
self.traffic_counts = count_vehicles(d_gdf, gdf_traj, n_det, freq, double_loops, mode_exclusion,
self.vehicle_dimensions)
if double_loops:
self.loop_distance = loop_distance
self.traffic_parameters = self.calculate_parameters()
ta_filter = self.filter_stopped_vehicles()
self.traffic_parameters_adj = self.adjustment_stopped_vehicles()
self.traffic_parameters_noLS = ta_filter[0] # No long stops
self.filter_stopped = ta_filter[1]
self.traffic_parameters_arterial = []
self.traffic_parameters_arterial_network = []
self.traffic_parameters_agg = []
def crossings_detectors(self, detector='times_1'):
ind_cross = self.traffic_counts['counts']
def multiple_crossings(self):
counts_dict = self.traffic_counts
n_det = self.numberofdetectors
edge_id = counts_dict['detectors']['index'].values
multi_list = {f'{j}': {f'det_{det}': {'ID': [], 'crosses': []} for det in range(1, n_det + 1)}
for i, j in enumerate(edge_id)}
for ind, counts in enumerate(counts_dict['counts']):
len_idx = len(counts['times_1'][0])
cnt = [[0] * len_idx for det in range(1, n_det + 1)]
for det in range(1, n_det + 1):
for id, row in counts[f'times_lp_{det}'].iteritems():
for i, val in enumerate(row):
if val:
cnt[det - 1][i] += 1
if cnt[det - 1][i] > 1:
multi_list[f'{edge_id[ind]}'][f'det_{det}']['ID']. \
                                    append(counts_dict['vehicle_type'][f'vehicle_index_{edge_id[ind]}'][i])
multi_list[f'{edge_id[ind]}'][f'det_{det}']['crosses']. \
append(cnt[det - 1][i])
return multi_list
def calculate_parameters(self, mode_exclusion=()):
edges_parameters = []
hour = 3600000 # Hour in ms to normalize units of flow
# det_c info
n_det = self.numberofdetectors
freq = self.frequency
double = self.double
vehicle_width = self.vehicle_dimensions
# Lanes to float
lanes = []
for x, y in self.traffic_counts['detectors']['lanes'].iteritems():
if type(y) is not list:
y = float(y)
if np.isnan(y): # replace nan values with default number of lanes
y = 1
lanes.append(y)
else:
y = [float(s) for s in y]
y = min(y)
lanes.append(y)
self.traffic_counts['detectors'] = self.traffic_counts['detectors'].assign(lanes_adj=lanes)
# Clean counts
det_error = []
summary_clean_counts = []
# print(det_error)
for a, b in enumerate(tqdm(self.traffic_counts['counts'])): # List of detector counts
error = cleaning_counting(b, double_loops=double, n_det=n_det)
summary = {'edge_index': self.traffic_counts['detectors']['index'].values[a], 'list_clean_counts': []}
for key, value in error.items():
t_count = (key, len(value))
summary['list_clean_counts'].append(t_count)
# print(summary)
summary_clean_counts.append(summary)
det_error.append(error)
n_edge = self.traffic_counts['detectors']['index'].values[a]
loop_distance = self.traffic_counts['detectors']['loop_distance'].values[a]
free_flow = 100
vehicle_type = self.traffic_counts['vehicle_type'][f'vehicle_type_{n_edge}']
get_veh_type = collections.Counter(vehicle_type)
total_matched = len(vehicle_type)
# Density at zero flow and maximum occupancy (100%)
parameters = {}
parameters['index'] = [0] * len(b)
parameters['index'][0] = n_edge
parameters['loop_distance'] = [0] * len(b)
parameters['loop_distance'][0] = loop_distance
parameters['number_of_det'] = [0] * len(b)
parameters['number_of_det'][0] = n_det
parameters['frequency'] = [0] * len(b)
parameters['frequency'][0] = freq
for det in range(1, n_det + 1): # For each detector pair
parameters[f'density_{det}'] = []
parameters[f'density_lane_{det}'] = []
parameters[f'flow_{det}'] = []
parameters[f'flow_lane_{det}'] = []
parameters[f'speed_{det}'] = []
# parameters[f'occupancy_{det}'] = []
# parameters[f'flow_occ_{det}'] = []
# parameters[f'density_occ_{det}'] = []
# parameters[f'speed_occ_{det}'] = []
# parameters[f'length_{det}'] = []
parameters[f'vehicle_area_{det}'] = []
parameters[f'modal_shares_{det}'] = []
parameters[f'vehicles_{det}'] = []
parameters[f'stopped_vehicles_{det}'] = []
parameters[f'stops_{det}'] = []
for veh_type in list(get_veh_type):
parameters[f'{veh_type}_{det}'] = []
df = b.copy(deep=True)
t_cross = df.loc[:, [f'times_{det}', f'times_lp_{det}']].values
# , f'occ_times_in_{det}', f'occ_times_out_{det}']].values
q_cross = df.loc[:, [f'counts_{det}', f'counts_lp_{det}']].values
for c, d in df.iterrows(): # Go over each time step of edge counts
time_spent_loops = []
traveled_distance = []
occupancy = []
flow_occupancy = []
vehicle_length = []
vehicle_area = []
modal_shares = {k: 0 for k in list(get_veh_type)}
stopped_vehicle = [0] * total_matched
for e in range(0, total_matched): # For each trajectory
if e in det_error[a][f'no_cross_{det}']:
continue
elif e in det_error[a][f'partly_cross_{det}']:
"""
if t_cross[c][2][e] > 0:
if t_cross[c][3][e] > 0:
to_1 = t_cross[c][3][e] - t_cross[c][2][e]
# ao_1 = t_1 * vehicle_width[vehicle_type[e]][0] #*vehicle_width[vehicle_type[e]][1])
qo_1 = 1 # * vehicle_width[vehicle_type[e]][0]
flow_occupancy.append(qo_1)
occupancy.append(to_1)
else:
to_2 = (freq * (c + 1) - t_cross[c][2][e])
qo_2 = 0
if c < len(b) - 1: # Change entry time of vehicle in loop
# b.loc[c + 1, f'occ_times_in_{det}'][e] = freq * (c + 1)
t_cross[c + 1][2][e] = freq * (c + 1)
flow_occupancy.append(qo_2)
occupancy.append(to_2)
vehicle_length.append(vehicle_width[vehicle_type[e]][1])
"""
continue
else:
if vehicle_type[e] not in mode_exclusion:
if t_cross[c][0][e] > 0:
if t_cross[c][1][e] > 0:
t_1 = (t_cross[c][1][e] - t_cross[c][0][e]) # / (frequency * loop_distance
# * lanes[n_edge]) * 1000
time_spent_loops.append(t_1)
# print((k_1, c))
q_1 = q_cross[c][0][e] + q_cross[c][1][e]
traveled_distance.append(q_1)
else:
t_2 = (freq * (c + 1) - t_cross[c][0][e]) # / (frequency * loop_distance
# * lanes[n_edge]) * 1000
if c < len(df) - 1: # Change entry time of vehicle in loop
# Vehicles longer than one time step between detectors
t_cross[c + 1][0][e] = freq * (c + 1)
time_spent_loops.append(t_2)
q_2 = q_cross[c][0][e] + q_cross[c][1][e]
traveled_distance.append(q_2)
if t_2 == freq:
stopped_vehicle[e] = 1
vehicle_area.append(np.prod(vehicle_width[vehicle_type[e]]))
modal_shares[vehicle_type[e]] += 1
"""
if t_cross[c][2][e] > 0:
if t_cross[c][3][e] > 0:
to_1 = t_cross[c][3][e] - t_cross[c][2][e]
# ao_1 = t_1 * vehicle_width[vehicle_type[e]][0] #*vehicle_width[vehicle_type[e]][1])
qo_1 = 1 # * vehicle_width[vehicle_type[e]][0]
flow_occupancy.append(qo_1)
occupancy.append(to_1)
else:
to_2 = (freq * (c + 1) - t_cross[c][2][e])
qo_2 = 0
if c < len(b) - 1: # Change entry time of vehicle in loop
t_cross[c + 1][2][e] = freq * (c + 1)
flow_occupancy.append(qo_2)
occupancy.append(to_2)
vehicle_length.append(vehicle_width[vehicle_type[e]][1])
"""
k_time_step = (sum(time_spent_loops) /
(freq * loop_distance) * 1000)
# if sum(time_spent_loops['time_spent_loop_' + str(det)]) > freq:
# print(sum(time_spent_loops['time_spent_loop_' + str(det)]), c)
q_time_step = (sum(traveled_distance) /
(freq * loop_distance) * hour)
# occ = (sum(occupancy)) / (freq * lanes[a])
# occ_flow = (sum(flow_occupancy)) / freq * hour
parameters[f'density_{det}'].append(k_time_step)
parameters[f'density_lane_{det}'].append(k_time_step / lanes[a])
parameters[f'flow_{det}'].append(q_time_step)
parameters[f'flow_lane_{det}'].append(q_time_step / lanes[a])
# parameters[f'occupancy_{det}'].append(occ)
# parameters[f'flow_occ_{det}'].append(occ_flow / lanes[a])
parameters[f'stopped_vehicles_{det}'].append(stopped_vehicle)
parameters[f'stops_{det}'].append(sum(stopped_vehicle))
"""
if vehicle_length:
m_veh_l = np.mean(vehicle_length)
parameters[f'density_occ_{det}'].append(occ / m_veh_l * 1000)
u_occ = round(occ_flow / (occ / m_veh_l * 1000), 2)
parameters[f'speed_occ_{det}'].append(u_occ)
else:
m_veh_l = 0
parameters[f'density_occ_{det}'].append(0)
parameters[f'speed_occ_{det}'].append(0)
parameters[f'length_{det}'].append(m_veh_l)
"""
if k_time_step:
u = round(q_time_step / k_time_step, 2)
parameters[f'vehicle_area_{det}'].append(np.mean(vehicle_area))
total = sum(modal_shares.values())
modal_shares = {k: round(v / total * 100, 1) for k, v in modal_shares.items()}
parameters[f'modal_shares_{det}'].append(modal_shares)
parameters[f'vehicles_{det}'].append(total)
for veh_type in list(get_veh_type):
parameters[f'{veh_type}_{det}'].append(modal_shares[veh_type])
if u < free_flow:
parameters[f'speed_{det}'].append(u)
else:
parameters[f'speed_{det}'].append(free_flow)
else:
parameters[f'speed_{det}'].append(0)
parameters[f'vehicle_area_{det}'].append(0)
parameters[f'modal_shares_{det}'].append(modal_shares)
parameters[f'vehicles_{det}'].append(0)
for veh_type in list(get_veh_type):
parameters[f'{veh_type}_{det}'].append(modal_shares[veh_type])
calc_parameters = pd.DataFrame(parameters)
calc_parameters.reset_index(inplace=True)
edges_parameters.append(calc_parameters)
return edges_parameters
def adjustment_stopped_vehicles(self, stop_time=6):
n_det = self.numberofdetectors
loop_distance = self.traffic_counts['detectors']['loop_distance'].values
lanes = self.traffic_counts['detectors']['lanes_adj'].values
edge_id = list(self.traffic_counts['detectors']['index'].values)
frequency = self.frequency / 1000
new_param_list = []
for ind, param in enumerate(self.traffic_parameters.copy()):
# Value of one stopped vehicle for one time step
filter_value = (1000 / loop_distance[ind])
tag = [0] * n_det
filter = {}
for d in range(1, n_det + 1):
# filter[f'f_{d}'] = [0] * len(param)
filter[f'density_{d}'] = list(param[f'density_{d}'].values)
filter[f'lanes_{d}'] = [lanes[ind]] * len(param)
for id, vt in enumerate(list(collections.Counter(
self.traffic_counts['vehicle_type'][f'vehicle_type_{edge_id[ind]}']).keys())):
filter[f'{vt}_{d}'] = list(param[f'{vt}_{d}'].values)
param = param.drop([f'{vt}_{d}'], axis=1)
for k, v in param.iterrows():
for det in range(1, n_det + 1):
if v[f'stops_{det}'] == 1 and lanes[ind] > 1:
tag[det - 1] += 1
if tag[det - 1] == stop_time:
# filter[f'f_{det}'][k-2] = filter_value
# filter[f'f_{det}'][k-1] = filter_value
for adj in range(0, stop_time):
filter[f'density_{det}'][k - adj] -= filter_value
filter[f'lanes_{det}'][k - adj] -= 1
elif min(tag[det - 1], stop_time) == stop_time:
# filter[f'f_{det}'][k] = filter_value
filter[f'density_{det}'][k] -= filter_value
filter[f'lanes_{det}'][k] -= 1
else:
tag[det - 1] = 0
for det in range(1, n_det + 1):
filter[f'density_lane_{det}'] = np.array(filter[f'density_{det}']) / np.array(filter[f'lanes_{det}'])
filter[f'speed_{det}'] = [
param[f'flow_{det}'][k] / filter[f'density_{det}'][k] if filter[f'density_{det}'][k]
else 0 for k, v in param.iterrows()]
param = param.drop([f'density_{det}', f'density_lane_{det}', f'speed_{det}'], axis=1)
# param[f'density_adj_{det}'] = param[f'density_{det}'] - filter[f'f_{det}']
filter = pd.DataFrame(filter)
param = pd.concat([param, filter], axis=1)
for det in range(1, n_det + 1):
param = param.drop([f'lanes_{det}'], axis=1)
new_param_list.append(param)
self.adjustment_stopped = True, stop_time
return new_param_list
def filter_stopped_vehicles(self, stop_time=0):
if stop_time == 0: # Default is five minutes
stop_time = round(int(300000 / self.frequency))
n_det = self.numberofdetectors
loop_distance = self.traffic_counts['detectors']['loop_distance'].values
lanes = self.traffic_counts['detectors']['lanes_adj'].values
edge_id = list(self.traffic_counts['detectors']['index'].values)
frequency = self.frequency / 1000
new_param_list = []
id_list = []
for ind, param in tenumerate(self.traffic_parameters.copy()):
filter = {}
# Value of one stopped vehicle for one time step
filter_value = (1000 / loop_distance[ind])
tag = [[] for d in range(1, n_det + 1)]
stp_id = [[] for d in range(1, n_det + 1)]
f_id = [[] for d in range(1, n_det + 1)]
f_veh_id = [[] for d in range(1, n_det + 1)]
f_type = [[] for d in range(1, n_det + 1)]
stp_cnt = [[0] * len(param[f'stopped_vehicles_{d}'][0]) for d in range(1, n_det + 1)]
for row, val in param.iterrows():
for d in range(1, n_det + 1):
for id, stop in enumerate(val[f'stopped_vehicles_{d}']):
if stp_cnt[d - 1][id] == stop_time:
for v in range(stop_time, 0, -1):
stp_id[d - 1].append((id, row - v))
elif stp_cnt[d - 1][id] > stop_time:
stp_id[d - 1].append((id, row - 1))
if stop > 0:
stp_cnt[d - 1][id] += 1
else:
stp_cnt[d - 1][id] = 0
for d in range(1, n_det + 1):
filter[f'density_{d}'] = list(param[f'density_{d}'].values)
filter[f'lanes_{d}'] = [lanes[ind]] * len(param)
filter[f'vehicles_{d}'] = list(param[f'vehicles_{d}'].values)
filter[f'stops_{d}'] = list(param[f'stops_{d}'].values)
for id, vt in enumerate(list(collections.Counter(
self.traffic_counts['vehicle_type'][f'vehicle_type_{edge_id[ind]}']).keys())):
filter[f'{vt}_{d}'] = list(param[f'{vt}_{d}'].values)
param = param.drop([f'{vt}_{d}'], axis=1)
# filter[f'f_{d}'] = [0] * len(param)
for r, e in enumerate(stp_id[d - 1]):
veh = self.traffic_counts['vehicle_type'][f'vehicle_type_{edge_id[ind]}'][e[0]]
if e[0] not in f_id[d - 1]:
f_id[d - 1].append(e[0])
f_veh_id[d - 1].append(self.traffic_counts['vehicle_type']
[f'vehicle_index_{edge_id[ind]}'][e[0]][0])
f_type[d - 1].append(veh)
filter[f'density_{d}'][e[1]] -= filter_value
# filter[f'lanes_{d}'][e[1]] -= 1
filter[f'stops_{d}'][e[1]] -= 1
filter[f'vehicles_{d}'][e[1]] -= 1
veh_share = filter[f'{veh}_{d}'][e[1]] * filter[f'vehicles_{d}'][e[1]] - 1
filter[f'{veh}_{d}'][e[1]] = 0
if filter[f'vehicles_{d}'][e[1]] > 0:
filter[f'{veh}_{d}'][e[1]] = veh_share / filter[f'vehicles_{d}'][e[1]]
filter[f'density_lane_{d}'] = np.array(filter[f'density_{d}']) / np.array(filter[f'lanes_{d}'])
filter[f'speed_{d}'] = [
param[f'flow_{d}'][k] / filter[f'density_{d}'][k] if filter[f'density_{d}'][k]
else 0 for k, v in param.iterrows()]
param[f'flow_lane_{d}'] = np.array(param[f'flow_{d}']) / np.array(filter[f'lanes_{d}'])
param = param.drop([f'density_{d}', f'density_lane_{d}', f'modal_shares_{d}',
f'stopped_vehicles_{d}', f'vehicle_area_{d}', f'stops_{d}',
f'vehicles_{d}', f'speed_{d}'], axis=1)
filter = pd.DataFrame(filter)
p = pd.concat([param, filter], axis=1)
for det in range(1, n_det + 1):
p = p.drop([f'lanes_{det}'], axis=1)
new_param_list.append(p)
id_list.append([f_veh_id, f_type])
self.adjustment_stopped = True, stop_time
return new_param_list, id_list
def arterial_parameters(self, aggregation_detector, mode=None, aggregated_parameters=False,
adjusted_parameters=False):
# mode is tuple with (link of arterial, mode, mode share)
n_det = self.numberofdetectors
edge_id = list(self.traffic_counts['detectors']['index'].values)
index_complete_counts = []
parameters = self.traffic_parameters
if aggregated_parameters:
parameters = self.traffic_parameters_agg
elif adjusted_parameters:
parameters = self.traffic_parameters_adj
for ind, param in enumerate(parameters):
s = []
for det in range(1, n_det + 1):
s.append(sum(param[f'vehicles_{det}']))
mu = np.mean(s)
if mu * 1.2 < max(s):
# 20 percent difference between mean of sum of vehicles per time step and maximum of one detector
# raise Exception(f'Link with no counts for some detectors: check parameter list. Sum of vehicles for every'
# f'detector is {s} on edge {edge_id[ind]}')
print('Big differences between detector counts')
else:
index_complete_counts.append(ind)
arterial_length = sum(self.traffic_counts['detectors']['length']
* self.traffic_counts['detectors']['lanes_adj'])
# print(arterial_length)
parameters_arterial = {'accumulation_arterial': 0, 'production_arterial': 0}
denominator = 0
det = aggregation_detector
if mode is not None:
par_mode = parameters[edge_id.index(mode[0])]
ind_mode = list(par_mode[par_mode[f'{mode[1]}_{det}'] > mode[2]].index)
for ind, param in enumerate(parameters):
# if ind not in index_complete_counts:
# print(f'skip: {edge_id[ind]}')
# continue
denom = (self.traffic_counts['detectors']['length'].values[ind])
# * counts_dict['counts']['detectors']['lanes_adj'].values[ind])
production = 0
accumulation = 0
"""
for det in range(1, n_det + 1):
production = production + param[f'flow_{det}']
accumulation = accumulation + param[f'density_{det}']
"""
flow = param[f'flow_{det}']
density = param[f'density_{det}']
if mode is not None:
flow = param[f'flow_{det}'][ind_mode]
density = param[f'density_{det}'][ind_mode]
production = flow * denom
accumulation = (density * denom) # units
parameters_arterial['accumulation_arterial'] += accumulation
parameters_arterial['production_arterial'] += production
denominator = denominator + (denom * n_det)
# print(denominator)
parameters_arterial['accumulation_arterial'] = (parameters_arterial['accumulation_arterial']
/ 1000)
parameters_arterial['production_arterial'] = (parameters_arterial['production_arterial']
/ 1000)
parameters_arterial['average_speed_arterial'] = (parameters_arterial['production_arterial']
/ parameters_arterial['accumulation_arterial'])
        art_parameters = pd.DataFrame(parameters_arterial)
#!/usr/bin/env python
"""regression_models.py: module is dedicated to produce the regression models."""
__author__ = "<NAME>."
__copyright__ = "Copyright 2020, SuperDARN@VT"
__credits__ = []
__license__ = "MIT"
__version__ = "1.0."
__maintainer__ = "<NAME>."
__email__ = "<EMAIL>"
__status__ = "Research"
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
import os
import numpy as np
import pandas as pd
import datetime as dt
import statsmodels.formula.api as smf
from statsmodels.iolib.smpickle import load_pickle
from statsmodels.tools.eval_measures import rmse
from pysolar.solar import get_altitude
import boxcar_filter as box
FORMULAS = {
"echoes": "np.log10(echoes+.1) ~ (B_AVG + A_AVG)*(np.power(np.cos(np.deg2rad(sza)),{gamma}))/np.power(tfreq,{delta})",
"absp": "np.log10(absp+.1) ~ (B_AVG + A_AVG)*(np.power(np.cos(np.deg2rad(sza)),{gamma}))/np.power(tfreq,{delta})"
}
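# Sketch of how a FORMULAS template is expected to be used (the gamma/delta values,
# DataFrame name and file path below are hypothetical placeholders, not fitted values):
#   formula = FORMULAS["echoes"].format(gamma=0.5, delta=2.0)
#   # model = smf.wls(formula, data=df_events).fit()
#   # model.save(base + "regressor/model.wls.pickle")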
def get_gridded_parameters(q, xparam="lon", yparam="lat", zparam="mean"):
plotParamDF = q[ [xparam, yparam, zparam] ]
plotParamDF[xparam] = plotParamDF[xparam].tolist()
plotParamDF[yparam] = plotParamDF[yparam].tolist()
plotParamDF = plotParamDF.groupby( [xparam, yparam] ).mean().reset_index()
plotParamDF = plotParamDF[ [xparam, yparam, zparam] ].pivot( xparam, yparam )
x = plotParamDF.index.values
y = plotParamDF.columns.levels[1].values
X, Y = np.meshgrid( x, y )
# Mask the nan values! pcolormesh can't handle them well!
Z = np.ma.masked_where(
np.isnan(plotParamDF[zparam].values),
plotParamDF[zparam].values)
return X,Y,Z
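# Example usage (illustrative, with a tiny synthetic grid):
#   q = pd.DataFrame({"lon": [0, 0, 1, 1], "lat": [0, 1, 0, 1],
#                     "mean": [1.0, 2.0, 3.0, 4.0]})
#   X, Y, Z = get_gridded_parameters(q)
#   # Z is a masked 2-D array (NaNs masked) arranged lon x lat and is plotted
#   # below via ax.pcolormesh(x, y, e.T, ...).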
def plot_global_ditribution(o, fname, d):
fig = plt.figure(dpi=120, figsize=(6,3))
x, y, e = get_gridded_parameters(o)
x, y, sza = get_gridded_parameters(o, zparam="sza")
e[sza>90.] = np.nan
ax = fig.add_subplot(111)
c = ax.pcolormesh(x, y, e.T, cmap="Reds", vmin=0, vmax=50)
fig.colorbar(c, ax=ax)
CS = ax.contour(x, y, sza.T)
ax.clabel(CS, inline=True, fontsize=10)
ax.text(0.01, 1.05, r"$f_0=%.1f\times 10^{6}$Hz"%o.iloc[0].tfreq, ha="left", va="center", transform=ax.transAxes)
ax.text(0.99, 1.05, d.strftime("%Y.%m.%d %H.%M"), ha="right", va="center", transform=ax.transAxes)
fig.savefig(fname, bbox_inches="tight")
plt.close()
return
def get_sza(geos):
d = geos[2].replace(tzinfo=dt.timezone.utc)
sza = 90.-get_altitude(geos[0], geos[1], d)
return sza
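# Example (illustrative coordinates/time): solar zenith angle in degrees for a
# (lat, lon, naive-UTC datetime) tuple, as built in create_global_distribution below.
#   sza = get_sza((37.2, -80.4, dt.datetime(2017, 9, 6, 12, 0)))
#   # pysolar's get_altitude returns solar elevation, so sza = 90 - elevation.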
def create_global_distribution(dates, freqs=[12.], base="../", ci=0.5):
ls = load_pickle(base + "regressor/model.wls.pickle")
N = 181
lat, lon = np.linspace(-90, 90, N), np.linspace(-180, 180, N)
lats, lons = np.meshgrid(lat, lon)
x = pd.DataFrame()
x["lat"], x["lon"] = lats.ravel(), lons.ravel()
for d in dates:
x["geolocate"] = [(la, lo, d) for la,lo in zip(lats.ravel(), lons.ravel())]
x["sza"] = x.geolocate.apply(lambda a: get_sza(a))
for f in freqs:
x["tfreq"] = [f]*len(x)
g = fetch_goes_data([d,d+dt.timedelta(minutes=1)], sat="g15").iloc[0]
x["B_AVG"] = [g.B_AVG]*len(x)
x["A_AVG"] = [g.A_AVG]*len(x)
ypred = ((10**ls.get_prediction(x).summary_frame(alpha=1-ci))-.1)[["mean", "obs_ci_lower", "obs_ci_upper"]]
o = pd.merge(x, ypred, how="inner", left_index=True, right_index=True).reset_index()
plot_global_ditribution(o, base+"regressor/figures/%s_%.1f.png"%(d.strftime("%Y%m%d.%H%M"),f), d)
return
def fit_global_absorption_models(base="../", fname="regressor/events.csv", check_params=False):
x = pd.DataFrame()
events = pd.read_csv(base + fname, parse_dates=["date"])
for i, e in events.iterrows():
gfile = base + e["goes_fnames"]
pfname = gfile.replace("data/goes_", "proc/")
        x = pd.concat([x, pd.read_csv(pfname, parse_dates=["time"])])
#!/usr/bin/env python
# coding: utf-8
# In[47]:
import requests # Include HTTP Requests module
from bs4 import BeautifulSoup # Include BS web scraping module
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import re
# In[48]:
gameID = 'loyola-university-chicago/boxscore/4822'
url = "https://meangreensports.com/sports/mens-basketball/stats/2020-21/" + gameID
r = requests.get(url,verify=False)
soup = BeautifulSoup(r.text, "html.parser")
prds = soup.find_all('section', attrs = {'id':'play-by-play'})
# In[49]:
dfRaw = pd.DataFrame()
for i in prds:
p = 1
T = '00:20:00'
team = ''
action = ''
plr = ''
for prd in i.find_all('div',id=re.compile(r'period')):
for pos in prd.find_all('tr')[1:]:
t = pos.find('th',attrs={'scope':'row'}).text
if re.search(r'\d',t):
T = t
a = pos.find(lambda tag: tag.name == 'td' and
tag.get('class') == ['text-right','hide-on-medium-down']).text.strip()
h = pos.find(lambda tag: tag.name == 'td' and
tag.get('class') == ['hide-on-medium-down']).text.strip()
if len(a)>0:
team = 'A'
action = a
else:
team = 'H'
action = h
try:
plr = action.split(' by ')[1]
except:
print(action)
dfRaw = pd.concat([dfRaw,
pd.DataFrame([[p,T,team,action,plr]],
columns=['Period','Time','Team','ActionRaw','Player'])
])
p += 1
dfRaw['Action'] = dfRaw.ActionRaw.str.extract('([^a-z]{2,})')
# In[50]:
def cleanAction(x):
x = re.sub('^\d+', '', x).lstrip()
x = re.sub('\d+$', '', x).rstrip()
x = re.sub(' by$', '', x).rstrip()
return x
def cleanPeriod(x):
x = int(''.join(filter(str.isdigit, x)))
return x
def cleanPlayer(x):
x = x.lstrip().rstrip()
return x
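# Illustrative behaviour of the cleaners above (input strings are made up):
#   cleanAction('3 GOOD JUMPER by 23')  -> 'GOOD JUMPER'
#   cleanPeriod('2nd Half')             -> 2
#   cleanPlayer('  SMITH,JOHN ')        -> 'SMITH,JOHN'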
def getStarters(df):
nprd = df['Period'].max()
periodStart = pd.to_timedelta('00:00:00')
periodEnd = pd.to_timedelta('00:40:00')
if nprd > 2:
n = nprd - 2
while n > 0:
periodEnd += pd.to_timedelta('00:05:00')
n -= 1
lineups2 = df[df.Action.isin(['SUB IN','SUB OUT'])
][['Player','Action','Time','Period','Team']]
linePV = pd.pivot_table(lineups2,index=['Player','Team'],columns='Action',values='Time',aggfunc=np.min).reset_index()
linePV['SUB IN'] = linePV['SUB IN'].fillna(periodStart)
linePV['SUB OUT'] = linePV['SUB OUT'].fillna(periodEnd)
starters = linePV[
(
(linePV['SUB OUT'] < linePV['SUB IN'])
)
|
(
(linePV['SUB IN'] == '00:00:00')
)
][['Team','Player','SUB OUT','SUB IN']]
return list(starters[starters['Team']=='H']['Player']),list(starters[starters['Team']=='A']['Player'])
def getStartersByPeriod(df,p):
periodStart = pd.to_timedelta('00:00:00')
periodEnd = pd.to_timedelta('00:20:00')
if p > 2:
periodEnd = pd.to_timedelta('00:05:00')
lineups2 = df[
df.Action.isin(['SUB IN','SUB OUT'])
][['Player','Action','Time','Period','Team']]
lineups2 = lineups2[lineups2['Period']==p]
linePV = pd.pivot_table(lineups2,index=['Player','Team'],columns='Action',values='Time',aggfunc=np.min).reset_index()
linePV['SUB IN'] = linePV['SUB IN'].fillna(periodStart)
linePV['SUB OUT'] = linePV['SUB OUT'].fillna(periodEnd)
starters = linePV[
(
(linePV['SUB OUT'] < linePV['SUB IN'])
)
|
(
(linePV['SUB IN'] == '00:00:00')
)
][['Team','Player','SUB OUT','SUB IN']]
return list(starters[starters['Team']=='H']['Player']),list(starters[starters['Team']=='A']['Player'])
def extractParens(s):
pat = '\(([^)]+)'
if re.search(pat,s):
s = re.findall(pat, s)[0]
else:
s = ''
return s
def removeParens(x):
return x.split("(")[0]
# In[51]:
try:
dfRaw['Duration'] = pd.to_datetime(dfRaw['Time'].astype(str)).diff().dt.total_seconds().div(-60)
except:
dfRaw['Duration'] = 0
# In[52]:
actValMap = {
'MISS LAYUP':0
, 'REBOUND DEF':0
, 'GOOD JUMPER':2
, 'MISS 3PTR':0
, 'REBOUND OFF':0
, 'GOOD 3PTR':3
, 'ASSIST':0
, 'FOUL':0
, 'GOOD LAYUP':2
, 'BLOCK':0
, 'TIMEOUT 30SEC':0
, 'SUB OUT':0
, 'SUB IN':0
, 'TURNOVER':0
, 'STEAL':0
, 'MISS JUMPER':0
, 'TIMEOUT MEDIA':0
, 'REBOUND DEADB':0
, 'GOOD FT':1
, 'GOOD DUNK':2
, 'MISS FT':0
}
# In[53]:
dfRaw['Action'] = dfRaw['Action'].apply(cleanAction)
#dfRaw['Period'] = dfRaw['Period'].apply(cleanPeriod)#.apply(int)
#dfRaw['Duration'] = df['duration'].apply(int)
dfRaw['ActionValue'] = dfRaw['Action'].map(actValMap).map(int,na_action='ignore')
dfRaw['Time'] = pd.to_timedelta('00:'+dfRaw['Time'])
dfRaw.loc[dfRaw['Period'] <= 2,'Time'] = pd.to_timedelta('00:20:00') - dfRaw.loc[dfRaw['Period'] <= 2,'Time']
dfRaw.loc[dfRaw['Period'] > 2,'Time'] = pd.to_timedelta('00:05:00') - dfRaw.loc[dfRaw['Period'] > 2,'Time']
dfRaw.loc[dfRaw['Period'] == 2,'Time'] += pd.to_timedelta('00:20:00')
dfRaw.loc[dfRaw['Period'] == 3,'Time'] += pd.to_timedelta('00:25:00')
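# Worked example (illustrative): a play logged at '12:34' in Period 2 becomes
#   pd.to_timedelta('00:12:34'); then 00:20:00 - 00:12:34 = 00:07:26 elapsed in the half,
#   plus 00:20:00 for the second half -> 00:27:26 of game time elapsed.
# The first overtime (Period 3) runs 5 minutes and is offset by 00:25:00.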
"""
Routines for casting.
"""
from contextlib import suppress
from datetime import date, datetime, timedelta
from typing import (
TYPE_CHECKING,
Any,
Dict,
List,
Optional,
Sequence,
Set,
Sized,
Tuple,
Type,
Union,
)
import numpy as np
from pandas._libs import lib, tslib, tslibs
from pandas._libs.tslibs import (
NaT,
OutOfBoundsDatetime,
Period,
Timedelta,
Timestamp,
conversion,
iNaT,
ints_to_pydatetime,
ints_to_pytimedelta,
)
from pandas._libs.tslibs.timezones import tz_compare
from pandas._typing import AnyArrayLike, ArrayLike, Dtype, DtypeObj, Scalar, Shape
from pandas.util._validators import validate_bool_kwarg
from pandas.core.dtypes.common import (
DT64NS_DTYPE,
INT64_DTYPE,
POSSIBLY_CAST_DTYPES,
TD64NS_DTYPE,
ensure_int8,
ensure_int16,
ensure_int32,
ensure_int64,
ensure_object,
ensure_str,
is_bool,
is_bool_dtype,
is_categorical_dtype,
is_complex,
is_complex_dtype,
is_datetime64_dtype,
is_datetime64_ns_dtype,
is_datetime64tz_dtype,
is_datetime_or_timedelta_dtype,
is_dtype_equal,
is_extension_array_dtype,
is_float,
is_float_dtype,
is_integer,
is_integer_dtype,
is_numeric_dtype,
is_object_dtype,
is_scalar,
is_sparse,
is_string_dtype,
is_timedelta64_dtype,
is_timedelta64_ns_dtype,
is_unsigned_integer_dtype,
pandas_dtype,
)
from pandas.core.dtypes.dtypes import (
DatetimeTZDtype,
ExtensionDtype,
IntervalDtype,
PeriodDtype,
)
from pandas.core.dtypes.generic import (
ABCDataFrame,
ABCDatetimeArray,
ABCDatetimeIndex,
ABCExtensionArray,
ABCPeriodArray,
ABCPeriodIndex,
ABCSeries,
)
from pandas.core.dtypes.inference import is_list_like
from pandas.core.dtypes.missing import (
is_valid_nat_for_dtype,
isna,
na_value_for_dtype,
notna,
)
if TYPE_CHECKING:
from pandas import Series
from pandas.core.arrays import ExtensionArray
from pandas.core.indexes.base import Index
_int8_max = np.iinfo(np.int8).max
_int16_max = np.iinfo(np.int16).max
_int32_max = np.iinfo(np.int32).max
_int64_max = np.iinfo(np.int64).max
def maybe_convert_platform(values):
""" try to do platform conversion, allow ndarray or list here """
if isinstance(values, (list, tuple, range)):
values = construct_1d_object_array_from_listlike(values)
if getattr(values, "dtype", None) == np.object_:
if hasattr(values, "_values"):
values = values._values
values = lib.maybe_convert_objects(values)
return values
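# Illustrative behaviour (a sketch, not exhaustive):
#   maybe_convert_platform([1, 2, 3])      -> ndarray of platform ints
#   maybe_convert_platform(["a", 1, 2.5])  -> object ndarray (mixed types stay object)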
def is_nested_object(obj) -> bool:
"""
return a boolean if we have a nested object, e.g. a Series with 1 or
more Series elements
    This may not necessarily be performant.
"""
if isinstance(obj, ABCSeries) and is_object_dtype(obj.dtype):
if any(isinstance(v, ABCSeries) for v in obj._values):
return True
return False
def maybe_box_datetimelike(value: Scalar, dtype: Optional[Dtype] = None) -> Scalar:
"""
Cast scalar to Timestamp or Timedelta if scalar is datetime-like
and dtype is not object.
Parameters
----------
value : scalar
dtype : Dtype, optional
Returns
-------
scalar
"""
if dtype == object:
pass
elif isinstance(value, (np.datetime64, datetime)):
value = tslibs.Timestamp(value)
elif isinstance(value, (np.timedelta64, timedelta)):
        value = tslibs.Timedelta(value)
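# Illustrative behaviour (a sketch, not exhaustive):
#   maybe_box_datetimelike(np.datetime64("2021-01-01"))          -> Timestamp("2021-01-01")
#   maybe_box_datetimelike(np.timedelta64(1, "D"))               -> Timedelta("1 days")
#   maybe_box_datetimelike(np.datetime64("2021-01-01"), object)  -> value left unboxed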
from __future__ import division
import logging
from os import path
import time
from ast import literal_eval
import traceback
from flask import request
from sqlalchemy.sql import select
from sqlalchemy.sql import text
import settings
import skyline_version
from skyline_functions import (
mkdir_p,
get_redis_conn_decoded,
get_redis_conn,
)
from database import (
get_engine, engine_disposal, ionosphere_table_meta, metrics_table_meta,
ionosphere_matched_table_meta,
ionosphere_layers_matched_table_meta,
anomalies_table_meta,
)
skyline_version = skyline_version.__absolute_version__
skyline_app = 'webapp'
skyline_app_logger = '%sLog' % skyline_app
logger = logging.getLogger(skyline_app_logger)
skyline_app_logfile = '%s/%s.log' % (settings.LOG_PATH, skyline_app)
logfile = '%s/%s.log' % (settings.LOG_PATH, skyline_app)
try:
ENABLE_WEBAPP_DEBUG = settings.ENABLE_WEBAPP_DEBUG
except EnvironmentError as e:
logger.error('error :: cannot determine ENABLE_WEBAPP_DEBUG from settings - %s' % e)
ENABLE_WEBAPP_DEBUG = False
# @added 20210107 - Feature #3934: ionosphere_performance
def get_ionosphere_performance(
metric, metric_like, from_timestamp, until_timestamp, format,
# @added 20210128 - Feature #3934: ionosphere_performance
# Improve performance and pass arguments to get_ionosphere_performance
# for cache key
anomalies, new_fps, fps_matched_count, layers_matched_count,
sum_matches, title, period, height, width, fp_type, timezone_str):
"""
Analyse the performance of Ionosphere on a metric or metric namespace and
create the graph resources or json data as required.
:rtype: dict
"""
import datetime
import pytz
import pandas as pd
dev_null = None
ionosphere_performance_debug = False
determine_start_timestamp = False
redis_conn = None
redis_conn_decoded = None
# @added 20210202 - Feature #3934: ionosphere_performance
# Handle user timezone
tz_from_timestamp_datetime_obj = None
tz_until_timestamp_datetime_obj = None
utc_epoch_timestamp = datetime.datetime(1970, 1, 1, tzinfo=datetime.timezone(datetime.timedelta(seconds=0)))
determine_timezone_start_date = False
determine_timezone_end_date = False
user_timezone = pytz.timezone(timezone_str)
utc_timezone = pytz.timezone('UTC')
# @added 20210203 - Feature #3934: ionosphere_performance
# Add default timestamp
start_timestamp = 0
end_timestamp = 0
if from_timestamp == 0:
start_timestamp = 0
determine_start_timestamp = True
if from_timestamp != 0:
if ":" in from_timestamp:
# @modified 20210202 - Feature #3934: ionosphere_performance
# Handle user timezone
if timezone_str == 'UTC':
new_from_timestamp = time.mktime(datetime.datetime.strptime(from_timestamp, '%Y%m%d %H:%M').timetuple())
logger.info('get_ionosphere_performance - new_from_timestamp - %s' % str(new_from_timestamp))
else:
utc_from_timestamp = time.mktime(datetime.datetime.strptime(from_timestamp, '%Y%m%d %H:%M').timetuple())
logger.info('get_ionosphere_performance - utc_from_timestamp - %s' % str(utc_from_timestamp))
from_timestamp_datetime_obj = datetime.datetime.strptime(from_timestamp, '%Y%m%d %H:%M')
logger.info('get_ionosphere_performance - from_timestamp_datetime_obj - %s' % str(from_timestamp_datetime_obj))
tz_offset = pytz.timezone(timezone_str).localize(from_timestamp_datetime_obj).strftime('%z')
tz_from_date = '%s:00 %s' % (from_timestamp, tz_offset)
logger.info('get_ionosphere_performance - tz_from_date - %s' % str(tz_from_date))
tz_from_timestamp_datetime_obj = datetime.datetime.strptime(tz_from_date, '%Y%m%d %H:%M:%S %z')
tz_epoch_timestamp = int((tz_from_timestamp_datetime_obj - utc_epoch_timestamp).total_seconds())
new_from_timestamp = tz_epoch_timestamp
# new_from_timestamp = time.mktime(datetime.datetime.strptime(tz_from_timestamp, '%Y%m%d %H:%M:%S %z').timetuple())
logger.info('get_ionosphere_performance - new_from_timestamp - %s' % str(new_from_timestamp))
determine_timezone_start_date = True
start_timestamp = int(new_from_timestamp)
# @added 20210203 - Feature #3934: ionosphere_performance
# Add default timestamp
else:
if from_timestamp == 'all':
start_timestamp = 0
determine_start_timestamp = True
else:
start_timestamp = int(from_timestamp)
if from_timestamp == 'all':
start_timestamp = 0
determine_start_timestamp = True
if until_timestamp and until_timestamp != 'all':
if ":" in until_timestamp:
if timezone_str == 'UTC':
new_until_timestamp = time.mktime(datetime.datetime.strptime(until_timestamp, '%Y%m%d %H:%M').timetuple())
else:
until_timestamp_datetime_obj = datetime.datetime.strptime(until_timestamp, '%Y%m%d %H:%M')
tz_offset = pytz.timezone(timezone_str).localize(until_timestamp_datetime_obj).strftime('%z')
tz_until_date = '%s:00 %s' % (until_timestamp, tz_offset)
logger.info('get_ionosphere_performance - tz_until_date - %s' % str(tz_until_date))
tz_until_timestamp_datetime_obj = datetime.datetime.strptime(tz_until_date, '%Y%m%d %H:%M:%S %z')
tz_epoch_timestamp = int((tz_until_timestamp_datetime_obj - utc_epoch_timestamp).total_seconds())
                new_until_timestamp = tz_epoch_timestamp
                # new_until_timestamp = time.mktime(datetime.datetime.strptime(tz_until_date, '%Y%m%d %H:%M:%S %z').timetuple())
end_timestamp = int(new_until_timestamp)
# @added 20210203 - Feature #3934: ionosphere_performance
# Add default timestamp
else:
if until_timestamp == 'all':
end_timestamp = int(time.time())
else:
end_timestamp = int(until_timestamp)
determine_timezone_end_date = False
if until_timestamp == 'all':
end_timestamp = int(time.time())
determine_timezone_end_date = True
if until_timestamp == 0:
end_timestamp = int(time.time())
determine_timezone_end_date = True
start_timestamp_str = str(start_timestamp)
# end_timestamp_str = str(end_timestamp)
if timezone_str == 'UTC':
begin_date = datetime.datetime.utcfromtimestamp(start_timestamp).strftime('%Y-%m-%d')
end_date = datetime.datetime.utcfromtimestamp(end_timestamp).strftime('%Y-%m-%d')
else:
if determine_timezone_start_date:
logger.info('get_ionosphere_performance - determine_timezone_start_date - True')
# non_tz_start_datetime_object = datetime.datetime.utcfromtimestamp(start_timestamp)
# logger.info('get_ionosphere_performance - non_tz_start_datetime_object - %s' % str(non_tz_start_datetime_object))
# tz_start_datetime_object = utc_timezone.localize(non_tz_start_datetime_object).astimezone(user_timezone)
# logger.info('get_ionosphere_performance - tz_end_datetime_object - %s' % str(tz_start_datetime_object))
begin_date = tz_from_timestamp_datetime_obj.strftime('%Y-%m-%d')
logger.info('get_ionosphere_performance - begin_date with %s timezone applied - %s' % (timezone_str, str(begin_date)))
else:
begin_date = datetime.datetime.utcfromtimestamp(start_timestamp).strftime('%Y-%m-%d')
if determine_timezone_end_date:
logger.info('get_ionosphere_performance - determine_timezone_end_date - True')
non_tz_end_datetime_object = datetime.datetime.utcfromtimestamp(end_timestamp)
logger.info('get_ionosphere_performance - non_tz_end_datetime_object - %s' % str(non_tz_end_datetime_object))
tz_end_datetime_object = utc_timezone.localize(non_tz_end_datetime_object).astimezone(user_timezone)
logger.info('get_ionosphere_performance - tz_end_datetime_object - %s' % str(tz_end_datetime_object))
end_date = tz_end_datetime_object.strftime('%Y-%m-%d')
logger.info('get_ionosphere_performance - end_date with %s timezone applied - %s' % (timezone_str, str(end_date)))
else:
logger.info('get_ionosphere_performance - determine_timezone_end_date - False')
end_date = datetime.datetime.utcfromtimestamp(end_timestamp).strftime('%Y-%m-%d')
original_begin_date = begin_date
# Determine period
frequency = 'D'
if 'period' in request.args:
period = request.args.get('period', 'daily')
if period == 'daily':
frequency = 'D'
extended_end_timestamp = end_timestamp + 86400
if period == 'weekly':
frequency = 'W'
extended_end_timestamp = end_timestamp + (86400 * 7)
if period == 'monthly':
frequency = 'M'
extended_end_timestamp = end_timestamp + (86400 * 30)
extended_end_date = datetime.datetime.utcfromtimestamp(extended_end_timestamp).strftime('%Y-%m-%d')
remove_prefix = False
try:
remove_prefix_str = request.args.get('remove_prefix', 'false')
if remove_prefix_str != 'false':
remove_prefix = True
except Exception as e:
dev_null = e
# Allow for the removal of a prefix from the metric name
use_metric_name = metric
if remove_prefix:
try:
if remove_prefix_str.endswith('.'):
remove_prefix = '%s' % remove_prefix_str
else:
remove_prefix = '%s.' % remove_prefix_str
use_metric_name = metric.replace(remove_prefix, '')
except Exception as e:
logger.error('error :: failed to remove prefix %s from %s - %s' % (str(remove_prefix_str), metric, e))
# @added 20210129 - Feature #3934: ionosphere_performance
# Improve performance and pass arguments to get_ionosphere_performance
# for cache key
yesterday_timestamp = end_timestamp - 86400
yesterday_end_date = datetime.datetime.utcfromtimestamp(yesterday_timestamp).strftime('%Y-%m-%d')
metric_like_str = str(metric_like)
metric_like_wildcard = metric_like_str.replace('.%', '')
# @modified 20210202 - Feature #3934: ionosphere_performance
# Handle user timezone
yesterday_data_cache_key = 'performance.%s.metric.%s.metric_like.%s.begin_date.%s.tz.%s.anomalies.%s.new_fps.%s.fps_matched_count.%s.layers_matched_count.%s.sum_matches.%s.period.%s.fp_type.%s' % (
str(yesterday_end_date), str(metric), metric_like_wildcard, str(begin_date),
str(timezone_str), str(anomalies), str(new_fps), str(fps_matched_count),
str(layers_matched_count), str(sum_matches), str(period), str(fp_type))
logger.info('get_ionosphere_performance - yesterday_data_cache_key - %s' % yesterday_data_cache_key)
try:
redis_conn_decoded = get_redis_conn_decoded(skyline_app)
except Exception as e:
logger.error(traceback.format_exc())
logger.error('error :: get_ionosphere_performance :: get_redis_conn_decoded failed')
dev_null = e
yesterday_data_raw = None
try:
yesterday_data_raw = redis_conn_decoded.get(yesterday_data_cache_key)
except Exception as e:
trace = traceback.format_exc()
fail_msg = 'error :: get_ionosphere_performance - could not get Redis data for - %s' % yesterday_data_cache_key
logger.error(trace)
logger.error(fail_msg)
dev_null = e
yesterday_data = None
if yesterday_data_raw:
try:
yesterday_data = literal_eval(yesterday_data_raw)
except Exception as e:
trace = traceback.format_exc()
fail_msg = 'error :: get_ionosphere_performance - could not get literal_eval Redis data from key - %s' % yesterday_data_cache_key
logger.error(trace)
logger.error(fail_msg)
dev_null = e
if yesterday_data:
logger.info('get_ionosphere_performance - using cache data from yesterday with %s items' % str(len(yesterday_data)))
new_from = '%s 23:59:59' % yesterday_end_date
# @modified 20210202 - Feature #3934: ionosphere_performance
# Handle user timezone
if timezone_str == 'UTC':
new_from_timestamp = time.mktime(datetime.datetime.strptime(new_from, '%Y-%m-%d %H:%M:%S').timetuple())
start_timestamp = int(new_from_timestamp) + 1
begin_date = datetime.datetime.utcfromtimestamp(start_timestamp).strftime('%Y-%m-%d')
else:
tz_new_from_timestamp_datetime_obj = datetime.datetime.strptime(new_from, '%Y-%m-%d %H:%M:%S')
tz_offset = pytz.timezone(timezone_str).localize(tz_new_from_timestamp_datetime_obj).strftime('%z')
tz_from_timestamp = '%s %s' % (new_from, tz_offset)
new_from_timestamp = time.mktime(datetime.datetime.strptime(tz_from_timestamp, '%Y-%m-%d %H:%M:%S %z').timetuple())
start_timestamp = int(new_from_timestamp) + 1
begin_date = tz_new_from_timestamp_datetime_obj.strftime('%Y-%m-%d')
logger.info('get_ionosphere_performance - using cache data from yesterday, set new start_timestamp: %s, begin_date: %s' % (
str(start_timestamp), str(begin_date)))
determine_start_timestamp = False
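        # the cached aggregation already covers everything up to yesterday, so the
        # start date no longer needs to be derived from the metrics created_timestamp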
else:
logger.info('get_ionosphere_performance - no cache data for yesterday_data')
try:
engine, fail_msg, trace = get_engine(skyline_app)
logger.info(fail_msg)
except Exception as e:
trace = traceback.format_exc()
logger.error(trace)
logger.error('%s' % fail_msg)
logger.error('error :: get_ionosphere_performance - could not get a MySQL engine')
dev_null = e
raise # to webapp to return in the UI
if not engine:
trace = 'none'
fail_msg = 'error :: get_ionosphere_performance - engine not obtained'
logger.error(fail_msg)
raise
try:
metrics_table, log_msg, trace = metrics_table_meta(skyline_app, engine)
except Exception as e:
logger.error(traceback.format_exc())
logger.error('error :: get_ionosphere_performance - failed to get metrics_table meta')
dev_null = e
if engine:
engine_disposal(skyline_app, engine)
raise # to webapp to return in the UI
metric_id = None
metric_ids = []
if metric_like != 'all':
metric_like_str = str(metric_like)
logger.info('get_ionosphere_performance - metric_like - %s' % metric_like_str)
metrics_like_query = text("""SELECT id FROM metrics WHERE metric LIKE :like_string""")
metric_like_wildcard = metric_like_str.replace('.%', '')
request_key = '%s.%s.%s.%s' % (metric_like_wildcard, begin_date, end_date, frequency)
plot_title = '%s - %s' % (metric_like_wildcard, period)
logger.info('get_ionosphere_performance - metric like query, cache key being generated from request key - %s' % request_key)
try:
connection = engine.connect()
result = connection.execute(metrics_like_query, like_string=metric_like_str)
connection.close()
for row in result:
m_id = row['id']
metric_ids.append(int(m_id))
except Exception as e:
trace = traceback.format_exc()
logger.error(trace)
logger.error('error :: get_ionosphere_performance - could not determine ids from metrics table LIKE query - %s' % e)
if engine:
engine_disposal(skyline_app, engine)
return {}
start_timestamp_date = None
# If the from_timestamp is 0 or all
if determine_start_timestamp:
created_dates = []
try:
connection = engine.connect()
# stmt = select([metrics_table.c.created_timestamp], metrics_table.c.id.in_(metric_ids)).limit(1)
stmt = select([metrics_table.c.created_timestamp], metrics_table.c.id.in_(metric_ids))
result = connection.execute(stmt)
for row in result:
# start_timestamp_date = row['created_timestamp']
created_dates.append(row['created_timestamp'])
# break
connection.close()
start_timestamp_date = sorted(created_dates)[0]
if not start_timestamp_date:
logger.error('error :: get_ionosphere_performance - could not determine created_timestamp - returning empty')
if engine:
engine_disposal(skyline_app, engine)
return {}
start_timestamp_str = str(start_timestamp_date)
logger.info('get_ionosphere_performance - determined start_timestamp_str - %s' % start_timestamp_str)
new_from_timestamp = time.mktime(datetime.datetime.strptime(start_timestamp_str, '%Y-%m-%d %H:%M:%S').timetuple())
start_timestamp = int(new_from_timestamp)
logger.info('get_ionosphere_performance - determined start_timestamp - %s' % str(start_timestamp))
begin_date = datetime.datetime.utcfromtimestamp(start_timestamp).strftime('%Y-%m-%d')
logger.info('get_ionosphere_performance - determined begin_date - %s' % str(begin_date))
# @added 20210203 - Feature #3934: ionosphere_performance
# Handle user timezone
if timezone_str != 'UTC':
logger.info('get_ionosphere_performance - determining %s datetime from UTC start_timestamp_str - %s' % (timezone_str, str(start_timestamp_str)))
from_timestamp_datetime_obj = datetime.datetime.strptime(start_timestamp_str, '%Y-%m-%d %H:%M:%S')
logger.info('get_ionosphere_performance - from_timestamp_datetime_obj - %s' % str(from_timestamp_datetime_obj))
tz_offset = pytz.timezone(timezone_str).localize(from_timestamp_datetime_obj).strftime('%z')
tz_from_date = '%s %s' % (start_timestamp_str, tz_offset)
logger.info('get_ionosphere_performance - tz_from_date - %s' % str(tz_from_date))
tz_from_timestamp_datetime_obj = datetime.datetime.strptime(tz_from_date, '%Y-%m-%d %H:%M:%S %z')
begin_date = tz_from_timestamp_datetime_obj.strftime('%Y-%m-%d')
logger.info('get_ionosphere_performance - begin_date with %s timezone applied - %s' % (timezone_str, str(begin_date)))
determine_start_timestamp = False
request_key = '%s.%s.%s.%s' % (metric_like_wildcard, begin_date, end_date, frequency)
except Exception as e:
trace = traceback.format_exc()
logger.error(trace)
                logger.error('error :: get_ionosphere_performance - could not determine created_timestamp from the metrics table - %s' % e)
if engine:
engine_disposal(skyline_app, engine)
return {}
logger.info('get_ionosphere_performance - metric_ids length - %s' % str(len(metric_ids)))
if not metric_ids:
# stmt = select([metrics_table]).where(metrics_table.c.id > 0)
if metric == 'all':
request_key = 'all.%s.%s.%s' % (begin_date, end_date, frequency)
plot_title = 'All metrics - %s' % period
logger.info('get_ionosphere_performance - metric all query, cache key being generated from request key - %s' % request_key)
# If the from_timestamp is 0 or all
if determine_start_timestamp:
try:
connection = engine.connect()
stmt = select([metrics_table.c.created_timestamp]).limit(1)
result = connection.execute(stmt)
for row in result:
start_timestamp_date = row['created_timestamp']
break
connection.close()
start_timestamp_str = str(start_timestamp_date)
logger.info('get_ionosphere_performance - determined start_timestamp_str - %s' % start_timestamp_str)
new_from_timestamp = time.mktime(datetime.datetime.strptime(start_timestamp_str, '%Y-%m-%d %H:%M:%S').timetuple())
start_timestamp = int(new_from_timestamp)
logger.info('get_ionosphere_performance - determined start_timestamp - %s' % str(start_timestamp))
begin_date = datetime.datetime.utcfromtimestamp(start_timestamp).strftime('%Y-%m-%d')
logger.info('get_ionosphere_performance - determined begin_date - %s' % str(begin_date))
# @added 20210203 - Feature #3934: ionosphere_performance
# Handle user timezone
if timezone_str != 'UTC':
logger.info('get_ionosphere_performance - determining %s datetime from UTC start_timestamp_str - %s' % (timezone_str, str(start_timestamp_str)))
from_timestamp_datetime_obj = datetime.datetime.strptime(start_timestamp_str, '%Y-%m-%d %H:%M:%S')
logger.info('get_ionosphere_performance - from_timestamp_datetime_obj - %s' % str(from_timestamp_datetime_obj))
tz_offset = pytz.timezone(timezone_str).localize(from_timestamp_datetime_obj).strftime('%z')
tz_from_date = '%s %s' % (start_timestamp_str, tz_offset)
logger.info('get_ionosphere_performance - tz_from_date - %s' % str(tz_from_date))
tz_from_timestamp_datetime_obj = datetime.datetime.strptime(tz_from_date, '%Y-%m-%d %H:%M:%S %z')
begin_date = tz_from_timestamp_datetime_obj.strftime('%Y-%m-%d')
logger.info('get_ionosphere_performance - begin_date with %s timezone applied - %s' % (timezone_str, str(begin_date)))
determine_start_timestamp = False
request_key = 'all.%s.%s.%s' % (begin_date, end_date, frequency)
logger.info('get_ionosphere_performance - metric all query, determine_start_timestamp cache key being generated from request key - %s' % request_key)
except Exception as e:
trace = traceback.format_exc()
logger.error(trace)
                    logger.error('error :: get_ionosphere_performance - could not determine created_timestamp from the metrics table - %s' % e)
if engine:
engine_disposal(skyline_app, engine)
return {}
try:
request_key = '%s.%s.%s.%s' % (metric, begin_date, end_date, frequency)
plot_title = '%s - %s' % (use_metric_name, period)
logger.info('get_ionosphere_performance - metric all query, cache key being generated from request key - %s' % request_key)
connection = engine.connect()
stmt = select([metrics_table]).where(metrics_table.c.id > 0)
result = connection.execute(stmt)
for row in result:
metric_id_str = row['id']
r_metric_id = int(metric_id_str)
metric_ids.append(r_metric_id)
connection.close()
except Exception as e:
logger.error(traceback.format_exc())
logger.error('error :: get_ionosphere_performance - could not determine metric ids from metrics - %s' % e)
if engine:
engine_disposal(skyline_app, engine)
raise
if metric != 'all':
logger.info('get_ionosphere_performance - metric - %s' % metric)
try:
request_key = '%s.%s.%s.%s' % (metric, begin_date, end_date, frequency)
plot_title = '%s - %s' % (use_metric_name, period)
logger.info('get_ionosphere_performance - metric query, cache key being generated from request key - %s' % request_key)
connection = engine.connect()
stmt = select([metrics_table]).where(metrics_table.c.metric == str(metric))
result = connection.execute(stmt)
for row in result:
metric_id_str = row['id']
metric_id = int(metric_id_str)
connection.close()
except Exception as e:
logger.error(traceback.format_exc())
logger.error('error :: get_ionosphere_performance - could not determine metric id from metrics - %s' % e)
if engine:
engine_disposal(skyline_app, engine)
raise
if determine_start_timestamp and metric_id:
try:
connection = engine.connect()
stmt = select([metrics_table.c.created_timestamp]).where(metrics_table.c.metric == str(metric))
result = connection.execute(stmt)
for row in result:
start_timestamp_date = row['created_timestamp']
break
connection.close()
start_timestamp_str = str(start_timestamp_date)
logger.info('get_ionosphere_performance - determined start_timestamp_str - %s' % start_timestamp_str)
new_from_timestamp = time.mktime(datetime.datetime.strptime(start_timestamp_str, '%Y-%m-%d %H:%M:%S').timetuple())
start_timestamp = int(new_from_timestamp)
logger.info('get_ionosphere_performance - determined start_timestamp - %s' % str(start_timestamp))
begin_date = datetime.datetime.utcfromtimestamp(start_timestamp).strftime('%Y-%m-%d')
logger.info('get_ionosphere_performance - determined begin_date - %s' % str(begin_date))
request_key = '%s.%s.%s.%s' % (metric, begin_date, end_date, frequency)
logger.info('get_ionosphere_performance - metric query, determine_start_timestamp cache key being generated from request key - %s' % request_key)
except Exception as e:
trace = traceback.format_exc()
logger.error(trace)
                logger.error('error :: get_ionosphere_performance - could not determine created_timestamp from the metrics table - %s' % e)
if engine:
engine_disposal(skyline_app, engine)
return {}
logger.info('get_ionosphere_performance - metric - %s' % str(metric))
logger.info('get_ionosphere_performance - metric_id - %s' % str(metric_id))
if metric != 'all':
if not metric_ids and not metric_id:
if engine:
engine_disposal(skyline_app, engine)
logger.info('get_ionosphere_performance - no metric_id or metric_ids, nothing to do')
performance = {
'performance': {'date': None, 'reason': 'no metric data found'},
'request_key': request_key,
'success': False,
'reason': 'no data for metric/s',
'plot': None,
'csv': None,
}
return performance
logger.info('get_ionosphere_performance - metric_id: %s, metric_ids length: %s' % (
str(metric_id), str(len(metric_ids))))
# Create request_key performance directory
ionosphere_dir = path.dirname(settings.IONOSPHERE_DATA_FOLDER)
performance_dir = '%s/performance/%s' % (ionosphere_dir, request_key)
if not path.exists(performance_dir):
mkdir_p(performance_dir)
# Report anomalies
report_anomalies = False
if 'anomalies' in request.args:
        anomalies_str = request.args.get('anomalies', 'false')
if anomalies_str == 'true':
report_anomalies = True
anomalies = []
anomalies_ts = []
if report_anomalies:
try:
anomalies_table, log_msg, trace = anomalies_table_meta(skyline_app, engine)
logger.info(log_msg)
logger.info('anomalies_table OK')
except Exception as e:
logger.error(traceback.format_exc())
logger.error('error :: failed to get anomalies_table meta')
dev_null = e
if engine:
engine_disposal(skyline_app, engine)
raise # to webapp to return in the UI
try:
connection = engine.connect()
if metric_ids:
# stmt = select([anomalies_table.c.id, anomalies_table.c.anomaly_timestamp], anomalies_table.c.metric_id.in_(metric_ids)).\
stmt = select([anomalies_table.c.id, anomalies_table.c.metric_id, anomalies_table.c.anomaly_timestamp]).\
where(anomalies_table.c.anomaly_timestamp >= start_timestamp).\
where(anomalies_table.c.anomaly_timestamp <= end_timestamp)
result = connection.execute(stmt)
elif metric_id:
stmt = select([anomalies_table.c.id, anomalies_table.c.metric_id, anomalies_table.c.anomaly_timestamp]).\
where(anomalies_table.c.metric_id == int(metric_id)).\
where(anomalies_table.c.anomaly_timestamp >= start_timestamp).\
where(anomalies_table.c.anomaly_timestamp <= end_timestamp)
result = connection.execute(stmt)
else:
stmt = select([anomalies_table.c.id, anomalies_table.c.metric_id, anomalies_table.c.anomaly_timestamp]).\
where(anomalies_table.c.anomaly_timestamp >= start_timestamp).\
where(anomalies_table.c.anomaly_timestamp <= end_timestamp)
result = connection.execute(stmt)
for row in result:
r_metric_id = row['metric_id']
append_result = False
if r_metric_id == metric_id:
append_result = True
if not append_result:
if r_metric_id in metric_ids:
append_result = True
if append_result:
anomaly_id = row['id']
anomaly_timestamp = row['anomaly_timestamp']
anomalies.append(int(anomaly_timestamp))
# anomalies_ts.append([datetime.datetime.fromtimestamp(int(anomaly_timestamp)), int(anomaly_id)])
anomalies_ts.append([int(anomaly_timestamp), int(anomaly_id)])
connection.close()
except Exception as e:
logger.error(traceback.format_exc())
logger.error('error :: could not determine anomaly ids')
dev_null = e
if engine:
engine_disposal(skyline_app, engine)
raise
logger.info('get_ionosphere_performance - anomalies_ts length - %s' % str(len(anomalies_ts)))
fp_type = 'all'
if 'fp_type' in request.args:
fp_type = request.args.get('fp_type', 'all')
# Get fp_ids
fp_ids = []
fp_ids_ts = []
fp_ids_cache_key = 'performance.%s.%s.fp_ids' % (request_key, timezone_str)
fp_ids_ts_cache_key = 'performance.%s.%s.fp_ids_ts' % (request_key, timezone_str)
if not redis_conn_decoded:
try:
redis_conn_decoded = get_redis_conn_decoded(skyline_app)
except Exception as e:
logger.error(traceback.format_exc())
logger.error('error :: get_ionosphere_performance :: get_redis_conn_decoded failed')
dev_null = e
try:
fp_ids_raw = redis_conn_decoded.get(fp_ids_cache_key)
except Exception as e:
trace = traceback.format_exc()
fail_msg = 'error :: get_ionosphere_performance - could not get Redis data for - %s' % fp_ids_cache_key
logger.error(trace)
logger.error(fail_msg)
dev_null = e
if fp_ids_raw:
try:
fp_ids = literal_eval(fp_ids_raw)
except Exception as e:
trace = traceback.format_exc()
fail_msg = 'error :: get_ionosphere_performance - could not get literal_eval Redis data from key - %s' % fp_ids_cache_key
logger.error(trace)
logger.error(fail_msg)
dev_null = e
if fp_ids:
logger.info('get_ionosphere_performance - using fp_ids from cache')
try:
fp_ids_ts_raw = redis_conn_decoded.get(fp_ids_ts_cache_key)
except Exception as e:
trace = traceback.format_exc()
fail_msg = 'error :: get_ionosphere_performance - could not get Redis data for - %s' % fp_ids_ts_cache_key
logger.error(trace)
logger.error(fail_msg)
dev_null = e
if fp_ids_ts_raw:
try:
fp_ids_ts = literal_eval(fp_ids_ts_raw)
except Exception as e:
trace = traceback.format_exc()
fail_msg = 'error :: get_ionosphere_performance - could not get literal_eval Redis data from key - %s' % fp_ids_ts_cache_key
logger.error(trace)
logger.error(fail_msg)
dev_null = e
if fp_ids_ts:
logger.info('get_ionosphere_performance - using fp_ids_ts from cache')
if not fp_ids or not fp_ids_ts:
try:
ionosphere_table, log_msg, trace = ionosphere_table_meta(skyline_app, engine)
logger.info(log_msg)
except Exception as e:
logger.error(traceback.format_exc())
logger.error('error :: get_ionosphere_performance - failed to get ionosphere_table meta')
dev_null = e
if engine:
engine_disposal(skyline_app, engine)
raise # to webapp to return in the UI
try:
logger.info('get_ionosphere_performance - determining fp ids of type %s' % fp_type)
connection = engine.connect()
if metric_ids:
if fp_type == 'user':
# stmt = select([ionosphere_table.c.id, ionosphere_table.c.anomaly_timestamp], ionosphere_table.c.metric_id.in_(metric_ids)).\
stmt = select([ionosphere_table.c.id, ionosphere_table.c.metric_id, ionosphere_table.c.anomaly_timestamp]).\
where(ionosphere_table.c.enabled == 1).\
where(ionosphere_table.c.anomaly_timestamp <= end_timestamp).\
where(ionosphere_table.c.generation <= 1)
elif fp_type == 'learnt':
# stmt = select([ionosphere_table.c.id, ionosphere_table.c.anomaly_timestamp], ionosphere_table.c.metric_id.in_(metric_ids)).\
stmt = select([ionosphere_table.c.id, ionosphere_table.c.metric_id, ionosphere_table.c.anomaly_timestamp]).\
where(ionosphere_table.c.enabled == 1).\
where(ionosphere_table.c.anomaly_timestamp <= end_timestamp).\
where(ionosphere_table.c.generation >= 2)
else:
# stmt = select([ionosphere_table.c.id, ionosphere_table.c.anomaly_timestamp], ionosphere_table.c.metric_id.in_(metric_ids)).\
stmt = select([ionosphere_table.c.id, ionosphere_table.c.metric_id, ionosphere_table.c.anomaly_timestamp]).\
where(ionosphere_table.c.enabled == 1).\
where(ionosphere_table.c.anomaly_timestamp <= end_timestamp)
logger.info('get_ionosphere_performance - determining fp ids of type %s for metric_ids' % fp_type)
result = connection.execute(stmt)
elif metric_id:
if fp_type == 'user':
stmt = select([ionosphere_table.c.id, ionosphere_table.c.metric_id, ionosphere_table.c.anomaly_timestamp]).\
where(ionosphere_table.c.metric_id == int(metric_id)).\
where(ionosphere_table.c.enabled == 1).\
where(ionosphere_table.c.anomaly_timestamp <= end_timestamp).\
where(ionosphere_table.c.generation <= 1)
elif fp_type == 'learnt':
stmt = select([ionosphere_table.c.id, ionosphere_table.c.metric_id, ionosphere_table.c.anomaly_timestamp]).\
where(ionosphere_table.c.metric_id == int(metric_id)).\
where(ionosphere_table.c.enabled == 1).\
where(ionosphere_table.c.anomaly_timestamp <= end_timestamp).\
where(ionosphere_table.c.generation >= 2)
else:
stmt = select([ionosphere_table.c.id, ionosphere_table.c.metric_id, ionosphere_table.c.anomaly_timestamp]).\
where(ionosphere_table.c.metric_id == int(metric_id)).\
where(ionosphere_table.c.enabled == 1).\
where(ionosphere_table.c.anomaly_timestamp <= end_timestamp)
logger.info('get_ionosphere_performance - determining fp ids for metric_id')
result = connection.execute(stmt)
else:
if fp_type == 'user':
stmt = select([ionosphere_table.c.id, ionosphere_table.c.metric_id, ionosphere_table.c.anomaly_timestamp]).\
where(ionosphere_table.c.enabled == 1).\
where(ionosphere_table.c.anomaly_timestamp <= end_timestamp).\
where(ionosphere_table.c.generation <= 1)
elif fp_type == 'learnt':
stmt = select([ionosphere_table.c.id, ionosphere_table.c.metric_id, ionosphere_table.c.anomaly_timestamp]).\
where(ionosphere_table.c.enabled == 1).\
where(ionosphere_table.c.anomaly_timestamp <= end_timestamp).\
where(ionosphere_table.c.generation >= 2)
else:
stmt = select([ionosphere_table.c.id, ionosphere_table.c.metric_id, ionosphere_table.c.anomaly_timestamp]).\
where(ionosphere_table.c.enabled == 1).\
where(ionosphere_table.c.anomaly_timestamp <= end_timestamp)
logger.info('get_ionosphere_performance - determining fp ids for all metrics')
result = connection.execute(stmt)
for row in result:
r_metric_id = row['metric_id']
append_result = False
if r_metric_id == metric_id:
append_result = True
if r_metric_id in metric_ids:
append_result = True
if append_result:
fp_id = row['id']
anomaly_timestamp = row['anomaly_timestamp']
fp_ids.append(int(fp_id))
fp_ids_ts.append([int(anomaly_timestamp), int(fp_id)])
connection.close()
except Exception as e:
logger.error(traceback.format_exc())
logger.error('error :: get_ionosphere_performance - could not determine fp_ids')
dev_null = e
if engine:
engine_disposal(skyline_app, engine)
raise
logger.info('get_ionosphere_performance - fp_ids_ts length - %s' % str(len(fp_ids_ts)))
if fp_ids:
if not redis_conn:
try:
redis_conn = get_redis_conn(skyline_app)
except Exception as e:
logger.error(traceback.format_exc())
logger.error('error :: get_redis_conn failed for get_ionosphere_performance')
dev_null = e
if redis_conn:
try:
logger.info('get_ionosphere_performance - setting Redis performance key with fp_ids containing %s items' % str(len(fp_ids)))
redis_conn.setex(fp_ids_cache_key, 600, str(fp_ids))
except Exception as e:
logger.error(traceback.format_exc())
logger.error('error :: get_redis_conn failed to set - %s' % fp_ids_cache_key)
dev_null = e
if fp_ids_ts:
if not redis_conn:
try:
redis_conn = get_redis_conn(skyline_app)
except Exception as e:
logger.error(traceback.format_exc())
logger.error('error :: get_redis_conn failed for get_ionosphere_performance')
dev_null = e
if redis_conn:
try:
logger.info('get_ionosphere_performance - setting Redis performance key with fp_ids_ts containing %s items' % str(len(fp_ids_ts)))
redis_conn.setex(fp_ids_ts_cache_key, 600, str(fp_ids_ts))
except Exception as e:
logger.error(traceback.format_exc())
logger.error('error :: get_redis_conn failed to set - %s' % fp_ids_ts_cache_key)
dev_null = e
# Get fp matches
try:
ionosphere_matched_table, log_msg, trace = ionosphere_matched_table_meta(skyline_app, engine)
except Exception as e:
logger.error(traceback.format_exc())
        logger.error('error :: get_ionosphere_performance - failed to get ionosphere_matched_table meta')
dev_null = e
if engine:
engine_disposal(skyline_app, engine)
raise # to webapp to return in the UI
fps_matched_ts = []
if fp_ids:
try:
connection = engine.connect()
# stmt = select([ionosphere_matched_table.c.id, ionosphere_matched_table.c.metric_timestamp], ionosphere_matched_table.c.fp_id.in_(fp_ids)).\
stmt = select([ionosphere_matched_table.c.id, ionosphere_matched_table.c.fp_id, ionosphere_matched_table.c.metric_timestamp]).\
where(ionosphere_matched_table.c.metric_timestamp >= start_timestamp).\
where(ionosphere_matched_table.c.metric_timestamp <= end_timestamp)
result = connection.execute(stmt)
for row in result:
append_result = False
if metric == 'all' and metric_like == 'all':
append_result = True
if not append_result:
fp_id = row['fp_id']
if fp_id in fp_ids:
append_result = True
if append_result:
matched_id = row['id']
metric_timestamp = row['metric_timestamp']
fps_matched_ts.append([int(metric_timestamp), int(matched_id)])
connection.close()
except Exception as e:
logger.error(traceback.format_exc())
logger.error('error :: get_ionosphere_performance - could not determine timestamps from ionosphere_matched')
dev_null = e
if engine:
engine_disposal(skyline_app, engine)
raise
logger.info('get_ionosphere_performance - fps_matched_ts - %s' % str(len(fps_matched_ts)))
# Get layers matches
try:
ionosphere_layers_matched_table, log_msg, trace = ionosphere_layers_matched_table_meta(skyline_app, engine)
except Exception as e:
trace = traceback.format_exc()
logger.error(trace)
fail_msg = 'error :: get_ionosphere_performance - failed to get ionosphere_layers_matched_table meta'
logger.error('%s' % fail_msg)
dev_null = e
if engine:
engine_disposal(skyline_app, engine)
raise # to webapp to return in the UI
layers_matched_ts = []
if fp_ids:
try:
connection = engine.connect()
# stmt = select([ionosphere_layers_matched_table.c.id, ionosphere_layers_matched_table.c.anomaly_timestamp], ionosphere_layers_matched_table.c.fp_id.in_(fp_ids)).\
stmt = select([ionosphere_layers_matched_table.c.id, ionosphere_layers_matched_table.c.fp_id, ionosphere_layers_matched_table.c.anomaly_timestamp]).\
where(ionosphere_layers_matched_table.c.anomaly_timestamp >= start_timestamp).\
where(ionosphere_layers_matched_table.c.anomaly_timestamp <= end_timestamp)
result = connection.execute(stmt)
for row in result:
append_result = False
if metric == 'all' and metric_like == 'all':
append_result = True
if not append_result:
fp_id = row['fp_id']
if fp_id in fp_ids:
append_result = True
if append_result:
matched_layers_id = row['id']
matched_timestamp = row['anomaly_timestamp']
layers_matched_ts.append([int(matched_timestamp), int(matched_layers_id)])
connection.close()
except Exception as e:
trace = traceback.format_exc()
logger.error(trace)
fail_msg = 'error :: get_ionosphere_performance - could not determine timestamps from ionosphere_layers_matched'
logger.error('%s' % fail_msg)
dev_null = e
if engine:
engine_disposal(skyline_app, engine)
raise # to webapp to return in the UI
logger.info('get_ionosphere_performance - layers_matched_ts - %s' % str(len(layers_matched_ts)))
anomalies_df = []
if anomalies_ts:
try:
anomalies_df = pd.DataFrame(anomalies_ts, columns=['date', 'id'])
anomalies_df['date'] = pd.to_datetime(anomalies_df['date'], unit='s')
# @added 20210202 - Feature #3934: ionosphere_performance
# Handle user timezone
if timezone_str != 'UTC':
anomalies_df['date'] = anomalies_df['date'].dt.tz_localize('UTC').dt.tz_convert(user_timezone)
anomalies_df = anomalies_df.set_index(pd.DatetimeIndex(anomalies_df['date']))
anomalies_df = anomalies_df.resample(frequency).apply({'id': 'count'})
anomalies_df.rename(columns={'id': 'anomaly_count'}, inplace=True)
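            # after resampling, anomalies_df holds one row per period (D, W or M)
            # with the count of anomalies recorded in that period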
if ionosphere_performance_debug:
fname_out = '%s/%s.anomalies_df.csv' % (settings.SKYLINE_TMP_DIR, request_key)
anomalies_df.to_csv(fname_out)
except Exception as e:
trace = traceback.format_exc()
logger.error(trace)
fail_msg = 'error :: get_ionosphere_performance - could not create anomalies_df'
logger.error('%s' % fail_msg)
dev_null = e
if engine:
engine_disposal(skyline_app, engine)
raise # to webapp to return in the UI
fp_ids_df = []
fps_total_df = []
if fp_ids_ts:
try:
fp_ids_df = pd.DataFrame(fp_ids_ts, columns=['date', 'id'])
fp_ids_df['date'] = pd.to_datetime(fp_ids_df['date'], unit='s')
# @added 20210202 - Feature #3934: ionosphere_performance
# Handle user timezone
if timezone_str != 'UTC':
fp_ids_df['date'] = fp_ids_df['date'].dt.tz_localize('UTC').dt.tz_convert(user_timezone)
fp_ids_df = fp_ids_df.set_index(pd.DatetimeIndex(fp_ids_df['date']))
fp_ids_df = fp_ids_df.resample(frequency).apply({'id': 'count'})
fps_total_df = fp_ids_df.cumsum()
fp_ids_df.rename(columns={'id': 'new_fps_count'}, inplace=True)
fps_total_df.rename(columns={'id': 'fps_total_count'}, inplace=True)
if ionosphere_performance_debug:
fname_out = '%s/%s.fp_ids_df.csv' % (settings.SKYLINE_TMP_DIR, request_key)
fp_ids_df.to_csv(fname_out)
fname_out = '%s/%s.fps_total_df.csv' % (settings.SKYLINE_TMP_DIR, request_key)
fps_total_df.to_csv(fname_out)
except Exception as e:
trace = traceback.format_exc()
logger.error(trace)
fail_msg = 'error :: get_ionosphere_performance - could not create fp_ids_df'
logger.error('%s' % fail_msg)
dev_null = e
if engine:
engine_disposal(skyline_app, engine)
raise # to webapp to return in the UI
fps_matched_df = []
if fps_matched_ts:
try:
fps_matched_df = pd.DataFrame(fps_matched_ts, columns=['date', 'id'])
fps_matched_df['date'] = pd.to_datetime(fps_matched_df['date'], unit='s')
# @added 20210202 - Feature #3934: ionosphere_performance
# Handle user timezone
if timezone_str != 'UTC':
fps_matched_df['date'] = fps_matched_df['date'].dt.tz_localize('UTC').dt.tz_convert(user_timezone)
fps_matched_df = fps_matched_df.set_index(pd.DatetimeIndex(fps_matched_df['date']))
fps_matched_df = fps_matched_df.resample(frequency).apply({'id': 'count'})
fps_matched_df.rename(columns={'id': 'fps_matched_count'}, inplace=True)
if ionosphere_performance_debug:
fname_out = '%s/%s.fps_matched_df.csv' % (settings.SKYLINE_TMP_DIR, request_key)
fps_matched_df.to_csv(fname_out)
except Exception as e:
trace = traceback.format_exc()
logger.error(trace)
fail_msg = 'error :: get_ionosphere_performance - could not create fps_matched_df'
logger.error('%s' % fail_msg)
dev_null = e
if engine:
engine_disposal(skyline_app, engine)
raise # to webapp to return in the UI
layers_matched_df = []
if layers_matched_ts:
try:
layers_matched_df = pd.DataFrame(layers_matched_ts, columns=['date', 'id'])
layers_matched_df['date'] = pd.to_datetime(layers_matched_df['date'], unit='s')
# @added 20210202 - Feature #3934: ionosphere_performance
# Handle user timezone
if timezone_str != 'UTC':
layers_matched_df['date'] = layers_matched_df['date'].dt.tz_localize('UTC').dt.tz_convert(user_timezone)
layers_matched_df = layers_matched_df.set_index(pd.DatetimeIndex(layers_matched_df['date']))
layers_matched_df = layers_matched_df.resample(frequency).apply({'id': 'count'})
layers_matched_df.rename(columns={'id': 'layers_matched_count'}, inplace=True)
if ionosphere_performance_debug:
fname_out = '%s/%s.layers_matched_df.csv' % (settings.SKYLINE_TMP_DIR, request_key)
layers_matched_df.to_csv(fname_out)
except Exception as e:
trace = traceback.format_exc()
logger.error(trace)
fail_msg = 'error :: get_ionosphere_performance - could not create layers_matched_df'
logger.error('%s' % fail_msg)
dev_null = e
if engine:
engine_disposal(skyline_app, engine)
raise # to webapp to return in the UI
date_list = pd.date_range(begin_date, end_date, freq=frequency)
date_list = date_list.format(formatter=lambda x: x.strftime('%Y-%m-%d'))
use_end_date = end_date
if not date_list:
date_list = pd.date_range(begin_date, extended_end_date, freq=frequency)
date_list = date_list.format(formatter=lambda x: x.strftime('%Y-%m-%d'))
use_end_date = extended_end_date
# logger.debug('debug :: get_ionosphere_performance - date_list - %s' % str(date_list))
# performance_df = pd.DataFrame(date_list, columns=['date'])
performance_df = pd.DataFrame({'date': pd.date_range(begin_date, use_end_date, freq=frequency), 'day': date_list})
# @added 20210202 - Feature #3934: ionosphere_performance
# Handle user timezone
if timezone_str != 'UTC':
        # begin_date and end_date are already timezone aware, so localize the
        # date index directly to the user timezone instead of converting from UTC
        # performance_df['date'] = performance_df['date'].dt.tz_localize('UTC').dt.tz_convert(user_timezone)
        performance_df['date'] = performance_df['date'].dt.tz_localize(user_timezone).dt.tz_convert(user_timezone)
# performance_df = performance_df.set_index(pd.DatetimeIndex(performance_df['date']))
performance_df = performance_df.set_index(['date'])
if len(anomalies_df) > 0 and report_anomalies:
performance_df = pd.merge(performance_df, anomalies_df, how='outer', on='date')
performance_df.sort_values('date')
performance_df['anomaly_count'] = performance_df['anomaly_count'].fillna(0)
if len(anomalies_df) == 0 and report_anomalies:
performance_df['anomaly_count'] = 0
# Report new fp count per day
report_new_fps = False
if 'new_fps' in request.args:
new_fps_str = request.args.get('new_fps', 'false')
if new_fps_str == 'true':
report_new_fps = True
if len(fp_ids_df) > 0 and report_new_fps:
if yesterday_data:
new_fp_ids_df = fp_ids_df.loc[yesterday_end_date:use_end_date]
performance_df = pd.merge(performance_df, new_fp_ids_df, how='outer', on='date')
del new_fp_ids_df
else:
performance_df = pd.merge(performance_df, fp_ids_df, how='outer', on='date')
performance_df.sort_values('date')
performance_df['new_fps_count'] = performance_df['new_fps_count'].fillna(0)
# else:
# performance_df['new_fps_count'] = 0
# Report running total fp count per day
report_total_fps = False
if 'total_fps' in request.args:
total_fps_str = request.args.get('total_fps', 'false')
if total_fps_str == 'true':
report_total_fps = True
if len(fps_total_df) > 0 and report_total_fps:
if yesterday_data:
new_fps_total = fps_total_df.loc[yesterday_end_date:use_end_date]
performance_df = pd.merge(performance_df, new_fps_total, how='outer', on='date')
else:
performance_df = pd.merge(performance_df, fps_total_df, how='outer', on='date')
performance_df.sort_values('date')
performance_df['fps_total_count'].fillna(method='ffill', inplace=True)
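        # fps_total_count is a cumulative sum, so forward-fill the gaps the outer
        # merge introduces for periods in which no new fps were created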
logger.info('get_ionosphere_performance - second step of creating performance_df complete, dataframe length - %s' % str(len(performance_df)))
if len(fps_total_df) == 0 and report_total_fps:
performance_df['fps_total_count'] = 0
# Report fps_matched_count per day
report_fps_matched_count = False
if 'fps_matched_count' in request.args:
fps_matched_count_str = request.args.get('fps_matched_count', 'false')
if fps_matched_count_str == 'true':
report_fps_matched_count = True
# Report layers_matched_count per day
report_layers_matched_count = False
if 'layers_matched_count' in request.args:
layers_matched_count_str = request.args.get('layers_matched_count', 'false')
if layers_matched_count_str == 'true':
report_layers_matched_count = True
# Report sum_matches per day
report_sum_matches = False
if 'sum_matches' in request.args:
sum_matches_str = request.args.get('sum_matches', 'false')
if sum_matches_str == 'true':
report_sum_matches = True
if len(fps_matched_df) > 0 and report_fps_matched_count and not report_sum_matches:
performance_df = pd.merge(performance_df, fps_matched_df, how='outer', on='date')
performance_df.sort_values('date')
performance_df['fps_matched_count'] = performance_df['fps_matched_count'].fillna(0)
logger.info('get_ionosphere_performance - third step of creating performance_df complete, dataframe length - %s' % str(len(performance_df)))
if len(fps_matched_df) == 0 and report_fps_matched_count and not report_sum_matches:
performance_df['fps_matched_count'] = 0
if len(layers_matched_df) > 0 and report_layers_matched_count and not report_sum_matches:
performance_df = pd.merge(performance_df, layers_matched_df, how='outer', on='date')
performance_df.sort_values('date')
performance_df['layers_matched_count'] = performance_df['layers_matched_count'].fillna(0)
logger.info('get_ionosphere_performance - fourth step of creating performance_df complete, dataframe length - %s' % str(len(performance_df)))
if len(layers_matched_df) == 0 and report_layers_matched_count and not report_sum_matches:
performance_df['layers_matched_count'] = 0
if report_sum_matches:
logger.info('get_ionosphere_performance - creating matches_sum_df to calculate totals and merge with performance_df')
matches_sum_df = pd.DataFrame({'date': pd.date_range(begin_date, use_end_date, freq=frequency), 'day': date_list})
if timezone_str != 'UTC':
matches_sum_df['date'] = matches_sum_df['date'].dt.tz_localize('UTC').dt.tz_convert(user_timezone)
matches_sum_df = matches_sum_df.set_index(['date'])
if len(fps_matched_df) > 0:
matches_sum_df = pd.merge(matches_sum_df, fps_matched_df, how='outer', on='date')
matches_sum_df.sort_values('date')
matches_sum_df['fps_matched_count'] = matches_sum_df['fps_matched_count'].fillna(0)
if len(fps_matched_df) == 0:
matches_sum_df['fps_matched_count'] = 0
if len(layers_matched_df) > 0:
matches_sum_df = pd.merge(matches_sum_df, layers_matched_df, how='outer', on='date')
matches_sum_df.sort_values('date')
matches_sum_df['layers_matched_count'] = matches_sum_df['layers_matched_count'].fillna(0)
if len(layers_matched_df) == 0:
matches_sum_df['layers_matched_count'] = 0
matches_sum_df['total_matches'] = matches_sum_df['fps_matched_count'] + matches_sum_df['layers_matched_count']
sum_df = matches_sum_df[['total_matches']].copy()
logger.info('get_ionosphere_performance - sum_df has %s rows' % str(len(sum_df)))
performance_df = pd.merge(performance_df, sum_df, how='outer', on='date')
performance_df.sort_values('date')
performance_df['total_matches'] = performance_df['total_matches'].fillna(0)
if yesterday_data:
ydf = | pd.DataFrame(yesterday_data) | pandas.DataFrame |
from neurovault.apps.statmaps.tasks import save_resampled_transformation_single
from neurovault.apps.statmaps.tests.utils import (clearDB, save_statmap_form)
from neurovault.apps.statmaps.models import (Collection)
from django.contrib.auth.models import User
from django.test import TestCase, Client
import pandas as pd
import os.path
import json
class TestGeneDecoding(TestCase):
_map = None
@classmethod
def setUpClass(cls):
cls.test_path = os.path.abspath(os.path.dirname(__file__))
cls.user, _ = User.objects.get_or_create(username='neurovault')
cls.client = Client()
cls.client.login(username=cls.user)
cls.Collection1 = Collection(name='Collection1', owner=cls.user)
cls.Collection1.save()
nii_path = os.path.join(
cls.test_path, cls._map)
map = save_statmap_form(
image_path=nii_path, collection=cls.Collection1)
save_resampled_transformation_single(map.pk)
response = json.loads(cls.client.get("/images/%d/gene_expression/json?mask=full" % map.pk, follow=True).content)
cls.df = | pd.DataFrame(response["data"], columns=response["columns"]) | pandas.DataFrame |
#!/usr/bin/env python3
"""
Created: March 10th, 2020
@author: <NAME>
PlotDecomposition works with matrix formats SigProfiler SBS-96, SBS-1536, DBS-78,
and ID-83. This program is intended to take two matrices.
(1) Sample matrix - A SigProfiler formatted SBS-96, SBS-1536, DBS-78, or ID-83
matrix.
(2) Basis matrix - A SigProfiler formatted SBS-96, SBS-1536, DBS-78, or ID-83
matrix that is the decomposition of (1).
When running the function 'run_PlotDecomposition' a plot of the decomposition will
be generated and saved to the output folder. Refer to the function below to learn
more about the parameters required to generate the decomposition plot.
"""
import os
import pandas as pd
import numpy as np
import scipy.stats
import scipy.spatial
import sigProfilerPlotting as pltCNV
from SigProfilerExtractor import SigProfilerPlottingMatrix as sigPlt
from SigProfilerExtractor import PlotDecomposition_SBS96 as spd_96
from SigProfilerExtractor import PlotDecomposition_SBS288 as spd_288
from SigProfilerExtractor import PlotDecomposition_SBS1536 as spd_1536
from SigProfilerExtractor import PlotDecomposition_DBS78 as spd_78
from SigProfilerExtractor import PlotDecomposition_ID83 as spd_83
from SigProfilerExtractor import PlotDecomposition_CNV48 as cnv_48
from SigProfilerExtractor import subroutines as sub
# imports for working with plots in memory
import io
from PIL import Image
from reportlab.lib.utils import ImageReader
# Global Variables
SBS_CONTEXTS = ["6", "24", "96", "288", "384", "1536", "6144"]
DBS_CONTEXTS = ["78", "186", "1248", "2976"]
ID_CONTEXTS = ["28", "83", "415"]
CNV_CONTEXTS = ["48"]
mtype_options = ["6", "24", "96", "384", "1536", "6144", "28", "83", "415", "78", "186", "1248", "2976"]
# Helper function for converting BytesIO to image so it can be plotted by reportlab
def bytes_to_img(byte_png):
byte_png.seek(0)
tmp_im=Image.open(byte_png)
image = ImageReader(tmp_im)
return image
# Helper function to convert byte array to image array
def open_byte_to_img_dict(byte_dict):
img_dict = dict()
for name in byte_dict.keys():
tmp_img = bytes_to_img(byte_dict[name])
img_dict[name] = tmp_img
return img_dict
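# Minimal usage sketch (not part of the original module, helper name is
# illustrative): round-trips an in-memory PNG through bytes_to_img() using only
# the imports already present above (io, PIL Image, reportlab ImageReader).
# The function is never called by the module itself.
def _example_bytes_to_img():
    png_buffer = io.BytesIO()
    Image.new("RGB", (10, 10), color="white").save(png_buffer, format="PNG")
    # bytes_to_img rewinds the buffer, decodes it with PIL and wraps the result
    # in an ImageReader that reportlab can draw directly
    return bytes_to_img(png_buffer)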
def calculate_similarities(denovo, denovo_name, est_denovo):
from numpy import inf
# If matrix is 1536 context, then collapse it to 96 format
if denovo.shape[0]==1536:
index = denovo.iloc[:,0]
denovo_tmp = pd.DataFrame(denovo, index=index)
denovo_tmp = denovo.groupby(denovo_tmp.index.str[1:8]).sum()
denovo = pd.DataFrame(denovo_tmp)
denovo = denovo.reset_index()
elif denovo.shape[0]==288:
index = denovo.iloc[:,0]
denovo_tmp = pd.DataFrame(denovo, index=index)
denovo_tmp = denovo.groupby(denovo_tmp.index.str[2:9]).sum()
denovo = pd.DataFrame(denovo_tmp)
denovo = denovo.reset_index()
sample_names = [denovo_name]
if sample_names is False:
sample_names = ["None"]*denovo.shape[1]
cosine_similarity_list = []
cosine_distance_list = []
correlation_list = []
correlation_distance_list = []
kl_divergence_list = []
l1_norm_list = []
l2_norm_list = []
relative_l1_list = []
relative_l2_list = []
p_i = denovo[denovo_name]
q_i = est_denovo
cosine_similarity_list.append(round(sub.cos_sim(p_i,q_i ),3))
cosine_distance_list.append(round(scipy.spatial.distance.cosine(p_i, q_i),3))
correlation_list.append(round(scipy.stats.pearsonr(p_i,q_i)[0],3))
correlation_distance_list.append(round(1-scipy.stats.pearsonr(p_i,q_i)[0],3))
kl_divergence_list.append(round(scipy.stats.entropy(p_i,q_i),4))
l1_norm_list.append(round(np.linalg.norm(p_i-q_i , ord=1),2))
relative_l1_list.append(round((l1_norm_list[-1]/np.linalg.norm(p_i, ord=1))*100,3))
l2_norm_list.append(round(np.linalg.norm(p_i-q_i , ord=2),2))
relative_l2_list.append(round((l2_norm_list[-1]/np.linalg.norm(p_i, ord=2))*100,3))
kl_divergence_list = np.array(kl_divergence_list)
kl_divergence_list[kl_divergence_list == inf] =1000
similarities_dataframe = pd.DataFrame({"Sample Names": sample_names, \
"Cosine Similarity": cosine_similarity_list, \
"Cosine Distance": cosine_distance_list, \
"Correlation Distance": correlation_distance_list, \
"Correlation Coefficient": correlation_list, \
"L1 Norm": l1_norm_list, \
"L1 Norm %":relative_l1_list, \
"L2 Norm": l2_norm_list, \
"L2 Norm %": relative_l2_list, \
"KL Divergence": kl_divergence_list})
similarities_dataframe = similarities_dataframe.set_index("Sample Names")
return similarities_dataframe
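# Minimal sketch (not part of the original module; toy values and the helper
# name are illustrative): calculate_similarities() compares a single de novo
# signature column against its reconstruction and returns a one-row dataframe
# of similarity and distance metrics. Real inputs carry 96, 288 or 1536 rows;
# the function is never called by the module itself.
def _example_calculate_similarities():
    toy_denovo = pd.DataFrame({
        "MutationType": ["A[C>A]A", "A[C>G]A", "A[C>T]A", "A[T>A]A"],
        "SBS96A": [10.0, 20.0, 30.0, 40.0],
    })
    toy_reconstruction = np.array([11.0, 19.0, 29.0, 41.0])
    return calculate_similarities(toy_denovo, "SBS96A", toy_reconstruction)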
# Determine if the matrix matches the format indicated by mtype
def matrix_is_formatted(mtx, mtype):
# Files to be checked against
FormatFiles_path = "src/FormatFiles/"
example_SBS6 = FormatFiles_path + "Sample_Files.SBS6.all"
example_SBS24 = FormatFiles_path + "Sample_Files.SBS24.all"
example_SBS96 = FormatFiles_path + "Sample_Files.SBS96.all"
example_SBS384 = FormatFiles_path + "Sample_Files.SBS384.all"
example_SBS1536 = FormatFiles_path + "Sample_Files.SBS1536.all"
    example_SBS6144 = FormatFiles_path + "Sample_Files.SBS6144.all"
example_ID28 = FormatFiles_path + "Sample_Files.ID28.all"
example_ID83 = FormatFiles_path + "Sample_Files.ID83.all"
example_ID415 = FormatFiles_path + "Sample_Files.ID415.all"
example_DBS78 = FormatFiles_path + "Sample_Files.DBS78.all"
example_DBS186 = FormatFiles_path + "Sample_Files.DBS186.all"
example_DBS1248 = FormatFiles_path + "Sample_Files.DBS1248.all"
example_DBS2976 = FormatFiles_path + "Sample_Files.DBS2976.all"
example_files = [example_SBS6, example_SBS24, example_SBS96, example_SBS384, \
example_SBS1536, example_SBS6144, example_ID28, example_ID83, example_ID415, \
example_DBS78, example_DBS186, example_DBS1248, example_DBS2976]
if mtype not in mtype_options:
raise Exception('Input context format does not match any of the' +
' existing supported contexts. The full list is: '
+ str(mtype_options))
return False
# check that the input matrix has the correct format
f1_names = mtx.iloc[:,0]
file_index = mtype_options.index(mtype)
ref_file = pd.read_csv(example_files[file_index], sep="\t")
ref_names = ref_file.iloc[:,0]
if (f1_names.equals(ref_names)):
return True
else:
return False
def genSBS_pngs(denovo_mtx, basis_mtx, output_path, project, mtype):
denovo_plots = dict()
basis_plots = dict()
if mtype == "1536" or mtype == "288":
denovo_plots = sigPlt.plotSBS(denovo_mtx, output_path, project, mtype, True)
basis_plots = sigPlt.plotSBS(basis_mtx, output_path, project, "96", True)
elif mtype == "96":
denovo_plots = sigPlt.plotSBS(denovo_mtx, output_path, project, mtype, True)
basis_plots = sigPlt.plotSBS(basis_mtx, output_path, project, mtype, True)
return denovo_plots,basis_plots
def genDBS_pngs(denovo_mtx, basis_mtx, output_path, project, mtype):
denovo_plots = dict()
basis_plots = dict()
denovo_plots = sigPlt.plotDBS(denovo_mtx, output_path, project, mtype, True)
basis_plots = sigPlt.plotDBS(basis_mtx, output_path, project, mtype, True)
return denovo_plots,basis_plots
def genID_pngs(denovo_mtx, basis_mtx, output_path, project, mtype):
denovo_plots = dict()
basis_plots = dict()
denovo_plots = sigPlt.plotID(denovo_mtx, output_path, project, mtype, True)
basis_plots = sigPlt.plotID(basis_mtx, output_path, project, mtype, True)
return denovo_plots,basis_plots
def genCNV_pngs(denovo_mtx, basis_mtx, output_path, project, mtype):
denovo_plots = dict()
basis_plots = dict()
denovo_plots = pltCNV.plotCNV(denovo_mtx, output_path, project, plot_type="pdf", percentage=True, aggregate=False, read_from_file=False, write_to_file=False)
basis_plots = pltCNV.plotCNV(basis_mtx, output_path, project, plot_type="pdf", percentage=True, aggregate=False, read_from_file=False, write_to_file=False)
return denovo_plots,basis_plots
# signames, weights
def gen_sub_plots(denovo_mtx, basis_mtx, output_path, project, mtype):
if mtype in SBS_CONTEXTS:
if not os.path.exists(output_path):
os.makedirs(output_path)
denovo_plots,basis_plots=genSBS_pngs(denovo_mtx, basis_mtx, output_path, project, mtype)
return denovo_plots,basis_plots
elif mtype in DBS_CONTEXTS:
if not os.path.exists(output_path):
os.makedirs(output_path)
denovo_plots,basis_plots=genDBS_pngs(denovo_mtx, basis_mtx, output_path, project, mtype)
return denovo_plots,basis_plots
elif mtype in ID_CONTEXTS:
if not os.path.exists(output_path):
os.makedirs(output_path)
denovo_plots,basis_plots=genID_pngs(denovo_mtx, basis_mtx, output_path, project, mtype)
return denovo_plots,basis_plots
elif mtype in CNV_CONTEXTS:
if not os.path.exists(output_path):
os.makedirs(output_path)
denovo_plots, basis_plots=genCNV_pngs(denovo_mtx, basis_mtx, output_path, project, mtype)
return denovo_plots,basis_plots
else:
print("ERROR: mtype is " + mtype + " and is not yet supported.")
# generate the plot for the reconstruction
def gen_reconstructed_png(denovo_name, basis_mtx, basis_names, weights, output_path, project, mtype):
reconstruction_plot=dict()
col_names=[denovo_name]
mut_col = basis_mtx.iloc[:,0]
recon_plot = basis_mtx[basis_names[0]]*float(weights[0].strip("%"))/100
for i in range(1,len(weights)):
recon_plot = recon_plot + basis_mtx[basis_names[i]]*(float(weights[i].strip("%"))/100)
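    # recon_plot is now the weighted sum of the basis signatures:
    # reconstruction = sum_k(basis[:, k] * w_k / 100), where each w_k is passed
    # in as a percentage string such as '35.2%'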
recon_plot = | pd.Series(recon_plot, name=denovo_name) | pandas.Series |
import wandb
from wandb import data_types
import numpy as np
import pytest
import os
import sys
import datetime
from wandb.sdk.data_types._dtypes import *
class_labels = {1: "tree", 2: "car", 3: "road"}
test_folder = os.path.dirname(os.path.realpath(__file__))
im_path = os.path.join(test_folder, "..", "assets", "test.png")
def test_none_type():
assert TypeRegistry.type_of(None) == NoneType()
assert TypeRegistry.type_of(None).assign(None) == NoneType()
assert TypeRegistry.type_of(None).assign(1) == InvalidType()
def test_string_type():
assert TypeRegistry.type_of("Hello") == StringType()
assert TypeRegistry.type_of("Hello").assign("World") == StringType()
assert TypeRegistry.type_of("Hello").assign(None) == InvalidType()
assert TypeRegistry.type_of("Hello").assign(1) == InvalidType()
def test_number_type():
assert TypeRegistry.type_of(1.2) == NumberType()
assert TypeRegistry.type_of(1.2).assign(1) == NumberType()
assert TypeRegistry.type_of(1.2).assign(None) == InvalidType()
assert TypeRegistry.type_of(1.2).assign("hi") == InvalidType()
def make_datetime():
return datetime.datetime(2000, 12, 1)
def make_date():
return datetime.date(2000, 12, 1)
def make_datetime64():
return np.datetime64("2000-12-01")
def test_timestamp_type():
assert TypeRegistry.type_of(make_datetime()) == TimestampType()
assert (
TypeRegistry.type_of(make_datetime())
.assign(make_date())
.assign(make_datetime64())
== TimestampType()
)
assert TypeRegistry.type_of(make_datetime()).assign(None) == InvalidType()
assert TypeRegistry.type_of(make_datetime()).assign(1) == InvalidType()
def test_boolean_type():
assert TypeRegistry.type_of(True) == BooleanType()
assert TypeRegistry.type_of(True).assign(False) == BooleanType()
assert TypeRegistry.type_of(True).assign(None) == InvalidType()
assert TypeRegistry.type_of(True).assign(1) == InvalidType()
def test_any_type():
assert AnyType() == AnyType().assign(1)
assert AnyType().assign(None) == InvalidType()
def test_never_type():
assert InvalidType().assign(1) == InvalidType()
assert InvalidType().assign("a") == InvalidType()
assert InvalidType().assign(True) == InvalidType()
assert InvalidType().assign(None) == InvalidType()
def test_unknown_type():
assert UnknownType().assign(1) == NumberType()
assert UnknownType().assign(None) == InvalidType()
def test_union_type():
wb_type = UnionType([float, str])
assert wb_type.assign(1) == wb_type
assert wb_type.assign("s") == wb_type
assert wb_type.assign(True) == InvalidType()
wb_type = UnionType([float, AnyType()])
assert wb_type.assign(1) == wb_type
assert wb_type.assign("s") == wb_type
assert wb_type.assign(True) == wb_type
wb_type = UnionType([float, UnknownType()])
assert wb_type.assign(1) == wb_type
assert wb_type.assign("s") == UnionType([float, StringType()])
assert wb_type.assign(None) == InvalidType()
wb_type = UnionType([float, OptionalType(UnknownType())])
assert wb_type.assign(None).assign(True) == UnionType(
[float, OptionalType(BooleanType())]
)
wb_type = UnionType([float, UnionType([str, UnknownType()])])
assert wb_type.assign(1) == wb_type
assert wb_type.assign("s") == wb_type
assert wb_type.assign(True) == UnionType([float, str, bool])
assert wb_type.assign(None) == InvalidType()
def test_const_type():
wb_type = ConstType(1)
assert wb_type.assign(1) == wb_type
assert wb_type.assign("a") == InvalidType()
assert wb_type.assign(2) == InvalidType()
def test_set_const_type():
wb_type = ConstType(set())
assert wb_type.assign(set()) == wb_type
assert wb_type.assign(None) == InvalidType()
assert wb_type.assign({1}) == InvalidType()
assert wb_type.assign([]) == InvalidType()
wb_type = ConstType({1, 2, 3})
assert wb_type.assign(set()) == InvalidType()
assert wb_type.assign(None) == InvalidType()
assert wb_type.assign({1, 2, 3}) == wb_type
assert wb_type.assign([1, 2, 3]) == InvalidType()
def test_object_type():
wb_type = TypeRegistry.type_of(np.random.rand(30))
assert wb_type.assign(np.random.rand(30)) == wb_type
assert wb_type.assign(4) == InvalidType()
def test_list_type():
assert ListType(int).assign([]) == ListType(int, 0)
assert ListType(int).assign([1, 2, 3]) == ListType(int, 3)
assert ListType(int).assign([1, "a", 3]) == InvalidType()
def test_dict_type():
spec = {
"number": float,
"nested": {
"list_str": [str],
},
}
exact = {
"number": 1,
"nested": {
"list_str": ["hello", "world"],
},
}
subset = {"nested": {"list_str": ["hi"]}}
narrow = {"number": 1, "string": "hi"}
wb_type = TypeRegistry.type_of(exact)
assert wb_type.assign(exact) == wb_type
assert wb_type.assign(subset) == InvalidType()
assert wb_type.assign(narrow) == InvalidType()
spec = {
"optional_number": OptionalType(float),
"optional_unknown": OptionalType(UnknownType()),
}
wb_type = TypedDictType(spec)
assert wb_type.assign({}) == wb_type
assert wb_type.assign({"optional_number": 1}) == wb_type
assert wb_type.assign({"optional_number": "1"}) == InvalidType()
assert wb_type.assign({"optional_unknown": "hi"}) == TypedDictType(
{
"optional_number": OptionalType(float),
"optional_unknown": OptionalType(str),
}
)
assert wb_type.assign({"optional_unknown": None}) == TypedDictType(
{
"optional_number": OptionalType(float),
"optional_unknown": OptionalType(UnknownType()),
}
)
wb_type = TypedDictType({"unknown": UnknownType()})
assert wb_type.assign({}) == InvalidType()
assert wb_type.assign({"unknown": None}) == InvalidType()
assert wb_type.assign({"unknown": 1}) == TypedDictType(
{"unknown": float},
)
def test_nested_dict():
notation_type = TypedDictType(
{
"a": float,
"b": bool,
"c": str,
"d": UnknownType(),
"e": {},
"f": [],
"g": [
[
{
"a": float,
"b": bool,
"c": str,
"d": UnknownType(),
"e": {},
"f": [],
"g": [[]],
}
]
],
}
)
expanded_type = TypedDictType(
{
"a": NumberType(),
"b": BooleanType(),
"c": StringType(),
"d": UnknownType(),
"e": TypedDictType({}),
"f": ListType(),
"g": ListType(
ListType(
TypedDictType(
{
"a": NumberType(),
"b": BooleanType(),
"c": StringType(),
"d": UnknownType(),
"e": TypedDictType({}),
"f": ListType(),
"g": ListType(ListType()),
}
)
)
),
}
)
example = {
"a": 1,
"b": True,
"c": "StringType()",
"d": "hi",
"e": {},
"f": [1],
"g": [
[
{
"a": 2,
"b": False,
"c": "StringType()",
"d": 3,
"e": {},
"f": [],
"g": [[5]],
}
]
],
}
real_type = TypedDictType.from_obj(example)
assert notation_type == expanded_type
assert notation_type.assign(example) == real_type
def test_image_type():
wb_type = data_types._ImageFileType()
image_simple = data_types.Image(np.random.rand(10, 10))
wb_type_simple = data_types._ImageFileType.from_obj(image_simple)
image_annotated = data_types.Image(
np.random.rand(10, 10),
boxes={
"box_predictions": {
"box_data": [
{
"position": {
"minX": 0.1,
"maxX": 0.2,
"minY": 0.3,
"maxY": 0.4,
},
"class_id": 1,
"box_caption": "minMax(pixel)",
"scores": {"acc": 0.1, "loss": 1.2},
},
],
"class_labels": class_labels,
},
"box_ground_truth": {
"box_data": [
{
"position": {
"minX": 0.1,
"maxX": 0.2,
"minY": 0.3,
"maxY": 0.4,
},
"class_id": 1,
"box_caption": "minMax(pixel)",
"scores": {"acc": 0.1, "loss": 1.2},
},
],
"class_labels": class_labels,
},
},
masks={
"mask_predictions": {
"mask_data": np.random.randint(0, 4, size=(30, 30)),
"class_labels": class_labels,
},
"mask_ground_truth": {"path": im_path, "class_labels": class_labels},
},
)
wb_type_annotated = data_types._ImageFileType.from_obj(image_annotated)
image_annotated_differently = data_types.Image(
np.random.rand(10, 10),
boxes={
"box_predictions": {
"box_data": [
{
"position": {
"minX": 0.1,
"maxX": 0.2,
"minY": 0.3,
"maxY": 0.4,
},
"class_id": 1,
"box_caption": "minMax(pixel)",
"scores": {"acc": 0.1, "loss": 1.2},
},
],
"class_labels": class_labels,
},
},
masks={
"mask_predictions": {
"mask_data": np.random.randint(0, 4, size=(30, 30)),
"class_labels": class_labels,
},
"mask_ground_truth_2": {"path": im_path, "class_labels": class_labels},
},
)
assert wb_type.assign(image_simple) == wb_type_simple
assert wb_type.assign(image_annotated) == wb_type_annotated
# OK to assign Images with disjoint class set
assert wb_type_annotated.assign(image_simple) == wb_type_annotated
# Merge when disjoint
assert wb_type_annotated.assign(
image_annotated_differently
) == data_types._ImageFileType(
box_layers={"box_predictions": {1, 2, 3}, "box_ground_truth": {1, 2, 3}},
box_score_keys={"loss", "acc"},
mask_layers={
"mask_ground_truth_2": set(),
"mask_ground_truth": set(),
"mask_predictions": {1, 2, 3},
},
class_map={"1": "tree", "2": "car", "3": "road"},
)
def test_classes_type():
wb_classes = data_types.Classes(
[
{"id": 1, "name": "cat"},
{"id": 2, "name": "dog"},
{"id": 3, "name": "horse"},
]
)
wb_class_type = (
wandb.wandb_sdk.data_types.helper_types.classes._ClassesIdType.from_obj(
wb_classes
)
)
assert wb_class_type.assign(1) == wb_class_type
assert wb_class_type.assign(0) == InvalidType()
def test_table_type():
table_1 = wandb.Table(columns=["col"], data=[[1]])
t1 = data_types._TableType.from_obj(table_1)
table_2 = wandb.Table(columns=["col"], data=[[1.3]])
table_3 = wandb.Table(columns=["col"], data=[["a"]])
assert t1.assign(table_2) == t1
assert t1.assign(table_3) == InvalidType()
def test_table_implicit_types():
table = wandb.Table(columns=["col"])
table.add_data(None)
table.add_data(1)
with pytest.raises(TypeError):
table.add_data("a")
table = wandb.Table(columns=["col"], optional=False)
with pytest.raises(TypeError):
table.add_data(None)
table.add_data(1)
with pytest.raises(TypeError):
table.add_data("a")
def test_table_allow_mixed_types():
table = wandb.Table(columns=["col"], allow_mixed_types=True)
table.add_data(None)
table.add_data(1)
table.add_data("a") # No error with allow_mixed_types
table = wandb.Table(columns=["col"], optional=False, allow_mixed_types=True)
with pytest.raises(TypeError):
table.add_data(None) # Still errors since optional is false
table.add_data(1)
table.add_data("a") # No error with allow_mixed_types
def test_tables_with_dicts():
good_data = [
[None],
[
{
"a": [
{
"b": 1,
"c": [
[
{
"d": 1,
"e": wandb.Image(
np.random.randint(255, size=(10, 10))
),
}
]
],
}
]
}
],
[
{
"a": [
{
"b": 1,
"c": [
[
{
"d": 1,
"e": wandb.Image(
np.random.randint(255, size=(10, 10))
),
}
]
],
}
]
}
],
]
bad_data = [
[None],
[
{
"a": [
{
"b": 1,
"c": [
[
{
"d": 1,
"e": wandb.Image(
np.random.randint(255, size=(10, 10))
),
}
]
],
}
]
}
],
[
{
"a": [
{
"b": 1,
"c": [
[
{
"d": 1,
}
]
],
}
]
}
],
]
table = wandb.Table(columns=["A"], data=good_data, allow_mixed_types=True)
table = wandb.Table(columns=["A"], data=bad_data, allow_mixed_types=True)
table = wandb.Table(columns=["A"], data=good_data)
with pytest.raises(TypeError):
table = wandb.Table(columns=["A"], data=bad_data)
def test_table_explicit_types():
table = wandb.Table(columns=["a", "b"], dtype=int)
table.add_data(None, None)
table.add_data(1, 2)
with pytest.raises(TypeError):
table.add_data(1, "a")
table = wandb.Table(columns=["a", "b"], optional=False, dtype=[int, str])
with pytest.raises(TypeError):
table.add_data(None, None)
table.add_data(1, "a")
with pytest.raises(TypeError):
table.add_data("a", "a")
table = wandb.Table(columns=["a", "b"], optional=[False, True], dtype=[int, str])
with pytest.raises(TypeError):
table.add_data(None, None)
with pytest.raises(TypeError):
table.add_data(None, "a")
table.add_data(1, None)
table.add_data(1, "a")
with pytest.raises(TypeError):
table.add_data("a", "a")
def test_table_type_cast():
table = wandb.Table(columns=["type_col"])
table.add_data(1)
wb_classes = data_types.Classes(
[
{"id": 1, "name": "cat"},
{"id": 2, "name": "dog"},
{"id": 3, "name": "horse"},
]
)
table.cast("type_col", wb_classes.get_type())
table.add_data(2)
with pytest.raises(TypeError):
table.add_data(4)
box_annotation = {
"box_predictions": {
"box_data": [
{
"position": {
"minX": 0.1,
"maxX": 0.2,
"minY": 0.3,
"maxY": 0.4,
},
"class_id": 1,
"box_caption": "minMax(pixel)",
"scores": {"acc": 0.1, "loss": 1.2},
},
],
"class_labels": class_labels,
},
"box_ground_truth": {
"box_data": [
{
"position": {
"minX": 0.1,
"maxX": 0.2,
"minY": 0.3,
"maxY": 0.4,
},
"class_id": 1,
"box_caption": "minMax(pixel)",
"scores": {"acc": 0.1, "loss": 1.2},
},
],
"class_labels": class_labels,
},
}
mask_annotation = {
"mask_predictions": {
"mask_data": np.random.randint(0, 4, size=(30, 30)),
"class_labels": class_labels,
},
"mask_ground_truth": {"path": im_path, "class_labels": class_labels},
}
def test_table_specials():
table = wandb.Table(
columns=["image", "table"],
optional=False,
dtype=[data_types.Image, data_types.Table],
)
with pytest.raises(TypeError):
table.add_data(None, None)
# Infers specific types from first valid row
table.add_data(
data_types.Image(
np.random.rand(10, 10),
boxes=box_annotation,
masks=mask_annotation,
),
data_types.Table(data=[[1, True, None]]),
)
# Denies conflict
with pytest.raises(TypeError):
table.add_data(
"hello",
data_types.Table(data=[[1, True, None]]),
)
# Denies conflict
with pytest.raises(TypeError):
table.add_data(
data_types.Image(
np.random.rand(10, 10),
boxes=box_annotation,
masks=mask_annotation,
),
data_types.Table(data=[[1, "True", None]]),
)
# allows further refinement
table.add_data(
data_types.Image(
np.random.rand(10, 10),
boxes=box_annotation,
masks=mask_annotation,
),
data_types.Table(data=[[1, True, 1]]),
)
# allows addition
table.add_data(
data_types.Image(
np.random.rand(10, 10),
boxes=box_annotation,
masks=mask_annotation,
),
data_types.Table(data=[[1, True, 1]]),
)
@pytest.mark.skipif(sys.version_info >= (3, 10), reason="no pandas py3.10 wheel")
def test_nan_non_float():
import pandas as pd
wandb.Table(dataframe=pd.DataFrame(data=[["A"], [np.nan]], columns=["a"]))
def test_table_typing_numpy():
# Pulled from https://numpy.org/devdocs/user/basics.types.html
# Numerics
table = wandb.Table(columns=["A"], dtype=[NumberType])
table.add_data(None)
table.add_data(42)
table.add_data(np.byte(1))
table.add_data(np.short(42))
table.add_data(np.ushort(42))
table.add_data(np.intc(42))
table.add_data(np.uintc(42))
table.add_data(np.int_(42))
table.add_data(np.uint(42))
table.add_data(np.longlong(42))
table.add_data(np.ulonglong(42))
table.add_data(np.half(42))
table.add_data(np.float16(42))
table.add_data(np.single(42))
table.add_data(np.double(42))
table.add_data(np.longdouble(42))
table.add_data(np.csingle(42))
table.add_data(np.cdouble(42))
table.add_data(np.clongdouble(42))
table.add_data(np.int8(42))
table.add_data(np.int16(42))
table.add_data(np.int32(42))
table.add_data(np.int64(42))
table.add_data(np.uint8(42))
table.add_data(np.uint16(42))
table.add_data(np.uint32(42))
table.add_data(np.uint64(42))
table.add_data(np.intp(42))
table.add_data(np.uintp(42))
table.add_data(np.float32(42))
table.add_data(np.float64(42))
table.add_data(np.float_(42))
table.add_data(np.complex64(42))
table.add_data(np.complex128(42))
table.add_data(np.complex_(42))
# Booleans
table = wandb.Table(columns=["A"], dtype=[BooleanType])
table.add_data(None)
table.add_data(True)
table.add_data(False)
table.add_data(np.bool_(True))
# Array of Numerics
table = wandb.Table(columns=["A"], dtype=[[NumberType]])
table.add_data(None)
table.add_data([42])
table.add_data(np.array([1, 0], dtype=np.byte))
table.add_data(np.array([42, 42], dtype=np.short))
table.add_data(np.array([42, 42], dtype=np.ushort))
table.add_data(np.array([42, 42], dtype=np.intc))
table.add_data(np.array([42, 42], dtype=np.uintc))
table.add_data(np.array([42, 42], dtype=np.int_))
table.add_data(np.array([42, 42], dtype=np.uint))
table.add_data(np.array([42, 42], dtype=np.longlong))
table.add_data(np.array([42, 42], dtype=np.ulonglong))
table.add_data(np.array([42, 42], dtype=np.half))
table.add_data(np.array([42, 42], dtype=np.float16))
table.add_data(np.array([42, 42], dtype=np.single))
table.add_data(np.array([42, 42], dtype=np.double))
table.add_data(np.array([42, 42], dtype=np.longdouble))
table.add_data(np.array([42, 42], dtype=np.csingle))
table.add_data(np.array([42, 42], dtype=np.cdouble))
table.add_data(np.array([42, 42], dtype=np.clongdouble))
table.add_data(np.array([42, 42], dtype=np.int8))
table.add_data(np.array([42, 42], dtype=np.int16))
table.add_data(np.array([42, 42], dtype=np.int32))
table.add_data(np.array([42, 42], dtype=np.int64))
table.add_data(np.array([42, 42], dtype=np.uint8))
table.add_data(np.array([42, 42], dtype=np.uint16))
table.add_data(np.array([42, 42], dtype=np.uint32))
table.add_data(np.array([42, 42], dtype=np.uint64))
table.add_data(np.array([42, 42], dtype=np.intp))
table.add_data(np.array([42, 42], dtype=np.uintp))
table.add_data(np.array([42, 42], dtype=np.float32))
table.add_data(np.array([42, 42], dtype=np.float64))
table.add_data(np.array([42, 42], dtype=np.float_))
table.add_data(np.array([42, 42], dtype=np.complex64))
table.add_data(np.array([42, 42], dtype=np.complex128))
table.add_data(np.array([42, 42], dtype=np.complex_))
# Array of Booleans
table = wandb.Table(columns=["A"], dtype=[[BooleanType]])
table.add_data(None)
table.add_data([True])
table.add_data([False])
table.add_data(np.array([True, False], dtype=np.bool_))
# Nested arrays
table = wandb.Table(columns=["A"])
table.add_data([[[[1, 2, 3]]]])
table.add_data(np.array([[[[1, 2, 3]]]]))
@pytest.mark.skipif(sys.version_info >= (3, 10), reason="no pandas py3.10 wheel")
def test_table_typing_pandas():
import pandas as pd
# TODO: Pandas https://pandas.pydata.org/pandas-docs/stable/user_guide/basics.html#basics-dtypes
# Numerics
table = wandb.Table(dataframe=pd.DataFrame([[1], [0]]).astype(np.byte))
table.add_data(1)
table = wandb.Table(dataframe=pd.DataFrame([[42], [42]]).astype(np.short))
table.add_data(42)
table = wandb.Table(dataframe=pd.DataFrame([[42], [42]]).astype(np.ushort))
table.add_data(42)
table = wandb.Table(dataframe=pd.DataFrame([[42], [42]]).astype(np.intc))
table.add_data(42)
table = wandb.Table(dataframe=pd.DataFrame([[42], [42]]).astype(np.uintc))
table.add_data(42)
table = wandb.Table(dataframe=pd.DataFrame([[42], [42]]).astype(np.int_))
table.add_data(42)
table = wandb.Table(dataframe=pd.DataFrame([[42], [42]]).astype(np.uint))
table.add_data(42)
table = wandb.Table(dataframe=pd.DataFrame([[42], [42]]).astype(np.longlong))
table.add_data(42)
table = wandb.Table(dataframe=pd.DataFrame([[42], [42]]).astype(np.ulonglong))
table.add_data(42)
table = wandb.Table(dataframe=pd.DataFrame([[42], [42]]).astype(np.half))
table.add_data(42)
table = wandb.Table(dataframe=pd.DataFrame([[42], [42]]).astype(np.float16))
table.add_data(42)
table = wandb.Table(dataframe=pd.DataFrame([[42], [42]]).astype(np.single))
table.add_data(42)
table = wandb.Table(dataframe=pd.DataFrame([[42], [42]]).astype(np.double))
table.add_data(42)
table = wandb.Table(dataframe=pd.DataFrame([[42], [42]]).astype(np.longdouble))
table.add_data(42)
table = wandb.Table(dataframe=pd.DataFrame([[42], [42]]).astype(np.csingle))
table.add_data(42)
table = wandb.Table(dataframe=pd.DataFrame([[42], [42]]).astype(np.cdouble))
table.add_data(42)
table = wandb.Table(dataframe=pd.DataFrame([[42], [42]]).astype(np.clongdouble))
table.add_data(42)
table = wandb.Table(dataframe=pd.DataFrame([[42], [42]]).astype(np.int8))
table.add_data(42)
table = wandb.Table(dataframe=pd.DataFrame([[42], [42]]).astype(np.int16))
table.add_data(42)
table = wandb.Table(dataframe=pd.DataFrame([[42], [42]]).astype(np.int32))
table.add_data(42)
table = wandb.Table(dataframe=pd.DataFrame([[42], [42]]).astype(np.int64))
table.add_data(42)
table = wandb.Table(dataframe=pd.DataFrame([[42], [42]]).astype(np.uint8))
table.add_data(42)
table = wandb.Table(dataframe=pd.DataFrame([[42], [42]]).astype(np.uint16))
table.add_data(42)
table = wandb.Table(dataframe=pd.DataFrame([[42], [42]]).astype(np.uint32))
table.add_data(42)
table = wandb.Table(dataframe=pd.DataFrame([[42], [42]]).astype(np.uint64))
table.add_data(42)
table = wandb.Table(dataframe=pd.DataFrame([[42], [42]]).astype(np.intp))
table.add_data(42)
table = wandb.Table(dataframe=pd.DataFrame([[42], [42]]).astype(np.uintp))
table.add_data(42)
table = wandb.Table(dataframe=pd.DataFrame([[42], [42]]).astype(np.float32))
table.add_data(42)
table = wandb.Table(dataframe=pd.DataFrame([[42], [42]]).astype(np.float64))
table.add_data(42)
table = wandb.Table(dataframe=pd.DataFrame([[42], [42]]).astype(np.float_))
table.add_data(42)
table = wandb.Table(dataframe=pd.DataFrame([[42], [42]]).astype(np.complex64))
table.add_data(42)
table = wandb.Table(dataframe=pd.DataFrame([[42], [42]]).astype(np.complex128))
table.add_data(42)
table = wandb.Table(dataframe=pd.DataFrame([[42], [42]]).astype(np.complex_))
table.add_data(42)
# Boolean
table = wandb.Table(dataframe=pd.DataFrame([[True], [False]]).astype(np.bool_))
table.add_data(True)
# String aliased
table = wandb.Table(dataframe=pd.DataFrame([[42], [42]]).astype("Int8"))
table.add_data(42)
table = wandb.Table(dataframe=pd.DataFrame([[42], [42]]).astype("Int16"))
table.add_data(42)
table = wandb.Table(dataframe= | pd.DataFrame([[42], [42]]) | pandas.DataFrame |
"""
<NAME> 2017
Variational Autoencoder - Pan Cancer
scripts/vae_pancancer.py
Usage:
Run in command line with required command arguments:
python scripts/vae_pancancer.py --learning_rate
--batch_size
--epochs
--kappa
--depth
--output_filename
--num_components
--scale
--subset_mad_genes
--dataset
Typically, arguments to this script are compiled automatically.
See `scripts/num_components_paramsweep.py` for more details
Output:
Loss and validation loss for the specific model trained
"""
import os
import sys
import argparse
import numpy as np
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
from sklearn.model_selection import train_test_split
import tensorflow as tf
from keras.layers import Input, Dense, Lambda, Layer, Activation
from keras.layers.normalization import BatchNormalization
from keras.models import Model, Sequential
from keras import backend as K
from keras import metrics, optimizers
from keras.callbacks import Callback
def run_vae(rnaseq_file, learning_rate, batch_size, epochs, kappa, depth, first_layer, output_filename, latent_dim, scale, subset_mad_genes, data_basename):
# Random seed
seed = int(np.random.randint(low=0, high=10000, size=1))
np.random.seed(seed)
# Load Data
#file = 'train_{}_expression_matrix_processed.tsv.gz'.format(dataset.lower())
#rnaseq_file = os.path.join('..', '0.expression-download', 'data', file)
rnaseq_df = pd.read_table(rnaseq_file, index_col=0)
# Determine most variably expressed genes and subset
if subset_mad_genes is not None:
mad_genes = rnaseq_df.mad(axis=0).sort_values(ascending=False)
top_mad_genes = mad_genes.iloc[0:int(subset_mad_genes), ].index
        rnaseq_df = rnaseq_df.loc[:, top_mad_genes]
# Zero One normalize input data
if scale:
scaler = MinMaxScaler()
x = scaler.fit_transform(rnaseq_df)
rnaseq_df = | pd.DataFrame(x, index=rnaseq_df.index, columns=rnaseq_df.columns) | pandas.DataFrame |
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
def __unitsFormat(unitsInput):
if unitsInput != "":
unitsOutput = " ("+unitsInput+")"
else:
unitsOutput = unitsInput
return unitsOutput
# solveData = pd.DataFrame(data = [[1,2,4,8,16,32,64,128],[1,1,2,3,4,3,2,1]], columns=[0,1,2,3,4,5,6,7])
# solveData = solveData.transpose()
# a = [[1,2,3,4],[4,3,2,1,2,3,4,3,2,1]]
# #b= np.array(a)
# c = np.array(a).T
# newData = pd.DataFrame(data = a, columns=[1,2,3,3.5,4,5,6,7,8,9])
# newData = newData.transpose()
# currentData = solveData
# currentData
# currentData = pd.concat([currentData, newData], axis=1, sort=False)
# fig = plt.figure()
# fig.suptitle("pk model")
# ax = fig.add_subplot(1,1,1)
# ax.set_xlabel("Time"+__unitsFormat("s"))
# ax.set_ylabel("Volume"+__unitsFormat("mg"))
# plt.plot(currentData)
# plt.show()
# #def updateData(solveData, newdata)
class plot():
def __init__(self, masses=[],times=[]):
solveData = pd.DataFrame(data = masses, columns=times)
self.currentData = solveData.transpose()
    def adddata(self, newData=[], newtimeseries=[]):
newData = np.array(newData)
if len(newData.shape) == 1:
newData = pd.DataFrame(data = [newData], columns=newtimeseries)
else:
newData = pd.DataFrame(data = newData, columns=newtimeseries)
newData = newData.transpose()
self.currentData = | pd.concat([self.currentData, newData], axis=1, sort=False) | pandas.concat |
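# A minimal usage sketch for the plot class above (assumed shapes: each row of `masses`
# aligns with the `times` grid, mirroring the commented-out example further up):
# p = plot(masses=[[1, 2, 4, 8]], times=[0, 1, 2, 3])
# p.adddata(newData=[3, 5, 7, 9], newtimeseries=[0, 1, 2, 3])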
"""Implements Survey class describing a single SEG-Y file"""
import os
import warnings
from copy import copy, deepcopy
from textwrap import dedent
import segyio
import numpy as np
import pandas as pd
from tqdm.auto import tqdm
from scipy.interpolate import interp1d
from .gather import Gather
from .utils import to_list, maybe_copy, calculate_stats, create_supergather_index
class Survey: # pylint: disable=too-many-instance-attributes
"""A class representing a single SEG-Y file.
In order to reduce memory footprint, `Survey` instance does not store trace data, but only a requested subset of
trace headers and general file meta such as `samples` and `sample_rate`. Trace data can be obtained by generating
an instance of `Gather` class by calling either :func:`~Survey.get_gather` or :func:`~Survey.sample_gather`
method.
The resulting gather type depends on `header_index` argument, passed during `Survey` creation: traces are grouped
into gathers by the common value of headers, defined by `header_index`. Some usual values of `header_index`
include:
- 'TRACE_SEQUENCE_FILE' - to get individual traces,
- 'FieldRecord' - to get common source gathers,
- ['GroupX', 'GroupY'] - to get common receiver gathers,
- ['INLINE_3D', 'CROSSLINE_3D'] - to get common midpoint gathers.
`header_cols` argument specifies all other trace headers to load to further be available in gather processing
pipelines. Note that `TRACE_SEQUENCE_FILE` header is not loaded from the file but always automatically
reconstructed. All loaded headers are stored in a `headers` attribute as a `pd.DataFrame` with `header_index`
columns set as its index.
Examples
--------
Create a survey of common source gathers and get a randomly selected gather from it:
>>> survey = Survey(path, header_index="FieldRecord", header_cols=["TraceNumber", "offset"])
>>> gather = survey.sample_gather()
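    A common-midpoint survey can be built the same way (a sketch reusing the header values
    listed above; `path` is assumed to point at the same SEG-Y file):
    >>> cmp_survey = Survey(path, header_index=["INLINE_3D", "CROSSLINE_3D"], header_cols="offset")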
Parameters
----------
path : str
A path to the source SEG-Y file.
header_index : str or list of str
Trace headers to be used to group traces into gathers.
header_cols : str or list of str, optional
Extra trace headers to load. If not given, only headers from `header_index` are loaded and a
`TRACE_SEQUENCE_FILE` header is created automatically.
name : str, optional
Survey name. If not given, source file name is used. This name is mainly used to identify the survey when it is
added to an index, see :class:`~index.SeismicIndex` docs for more info.
limits : int or tuple or slice, optional
Default time limits to be used during trace loading and survey statistics calculation. `int` or `tuple` are
used as arguments to init a `slice` object. If not given, whole traces are used. Measured in samples.
collect_stats : bool, optional, defaults to False
Whether to calculate trace statistics for the survey, see :func:`~Survey.collect_stats` docs for more info.
kwargs : misc, optional
Additional keyword arguments to :func:`~Survey.collect_stats`.
Attributes
----------
path : str
A path to the source SEG-Y file.
name : str
Survey name.
headers : pd.DataFrame
Loaded trace headers.
samples : 1d np.ndarray of floats
Recording time for each trace value. Measured in milliseconds.
sample_rate : float
Sample rate of seismic traces. Measured in milliseconds.
limits : slice
Default time limits to be used during trace loading and survey statistics calculation. Measured in samples.
segy_handler : segyio.segy.SegyFile
Source SEG-Y file handler.
has_stats : bool
Whether the survey has trace statistics calculated.
min : np.float32
Minimum trace value. Available only if trace statistics were calculated.
max : np.float32
Maximum trace value. Available only if trace statistics were calculated.
mean : np.float32
Mean trace value. Available only if trace statistics were calculated.
std : np.float32
Standard deviation of trace values. Available only if trace statistics were calculated.
quantile_interpolator : scipy.interpolate.interp1d
Trace values quantile interpolator. Available only if trace statistics were calculated.
n_dead_traces : int
The number of traces with constant value (dead traces). Available only if trace statistics were calculated.
"""
def __init__(self, path, header_index, header_cols=None, name=None, limits=None, collect_stats=False, **kwargs):
self.path = path
basename = os.path.splitext(os.path.basename(self.path))[0]
self.name = name if name is not None else basename
if header_cols is None:
header_cols = set()
elif header_cols == "all":
header_cols = set(segyio.tracefield.keys.keys())
else:
header_cols = set(to_list(header_cols))
header_index = to_list(header_index)
load_headers = set(header_index) | header_cols
# We always reconstruct this column, so there is no need to load it.
if "TRACE_SEQUENCE_FILE" in load_headers:
load_headers.remove("TRACE_SEQUENCE_FILE")
warn_msg = ("An automatically reconstructed TRACE_SEQUENCE_FILE header will be used instead of the one, "
f"contained in {basename}")
warnings.warn(warn_msg, RuntimeWarning)
self.segy_handler = segyio.open(self.path, ignore_geometry=True)
self.segy_handler.mmap()
# Get attributes from the source SEG-Y file.
self.sample_rate = np.float32(segyio.dt(self.segy_handler) / 1000)
self.file_samples = self.segy_handler.samples.astype(np.float32)
# Set samples and samples_length according to passed `limits`.
self.limits = None
self.samples = None
self.samples_length = None
self.set_limits(limits)
headers = {}
for column in load_headers:
headers[column] = self.segy_handler.attributes(segyio.tracefield.keys[column])[:]
headers = | pd.DataFrame(headers) | pandas.DataFrame |
import tkinter as tk
from IPython.display import display
from tkinter import filedialog
import pandas as pd
from pymongo import MongoClient
#conectando DB
client = MongoClient("mongodb+srv://jsoeiro:<EMAIL>/myFirstDatabase?retryWrites=true&w=majority")
print('conectado com o banco')
db = client['dbycar']
collection = db['usuarios']
root = tk.Tk()
root.withdraw()
file_path = filedialog.askopenfilename()
print(file_path)
data = pd.read_csv(file_path)
data.reset_index(inplace=True)
data_dict = data.to_dict("records")
df = | pd.DataFrame(data_dict) | pandas.DataFrame |
import datetime
from collections import OrderedDict
import warnings
import numpy as np
from numpy import array, nan
import pandas as pd
import pytest
from numpy.testing import assert_almost_equal, assert_allclose
from conftest import assert_frame_equal, assert_series_equal
from pvlib import irradiance
from conftest import requires_ephem, requires_numba
# fixtures create realistic test input data
# test input data generated at Location(32.2, -111, 'US/Arizona', 700)
# test input data is hard coded to avoid dependencies on other parts of pvlib
@pytest.fixture
def times():
# must include night values
return pd.date_range(start='20140624', freq='6H', periods=4,
tz='US/Arizona')
@pytest.fixture
def irrad_data(times):
return pd.DataFrame(np.array(
[[ 0. , 0. , 0. ],
[ 79.73860422, 316.1949056 , 40.46149818],
[1042.48031487, 939.95469881, 118.45831879],
[ 257.20751138, 646.22886049, 62.03376265]]),
columns=['ghi', 'dni', 'dhi'], index=times)
@pytest.fixture
def ephem_data(times):
return pd.DataFrame(np.array(
[[124.0390863 , 124.0390863 , -34.0390863 , -34.0390863 ,
352.69550699, -2.36677158],
[ 82.85457044, 82.97705621, 7.14542956, 7.02294379,
66.71410338, -2.42072165],
[ 10.56413562, 10.56725766, 79.43586438, 79.43274234,
144.76567754, -2.47457321],
[ 72.41687122, 72.46903556, 17.58312878, 17.53096444,
287.04104128, -2.52831909]]),
columns=['apparent_zenith', 'zenith', 'apparent_elevation',
'elevation', 'azimuth', 'equation_of_time'],
index=times)
@pytest.fixture
def dni_et(times):
return np.array(
[1321.1655834833093, 1321.1655834833093, 1321.1655834833093,
1321.1655834833093])
@pytest.fixture
def relative_airmass(times):
return pd.Series([np.nan, 7.58831596, 1.01688136, 3.27930443], times)
# setup for et rad test. put it here for readability
timestamp = pd.Timestamp('20161026')
dt_index = pd.DatetimeIndex([timestamp])
doy = timestamp.dayofyear
dt_date = timestamp.date()
dt_datetime = datetime.datetime.combine(dt_date, datetime.time(0))
dt_np64 = np.datetime64(dt_datetime)
value = 1383.636203
@pytest.mark.parametrize('testval, expected', [
(doy, value),
(np.float64(doy), value),
(dt_date, value),
(dt_datetime, value),
(dt_np64, value),
(np.array([doy]), np.array([value])),
(pd.Series([doy]), np.array([value])),
(dt_index, pd.Series([value], index=dt_index)),
(timestamp, value)
])
@pytest.mark.parametrize('method', [
'asce', 'spencer', 'nrel', pytest.param('pyephem', marks=requires_ephem)])
def test_get_extra_radiation(testval, expected, method):
out = irradiance.get_extra_radiation(testval, method=method)
assert_allclose(out, expected, atol=10)
def test_get_extra_radiation_epoch_year():
out = irradiance.get_extra_radiation(doy, method='nrel', epoch_year=2012)
assert_allclose(out, 1382.4926804890767, atol=0.1)
@requires_numba
def test_get_extra_radiation_nrel_numba(times):
with warnings.catch_warnings():
# don't warn on method reload or num threads
warnings.simplefilter("ignore")
result = irradiance.get_extra_radiation(
times, method='nrel', how='numba', numthreads=4)
# and reset to no-numba state
irradiance.get_extra_radiation(times, method='nrel')
assert_allclose(result,
[1322.332316, 1322.296282, 1322.261205, 1322.227091])
def test_get_extra_radiation_invalid():
with pytest.raises(ValueError):
irradiance.get_extra_radiation(300, method='invalid')
def test_grounddiffuse_simple_float():
result = irradiance.get_ground_diffuse(40, 900)
assert_allclose(result, 26.32000014911496)
def test_grounddiffuse_simple_series(irrad_data):
ground_irrad = irradiance.get_ground_diffuse(40, irrad_data['ghi'])
assert ground_irrad.name == 'diffuse_ground'
def test_grounddiffuse_albedo_0(irrad_data):
ground_irrad = irradiance.get_ground_diffuse(
40, irrad_data['ghi'], albedo=0)
assert 0 == ground_irrad.all()
def test_grounddiffuse_albedo_invalid_surface(irrad_data):
with pytest.raises(KeyError):
irradiance.get_ground_diffuse(
40, irrad_data['ghi'], surface_type='invalid')
def test_grounddiffuse_albedo_surface(irrad_data):
result = irradiance.get_ground_diffuse(40, irrad_data['ghi'],
surface_type='sand')
assert_allclose(result, [0, 3.731058, 48.778813, 12.035025], atol=1e-4)
def test_isotropic_float():
result = irradiance.isotropic(40, 100)
assert_allclose(result, 88.30222215594891)
def test_isotropic_series(irrad_data):
result = irradiance.isotropic(40, irrad_data['dhi'])
assert_allclose(result, [0, 35.728402, 104.601328, 54.777191], atol=1e-4)
def test_klucher_series_float():
# klucher inputs
surface_tilt, surface_azimuth = 40.0, 180.0
dhi, ghi = 100.0, 900.0
solar_zenith, solar_azimuth = 20.0, 180.0
# expect same result for floats and pd.Series
expected = irradiance.klucher(
surface_tilt, surface_azimuth,
pd.Series(dhi), pd.Series(ghi),
pd.Series(solar_zenith), pd.Series(solar_azimuth)
) # 94.99429931664851
result = irradiance.klucher(
surface_tilt, surface_azimuth, dhi, ghi, solar_zenith, solar_azimuth
)
assert_allclose(result, expected[0])
def test_klucher_series(irrad_data, ephem_data):
result = irradiance.klucher(40, 180, irrad_data['dhi'], irrad_data['ghi'],
ephem_data['apparent_zenith'],
ephem_data['azimuth'])
# pvlib matlab 1.4 does not contain the max(cos_tt, 0) correction
# so, these values are different
assert_allclose(result, [0., 36.789794, 109.209347, 56.965916], atol=1e-4)
# expect same result for np.array and pd.Series
expected = irradiance.klucher(
40, 180, irrad_data['dhi'].values, irrad_data['ghi'].values,
ephem_data['apparent_zenith'].values, ephem_data['azimuth'].values
)
assert_allclose(result, expected, atol=1e-4)
def test_haydavies(irrad_data, ephem_data, dni_et):
result = irradiance.haydavies(
40, 180, irrad_data['dhi'], irrad_data['dni'], dni_et,
ephem_data['apparent_zenith'], ephem_data['azimuth'])
# values from matlab 1.4 code
assert_allclose(result, [0, 27.1775, 102.9949, 33.1909], atol=1e-4)
def test_reindl(irrad_data, ephem_data, dni_et):
result = irradiance.reindl(
40, 180, irrad_data['dhi'], irrad_data['dni'], irrad_data['ghi'],
dni_et, ephem_data['apparent_zenith'], ephem_data['azimuth'])
# values from matlab 1.4 code
assert_allclose(result, [np.nan, 27.9412, 104.1317, 34.1663], atol=1e-4)
def test_king(irrad_data, ephem_data):
result = irradiance.king(40, irrad_data['dhi'], irrad_data['ghi'],
ephem_data['apparent_zenith'])
assert_allclose(result, [0, 44.629352, 115.182626, 79.719855], atol=1e-4)
def test_perez(irrad_data, ephem_data, dni_et, relative_airmass):
dni = irrad_data['dni'].copy()
dni.iloc[2] = np.nan
out = irradiance.perez(40, 180, irrad_data['dhi'], dni,
dni_et, ephem_data['apparent_zenith'],
ephem_data['azimuth'], relative_airmass)
expected = pd.Series(np.array(
[ 0. , 31.46046871, np.nan, 45.45539877]),
index=irrad_data.index)
assert_series_equal(out, expected, check_less_precise=2)
def test_perez_components(irrad_data, ephem_data, dni_et, relative_airmass):
dni = irrad_data['dni'].copy()
dni.iloc[2] = np.nan
out = irradiance.perez(40, 180, irrad_data['dhi'], dni,
dni_et, ephem_data['apparent_zenith'],
ephem_data['azimuth'], relative_airmass,
return_components=True)
expected = pd.DataFrame(np.array(
[[ 0. , 31.46046871, np.nan, 45.45539877],
[ 0. , 26.84138589, np.nan, 31.72696071],
[ 0. , 0. , np.nan, 4.47966439],
[ 0. , 4.62212181, np.nan, 9.25316454]]).T,
columns=['sky_diffuse', 'isotropic', 'circumsolar', 'horizon'],
index=irrad_data.index
)
expected_for_sum = expected['sky_diffuse'].copy()
expected_for_sum.iloc[2] = 0
sum_components = out.iloc[:, 1:].sum(axis=1)
sum_components.name = 'sky_diffuse'
assert_frame_equal(out, expected, check_less_precise=2)
assert_series_equal(sum_components, expected_for_sum, check_less_precise=2)
def test_perez_arrays(irrad_data, ephem_data, dni_et, relative_airmass):
dni = irrad_data['dni'].copy()
dni.iloc[2] = np.nan
out = irradiance.perez(40, 180, irrad_data['dhi'].values, dni.values,
dni_et, ephem_data['apparent_zenith'].values,
ephem_data['azimuth'].values,
relative_airmass.values)
expected = np.array(
[ 0. , 31.46046871, np.nan, 45.45539877])
assert_allclose(out, expected, atol=1e-2)
assert isinstance(out, np.ndarray)
def test_perez_scalar():
# copied values from fixtures
out = irradiance.perez(40, 180, 118.45831879, 939.95469881,
1321.1655834833093, 10.56413562, 144.76567754,
1.01688136)
# this will fail. out is ndarry with ndim == 0. fix in future version.
# assert np.isscalar(out)
assert_allclose(out, 109.084332)
@pytest.mark.parametrize('model', ['isotropic', 'klucher', 'haydavies',
'reindl', 'king', 'perez'])
def test_sky_diffuse_zenith_close_to_90(model):
# GH 432
sky_diffuse = irradiance.get_sky_diffuse(
30, 180, 89.999, 230,
dni=10, ghi=51, dhi=50, dni_extra=1360, airmass=12, model=model)
assert sky_diffuse < 100
def test_get_sky_diffuse_invalid():
with pytest.raises(ValueError):
irradiance.get_sky_diffuse(
30, 180, 0, 180, 1000, 1100, 100, dni_extra=1360, airmass=1,
model='invalid')
def test_liujordan():
expected = pd.DataFrame(np.array(
[[863.859736967, 653.123094076, 220.65905025]]),
columns=['ghi', 'dni', 'dhi'],
index=[0])
out = irradiance.liujordan(
pd.Series([10]), pd.Series([0.5]), pd.Series([1.1]), dni_extra=1400)
assert_frame_equal(out, expected)
def test_get_total_irradiance(irrad_data, ephem_data, dni_et, relative_airmass):
models = ['isotropic', 'klucher',
'haydavies', 'reindl', 'king', 'perez']
for model in models:
total = irradiance.get_total_irradiance(
32, 180,
ephem_data['apparent_zenith'], ephem_data['azimuth'],
dni=irrad_data['dni'], ghi=irrad_data['ghi'],
dhi=irrad_data['dhi'],
dni_extra=dni_et, airmass=relative_airmass,
model=model,
surface_type='urban')
assert total.columns.tolist() == ['poa_global', 'poa_direct',
'poa_diffuse', 'poa_sky_diffuse',
'poa_ground_diffuse']
@pytest.mark.parametrize('model', ['isotropic', 'klucher',
'haydavies', 'reindl', 'king', 'perez'])
def test_get_total_irradiance_scalars(model):
total = irradiance.get_total_irradiance(
32, 180,
10, 180,
dni=1000, ghi=1100,
dhi=100,
dni_extra=1400, airmass=1,
model=model,
surface_type='urban')
assert list(total.keys()) == ['poa_global', 'poa_direct',
'poa_diffuse', 'poa_sky_diffuse',
'poa_ground_diffuse']
# test that none of the values are nan
assert np.isnan(np.array(list(total.values()))).sum() == 0
def test_poa_components(irrad_data, ephem_data, dni_et, relative_airmass):
aoi = irradiance.aoi(40, 180, ephem_data['apparent_zenith'],
ephem_data['azimuth'])
gr_sand = irradiance.get_ground_diffuse(40, irrad_data['ghi'],
surface_type='sand')
diff_perez = irradiance.perez(
40, 180, irrad_data['dhi'], irrad_data['dni'], dni_et,
ephem_data['apparent_zenith'], ephem_data['azimuth'], relative_airmass)
out = irradiance.poa_components(
aoi, irrad_data['dni'], diff_perez, gr_sand)
expected = pd.DataFrame(np.array(
[[ 0. , -0. , 0. , 0. ,
0. ],
[ 35.19456561, 0. , 35.19456561, 31.4635077 ,
3.73105791],
[956.18253696, 798.31939281, 157.86314414, 109.08433162,
48.77881252],
[ 90.99624896, 33.50143401, 57.49481495, 45.45978964,
12.03502531]]),
columns=['poa_global', 'poa_direct', 'poa_diffuse', 'poa_sky_diffuse',
'poa_ground_diffuse'],
index=irrad_data.index)
assert_frame_equal(out, expected)
@pytest.mark.parametrize('pressure,expected', [
(93193, [[830.46567, 0.79742, 0.93505],
[676.09497, 0.63776, 3.02102]]),
(None, [[868.72425, 0.79742, 1.01664],
[680.66679, 0.63776, 3.28463]]),
(101325, [[868.72425, 0.79742, 1.01664],
[680.66679, 0.63776, 3.28463]])
])
def test_disc_value(pressure, expected):
# see GH 449 for pressure=None vs. 101325.
columns = ['dni', 'kt', 'airmass']
times = pd.DatetimeIndex(['2014-06-24T1200', '2014-06-24T1800'],
tz='America/Phoenix')
ghi = pd.Series([1038.62, 254.53], index=times)
zenith = pd.Series([10.567, 72.469], index=times)
out = irradiance.disc(ghi, zenith, times, pressure=pressure)
expected_values = np.array(expected)
expected = pd.DataFrame(expected_values, columns=columns, index=times)
# check the pandas dataframe. check_less_precise is weird
assert_frame_equal(out, expected, check_less_precise=True)
# use np.assert_allclose to check values more clearly
assert_allclose(out.values, expected_values, atol=1e-5)
def test_disc_overirradiance():
columns = ['dni', 'kt', 'airmass']
ghi = np.array([3000])
solar_zenith = np.full_like(ghi, 0)
times = pd.date_range(start='2016-07-19 12:00:00', freq='1s',
periods=len(ghi), tz='America/Phoenix')
out = irradiance.disc(ghi=ghi, solar_zenith=solar_zenith,
datetime_or_doy=times)
expected = pd.DataFrame(np.array(
[[8.72544336e+02, 1.00000000e+00, 9.99493933e-01]]),
columns=columns, index=times)
assert_frame_equal(out, expected)
def test_disc_min_cos_zenith_max_zenith():
# map out behavior under difficult conditions with various
# limiting kwargs settings
columns = ['dni', 'kt', 'airmass']
times = pd.DatetimeIndex(['2016-07-19 06:11:00'], tz='America/Phoenix')
out = irradiance.disc(ghi=1.0, solar_zenith=89.99, datetime_or_doy=times)
expected = pd.DataFrame(np.array(
[[0.00000000e+00, 1.16046346e-02, 12.0]]),
columns=columns, index=times)
assert_frame_equal(out, expected)
# max_zenith and/or max_airmass keep these results reasonable
out = irradiance.disc(ghi=1.0, solar_zenith=89.99, datetime_or_doy=times,
min_cos_zenith=0)
expected = pd.DataFrame(np.array(
[[0.00000000e+00, 1.0, 12.0]]),
columns=columns, index=times)
assert_frame_equal(out, expected)
# still get reasonable values because of max_airmass=12 limit
out = irradiance.disc(ghi=1.0, solar_zenith=89.99, datetime_or_doy=times,
max_zenith=100)
expected = pd.DataFrame(np.array(
[[0., 1.16046346e-02, 12.0]]),
columns=columns, index=times)
assert_frame_equal(out, expected)
# still get reasonable values because of max_airmass=12 limit
out = irradiance.disc(ghi=1.0, solar_zenith=89.99, datetime_or_doy=times,
min_cos_zenith=0, max_zenith=100)
expected = pd.DataFrame(np.array(
[[277.50185968, 1.0, 12.0]]),
columns=columns, index=times)
assert_frame_equal(out, expected)
# max_zenith keeps this result reasonable
out = irradiance.disc(ghi=1.0, solar_zenith=89.99, datetime_or_doy=times,
min_cos_zenith=0, max_airmass=100)
expected = pd.DataFrame(np.array(
[[0.00000000e+00, 1.0, 36.39544757]]),
columns=columns, index=times)
assert_frame_equal(out, expected)
# allow zenith to be close to 90 and airmass to be infinite
# and we get crazy values
out = irradiance.disc(ghi=1.0, solar_zenith=89.99, datetime_or_doy=times,
max_zenith=100, max_airmass=100)
expected = pd.DataFrame(np.array(
[[6.68577449e+03, 1.16046346e-02, 3.63954476e+01]]),
columns=columns, index=times)
assert_frame_equal(out, expected)
# allow min cos zenith to be 0, zenith to be close to 90,
# and airmass to be very big and we get even higher DNI values
out = irradiance.disc(ghi=1.0, solar_zenith=89.99, datetime_or_doy=times,
min_cos_zenith=0, max_zenith=100, max_airmass=100)
expected = pd.DataFrame(np.array(
[[7.21238390e+03, 1., 3.63954476e+01]]),
columns=columns, index=times)
assert_frame_equal(out, expected)
def test_dirint_value():
times = pd.DatetimeIndex(['2014-06-24T12-0700', '2014-06-24T18-0700'])
ghi = pd.Series([1038.62, 254.53], index=times)
zenith = pd.Series([10.567, 72.469], index=times)
pressure = 93193.
dirint_data = irradiance.dirint(ghi, zenith, times, pressure=pressure)
assert_almost_equal(dirint_data.values,
np.array([868.8, 699.7]), 1)
def test_dirint_nans():
times = pd.date_range(start='2014-06-24T12-0700', periods=5, freq='6H')
ghi = pd.Series([np.nan, 1038.62, 1038.62, 1038.62, 1038.62], index=times)
zenith = pd.Series([10.567, np.nan, 10.567, 10.567, 10.567], index=times)
pressure = pd.Series([93193., 93193., np.nan, 93193., 93193.], index=times)
temp_dew = pd.Series([10, 10, 10, np.nan, 10], index=times)
dirint_data = irradiance.dirint(ghi, zenith, times, pressure=pressure,
temp_dew=temp_dew)
assert_almost_equal(dirint_data.values,
np.array([np.nan, np.nan, np.nan, np.nan, 893.1]), 1)
def test_dirint_tdew():
times = pd.DatetimeIndex(['2014-06-24T12-0700', '2014-06-24T18-0700'])
ghi = pd.Series([1038.62, 254.53], index=times)
zenith = pd.Series([10.567, 72.469], index=times)
pressure = 93193.
dirint_data = irradiance.dirint(ghi, zenith, times, pressure=pressure,
temp_dew=10)
assert_almost_equal(dirint_data.values,
np.array([882.1, 672.6]), 1)
def test_dirint_no_delta_kt():
times = pd.DatetimeIndex(['2014-06-24T12-0700', '2014-06-24T18-0700'])
ghi = pd.Series([1038.62, 254.53], index=times)
zenith = pd.Series([10.567, 72.469], index=times)
pressure = 93193.
dirint_data = irradiance.dirint(ghi, zenith, times, pressure=pressure,
use_delta_kt_prime=False)
assert_almost_equal(dirint_data.values,
np.array([861.9, 670.4]), 1)
def test_dirint_coeffs():
coeffs = irradiance._get_dirint_coeffs()
assert coeffs[0, 0, 0, 0] == 0.385230
assert coeffs[0, 1, 2, 1] == 0.229970
assert coeffs[3, 2, 6, 3] == 1.032260
def test_dirint_min_cos_zenith_max_zenith():
# map out behavior under difficult conditions with various
# limiting kwargs settings
# times don't have any physical relevance
times = pd.DatetimeIndex(['2014-06-24T12-0700', '2014-06-24T18-0700'])
ghi = pd.Series([0, 1], index=times)
solar_zenith = pd.Series([90, 89.99], index=times)
out = irradiance.dirint(ghi, solar_zenith, times)
expected = pd.Series([0.0, 0.0], index=times, name='dni')
assert_series_equal(out, expected)
out = irradiance.dirint(ghi, solar_zenith, times, min_cos_zenith=0)
expected = pd.Series([0.0, 0.0], index=times, name='dni')
assert_series_equal(out, expected)
out = irradiance.dirint(ghi, solar_zenith, times, max_zenith=90)
expected = pd.Series([0.0, 0.0], index=times, name='dni')
assert_series_equal(out, expected, check_less_precise=True)
out = irradiance.dirint(ghi, solar_zenith, times, min_cos_zenith=0,
max_zenith=90)
expected = pd.Series([0.0, 144.264507], index=times, name='dni')
assert_series_equal(out, expected, check_less_precise=True)
out = irradiance.dirint(ghi, solar_zenith, times, min_cos_zenith=0,
max_zenith=100)
expected = pd.Series([0.0, 144.264507], index=times, name='dni')
assert_series_equal(out, expected, check_less_precise=True)
def test_gti_dirint():
times = pd.DatetimeIndex(
['2014-06-24T06-0700', '2014-06-24T09-0700', '2014-06-24T12-0700'])
poa_global = np.array([20, 300, 1000])
aoi = np.array([100, 70, 10])
zenith = np.array([80, 45, 20])
azimuth = np.array([90, 135, 180])
surface_tilt = 30
surface_azimuth = 180
# test defaults
output = irradiance.gti_dirint(
poa_global, aoi, zenith, azimuth, times, surface_tilt, surface_azimuth)
expected_col_order = ['ghi', 'dni', 'dhi']
expected = pd.DataFrame(array(
[[ 21.05796198, 0. , 21.05796198],
[ 288.22574368, 60.59964218, 245.37532576],
[ 931.04078010, 695.94965324, 277.06172442]]),
columns=expected_col_order, index=times)
assert_frame_equal(output, expected)
# test ignore calculate_gt_90
output = irradiance.gti_dirint(
poa_global, aoi, zenith, azimuth, times, surface_tilt, surface_azimuth,
calculate_gt_90=False)
expected_no_90 = expected.copy()
expected_no_90.iloc[0, :] = np.nan
assert_frame_equal(output, expected_no_90)
# test pressure input
pressure = 93193.
output = irradiance.gti_dirint(
poa_global, aoi, zenith, azimuth, times, surface_tilt, surface_azimuth,
pressure=pressure)
expected = pd.DataFrame(array(
[[ 21.05796198, 0. , 21.05796198],
[ 289.81109139, 60.52460392, 247.01373353],
[ 932.46756378, 648.05001357, 323.49974813]]),
columns=expected_col_order, index=times)
assert_frame_equal(output, expected)
# test albedo input
albedo = 0.05
output = irradiance.gti_dirint(
poa_global, aoi, zenith, azimuth, times, surface_tilt, surface_azimuth,
albedo=albedo)
expected = pd.DataFrame(array(
[[ 21.3592591, 0. , 21.3592591 ],
[ 292.5162373, 64.42628826, 246.95997198],
[ 941.6753031, 727.16311901, 258.36548605]]),
columns=expected_col_order, index=times)
assert_frame_equal(output, expected)
# test temp_dew input
temp_dew = np.array([70, 80, 20])
output = irradiance.gti_dirint(
poa_global, aoi, zenith, azimuth, times, surface_tilt, surface_azimuth,
temp_dew=temp_dew)
expected = pd.DataFrame(array(
[[ 21.05796198, 0. , 21.05796198],
[ 292.40468994, 36.79559287, 266.3862767 ],
[ 931.79627208, 689.81549269, 283.5817439]]),
columns=expected_col_order, index=times)
assert_frame_equal(output, expected)
def test_erbs():
index = pd.DatetimeIndex(['20190101']*3 + ['20190620'])
ghi = pd.Series([0, 50, 1000, 1000], index=index)
zenith = pd.Series([120, 85, 10, 10], index=index)
expected = pd.DataFrame(np.array(
[[0.00000000e+00, 0.00000000e+00, 0.00000000e+00],
[9.67192672e+01, 4.15703604e+01, 4.05723511e-01],
[7.94205651e+02, 2.17860117e+02, 7.18132729e-01],
[8.42001578e+02, 1.70790318e+02, 7.68214312e-01]]),
columns=['dni', 'dhi', 'kt'], index=index)
out = irradiance.erbs(ghi, zenith, index)
assert_frame_equal(np.round(out, 0), np.round(expected, 0))
def test_erbs_min_cos_zenith_max_zenith():
# map out behavior under difficult conditions with various
# limiting kwargs settings
columns = ['dni', 'dhi', 'kt']
times = pd.DatetimeIndex(['2016-07-19 06:11:00'], tz='America/Phoenix')
# max_zenith keeps these results reasonable
out = irradiance.erbs(ghi=1.0, zenith=89.99999,
datetime_or_doy=times, min_cos_zenith=0)
expected = pd.DataFrame(np.array(
[[0., 1., 1.]]),
columns=columns, index=times)
assert_frame_equal(out, expected)
# 4-5 9s will produce bad behavior without max_zenith limit
out = irradiance.erbs(ghi=1.0, zenith=89.99999,
datetime_or_doy=times, max_zenith=100)
expected = pd.DataFrame(np.array(
[[6.00115286e+03, 9.98952601e-01, 1.16377640e-02]]),
columns=columns, index=times)
assert_frame_equal(out, expected)
# 1-2 9s will produce bad behavior without either limit
out = irradiance.erbs(ghi=1.0, zenith=89.99, datetime_or_doy=times,
min_cos_zenith=0, max_zenith=100)
expected = pd.DataFrame(np.array(
[[4.78419761e+03, 1.65000000e-01, 1.00000000e+00]]),
columns=columns, index=times)
assert_frame_equal(out, expected)
# check default behavior under hardest condition
out = irradiance.erbs(ghi=1.0, zenith=90, datetime_or_doy=times)
expected = pd.DataFrame(np.array(
[[0., 1., 0.01163776]]),
columns=columns, index=times)
assert_frame_equal(out, expected)
def test_erbs_all_scalar():
ghi = 1000
zenith = 10
doy = 180
expected = OrderedDict()
expected['dni'] = 8.42358014e+02
expected['dhi'] = 1.70439297e+02
expected['kt'] = 7.68919470e-01
out = irradiance.erbs(ghi, zenith, doy)
for k, v in out.items():
assert_allclose(v, expected[k], 5)
def test_dirindex(times):
ghi = | pd.Series([0, 0, 1038.62, 254.53], index=times) | pandas.Series |
import pandas as pd
import numpy as np
en_plt = False
en_tabulate = False
try:
import matplotlib.pyplot as plt
en_plt = True
except Exception as err:
print("not able to load matplotlib.pyplot")
pass
try:
from tabulate import tabulate
en_tabulate = True
except Exception as err:
print("not able to load tabulate")
pass
class PlotFunctions:
"""This class provides plot functions for MCenter time capture
"""
def __init__(self, mtc):
"""Initialized the parameters of the untar class."""
self._mtc = mtc
self._events_df_file = mtc.get_events()
self._file_names = mtc.get_file_names()
self._matrix_df_file = mtc.get_matrix_df_file()
self._multigraph_df_file = mtc.get_multigraph_df_file()
self._attribute_names_list = mtc.get_attribute_names_list()
def line_plot(self, name):
"""
Plotting of line graphs per bin per file name
:param self:
:param name: attibute name
:return:
"""
df = self._mtc.get_stats(name, mlapp_node=None, agent=None, start_time=None, end_time=None)
if ("keys" in df.columns) and en_plt:
color_list = ['r', 'b', 'g', 'c', 'k', 'y', '0.75', 'm', '0.25']
bins, bins_vert = self.hist_bin_adapt(df)
name_pipeline, td_matrix = self.align_bins(df)
all_pipelines = list(set(name_pipeline))
num_of_bins = len(bins)
for pipelines_elements in all_pipelines:
fig = plt.figure()
ax3 = fig.add_subplot(111)
# Get Elements for the specific pipeline
file_index = [i for i, e in enumerate(name_pipeline)
if e == pipelines_elements]
lineplot_time1 = df["time"].iloc[file_index].tolist()
time_values1 = df["datetime"].iloc[file_index].tolist()
# Plot per bin
for bin_index in range(0, num_of_bins):
td_matrix1 = td_matrix[file_index, bin_index]
ax3.plot(lineplot_time1, td_matrix1,
color=color_list[bin_index % (len(color_list))],
label="Bin " + str(bins[bin_index]), linewidth=4)
ax3.set_xticklabels(time_values1, ha='center')
ax3.tick_params(labelsize=8)
self.annotate_events(figure=ax3)
ax3.legend(bbox_to_anchor=(1, 1), prop={'size': 10}, loc=2)
ax3.grid()
ax3.set_title('Linegraph vs time for ' + str(name) + " @ Pipeline "
+ pipelines_elements)
ax3.set_ylabel('value')
ax3.set_xlabel('time')
def annotate_events(self, figure):
"""
Event annotation in the plot
:param self:
:param figure: Figure
:return:
"""
# set marker in model change event (blue) and in alerts (red)
first_model = True
first_alert = True
for location_index in range(0, self._events_df_file.shape[0]):
if self._events_df_file["eventType"].loc[location_index] == "Model":
if first_model:
figure.axvline(x=self._events_df_file["time"]
.loc[location_index], linewidth=2, linestyle='dashed',
color='b', label='model_Update')
first_model = False
else:
figure.axvline(x=self._events_df_file["time"]
.loc[location_index], linewidth=2, linestyle='dashed', color='b')
if self._events_df_file["raiseAlert"].loc[location_index] == 1:
if first_alert:
figure.axvline(x=self._events_df_file["time"]
.loc[location_index], linewidth=2, linestyle='dashed',
color='r', label='Health_Alert')
first_alert = False
else:
figure.axvline(x=self._events_df_file["time"]
.loc[location_index], linewidth=2, linestyle='dashed', color='r')
def bar_plot(self, name):
"""
Plotting of Overlapping Bar Graphs:
:param self:
:param name: Attribute name
:return:
"""
df = self._mtc.get_stats(name, mlapp_node=None, agent=None, start_time=None, end_time=None)
if ("keys" in df.columns) and en_plt:
bins, bins_vert = self.hist_bin_adapt(df)
name_pipeline, td_matrix = self.align_bins(df)
if len(bins) > 1:
fig = plt.figure()
figure = fig.add_subplot(111)
color_list = [(1, 0, 0), (0, 0, 1)]
bins_scale = np.arange(0, len(bins))
all_pipelines = list(set(name_pipeline))
for p_idx, pipeline in enumerate(all_pipelines):
file_index = [i for i, e in enumerate(name_pipeline)
if e == pipeline]
for location_index in file_index:
# 2D plotting of bar graph
colors = tuple((location_index + 1) / (max(file_index) + 1) *
np.array(color_list[p_idx % (len(color_list))]))
figure.bar(bins_scale, td_matrix[location_index, :],
color=colors, align='center', alpha=0.1)
figure.bar(bins_scale, td_matrix[location_index, :],
color=color_list[p_idx % (len(color_list))],
align='center', alpha=0.1,
label="pipeline " + pipeline)
figure.set_xticks(bins_scale)
figure.set_xticklabels(bins_vert)
figure.tick_params(labelsize=8)
figure.legend(bbox_to_anchor=(1, 1), prop={'size': 10}, loc=2)
figure.grid()
figure.set_title('BarGraph for ' + str(name))
figure.set_ylabel('normalized bar')
figure.set_xlabel('bars')
# Matrix Printing
def print_matrix(self):
"""
        printing matrix:
:param self:
:return:
"""
for filename in self._file_names:
try:
df_file = self._matrix_df_file[filename]
print("")
print("")
print("=======================================================")
name_list = df_file["Name"].unique()
for name in name_list:
df = df_file[df_file['Name'] == name]
df = df.reset_index()
num_graphs = df.shape[0]
matrix_df = | pd.DataFrame() | pandas.DataFrame |
import pandas as pd
import numpy as np
import covasim as cv # Version used in our study is 3.07
import random
from causal_testing.specification.causal_dag import CausalDAG
from causal_testing.specification.scenario import Scenario
from causal_testing.specification.variable import Input, Output
from causal_testing.specification.causal_specification import CausalSpecification
from causal_testing.data_collection.data_collector import ExperimentalDataCollector
from causal_testing.testing.causal_test_case import CausalTestCase
from causal_testing.testing.causal_test_outcome import Positive, Negative, NoEffect
from causal_testing.testing.intervention import Intervention
from causal_testing.testing.causal_test_engine import CausalTestEngine
from causal_testing.testing.estimators import LinearRegressionEstimator
def experimental_causal_test_vaccinate_elderly(runs_per_test_per_config: int = 30, verbose: bool = False):
""" Run the causal test case for the effect of changing vaccine to prioritise elderly. This uses the experimental
data collector.
:param runs_per_test_per_config: Number of times to run each input configuration (control and treatment) per test.
Hence, the total number of runs per test will be twice this value.
:param verbose: Whether to print verbose details (causal test results).
:return results_dict: A dictionary containing ATE, 95% CIs, and Test Pass/Fail
"""
# 1. Read in the Causal DAG
causal_dag = CausalDAG('dag.dot')
# 2. Create variables
pop_size = Input('pop_size', int)
pop_infected = Input('pop_infected', int)
n_days = Input('n_days', int)
vaccine = Input('vaccine', int)
cum_infections = Output('cum_infections', int)
cum_vaccinations = Output('cum_vaccinations', int)
cum_vaccinated = Output('cum_vaccinated', int)
max_doses = Output('max_doses', int)
# 3. Create scenario by applying constraints over a subset of the input variables
scenario = Scenario(variables={pop_size, pop_infected, n_days, cum_infections, vaccine,
cum_vaccinated, cum_vaccinations, max_doses},
constraints={pop_size.z3 == 50000, pop_infected.z3 == 1000, n_days.z3 == 50})
# 4. Construct a causal specification from the scenario and causal DAG
causal_specification = CausalSpecification(scenario, causal_dag)
# 5. Instantiate the experimental data collector for Covasim
covasim_parameters_dict = {'pop_size': 50000,
'pop_type': 'hybrid',
'pop_infected': 1000,
'n_days': 50}
control_input_configuration = {'covasim_parameters_dict': covasim_parameters_dict,
'target_elderly': False}
treatment_input_configuration = {'covasim_parameters_dict': covasim_parameters_dict,
'target_elderly': True}
data_collector = CovasimVaccineDataCollector(scenario, control_input_configuration,
treatment_input_configuration,
runs_per_test_per_config)
# 6. Express expected outcomes
expected_outcome_effects = {cum_infections: Positive(),
cum_vaccinations: Negative(),
cum_vaccinated: Negative(),
max_doses: NoEffect()
}
results_dict = {'cum_infections': {},
'cum_vaccinations': {},
'cum_vaccinated': {},
'max_doses': {}
}
for outcome_variable, expected_effect in expected_outcome_effects.items():
causal_test_case = CausalTestCase(control_input_configuration={vaccine: 0},
expected_causal_effect=expected_effect,
treatment_input_configuration={vaccine: 1},
outcome_variables={outcome_variable})
# 7. Create an instance of the causal test engine
causal_test_engine = CausalTestEngine(causal_test_case, causal_specification, data_collector)
# 8. Obtain the minimal adjustment set for the causal test case from the causal DAG
minimal_adjustment_set = causal_test_engine.load_data(index_col=0)
# 9. Build statistical model
linear_regression_estimator = LinearRegressionEstimator((vaccine.name,), 1, 0,
minimal_adjustment_set,
(outcome_variable.name,))
# 10. Execute test and save results in dict
causal_test_result = causal_test_engine.execute_test(linear_regression_estimator, 'ate')
if verbose:
print(f"Causation:\n{causal_test_result}")
results_dict[outcome_variable.name]['ate'] = causal_test_result.ate
results_dict[outcome_variable.name]['cis'] = causal_test_result.confidence_intervals
results_dict[outcome_variable.name]['test_passes'] = causal_test_case.expected_causal_effect.apply(
causal_test_result)
return results_dict
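# Usage sketch (hypothetical driver code; it would sit at the bottom of the module so that
# CovasimVaccineDataCollector below is already defined when the test executes):
# results = experimental_causal_test_vaccinate_elderly(runs_per_test_per_config=30, verbose=True)
# print(results['cum_infections']['ate'], results['cum_infections']['test_passes'])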
class CovasimVaccineDataCollector(ExperimentalDataCollector):
"""A custom experimental data collector for the elderly vaccination Covasim case study.
This experimental data collector runs covasim with a normal Pfizer vaccine and then again with the same vaccine but
this time prioritising the elderly for vaccination.
"""
def run_system_with_input_configuration(self, input_configuration: dict) -> pd.DataFrame:
""" Run the system with a given input configuration.
:param input_configuration: A nested dictionary containing Covasim parameters, desired number of repeats, and
a bool to determine whether elderly should be prioritised for vaccination.
:return: A dataframe containing results for this input configuration.
"""
results_df = self.simulate_vaccine(input_configuration['covasim_parameters_dict'],
self.n_repeats,
input_configuration['target_elderly'])
return results_df
def simulate_vaccine(self, pars_dict: dict, n_simulations: int = 100, target_elderly: bool = False):
""" Simulate observational data that contains a vaccine that is optionally given preferentially to the elderly.
:param pars_dict: A dictionary containing simulation parameters.
:param n_simulations: Number of simulations to run.
:param target_elderly: Whether to prioritise vaccination for the elderly.
:return: A pandas dataframe containing results for each run.
"""
simulations_results_dfs = []
for sim_n in range(n_simulations):
print(f'Simulation {sim_n + 1}/{n_simulations}.')
# Update simulation parameters with vaccine and optionally sub-target
if target_elderly:
print("Prioritising the elderly for vaccination")
vaccine = cv.vaccinate_prob(vaccine="Pfizer", label="prioritise_elderly",
subtarget=self.vaccinate_by_age, days=list(range(7, pars_dict['n_days'])))
else:
print("Using standard vaccination protocol")
vaccine = cv.vaccinate_prob(vaccine="Pfizer", label="regular", days=list(range(7, pars_dict['n_days'])))
pars_dict['interventions'] = vaccine
pars_dict['use_waning'] = True # Must be set to true for vaccination
sim_results_df = self.run_sim_with_pars(pars_dict=pars_dict,
desired_outputs=['cum_infections', 'cum_deaths', 'cum_recoveries',
'cum_vaccinations', 'cum_vaccinated'],
n_runs=1)
sim_results_df['interventions'] = vaccine.label # Store label in results instead of vaccine object
sim_results_df['target_elderly'] = target_elderly
sim_results_df['vaccine'] = int(target_elderly) # 0 if standard vaccine, 1 if target elderly vaccine
sim_results_df['max_doses'] = vaccine.p['doses'] # Get max doses for the vaccine
simulations_results_dfs.append(sim_results_df)
# Create a single dataframe containing a row for every execution
obs_df = | pd.concat(simulations_results_dfs, ignore_index=True) | pandas.concat |
# -*- coding: utf-8 -*-
from __future__ import print_function
from distutils.version import LooseVersion
from numpy import nan, random
import numpy as np
from pandas.compat import lrange
from pandas import (DataFrame, Series, Timestamp,
date_range)
import pandas as pd
from pandas.util.testing import (assert_series_equal,
assert_frame_equal,
assertRaisesRegexp)
import pandas.util.testing as tm
from pandas.tests.frame.common import TestData, _check_mixed_float
def _skip_if_no_pchip():
try:
from scipy.interpolate import pchip_interpolate # noqa
except ImportError:
import nose
raise nose.SkipTest('scipy.interpolate.pchip missing')
class TestDataFrameMissingData(tm.TestCase, TestData):
_multiprocess_can_split_ = True
def test_dropEmptyRows(self):
N = len(self.frame.index)
mat = random.randn(N)
mat[:5] = nan
frame = DataFrame({'foo': mat}, index=self.frame.index)
original = Series(mat, index=self.frame.index, name='foo')
expected = original.dropna()
inplace_frame1, inplace_frame2 = frame.copy(), frame.copy()
smaller_frame = frame.dropna(how='all')
# check that original was preserved
assert_series_equal(frame['foo'], original)
inplace_frame1.dropna(how='all', inplace=True)
assert_series_equal(smaller_frame['foo'], expected)
assert_series_equal(inplace_frame1['foo'], expected)
smaller_frame = frame.dropna(how='all', subset=['foo'])
inplace_frame2.dropna(how='all', subset=['foo'], inplace=True)
assert_series_equal(smaller_frame['foo'], expected)
assert_series_equal(inplace_frame2['foo'], expected)
def test_dropIncompleteRows(self):
N = len(self.frame.index)
mat = random.randn(N)
mat[:5] = nan
frame = DataFrame({'foo': mat}, index=self.frame.index)
frame['bar'] = 5
original = Series(mat, index=self.frame.index, name='foo')
inp_frame1, inp_frame2 = frame.copy(), frame.copy()
smaller_frame = frame.dropna()
assert_series_equal(frame['foo'], original)
inp_frame1.dropna(inplace=True)
exp = Series(mat[5:], index=self.frame.index[5:], name='foo')
tm.assert_series_equal(smaller_frame['foo'], exp)
tm.assert_series_equal(inp_frame1['foo'], exp)
samesize_frame = frame.dropna(subset=['bar'])
assert_series_equal(frame['foo'], original)
self.assertTrue((frame['bar'] == 5).all())
inp_frame2.dropna(subset=['bar'], inplace=True)
self.assert_index_equal(samesize_frame.index, self.frame.index)
self.assert_index_equal(inp_frame2.index, self.frame.index)
def test_dropna(self):
df = DataFrame(np.random.randn(6, 4))
df[2][:2] = nan
dropped = df.dropna(axis=1)
expected = df.ix[:, [0, 1, 3]]
inp = df.copy()
inp.dropna(axis=1, inplace=True)
assert_frame_equal(dropped, expected)
| assert_frame_equal(inp, expected) | pandas.util.testing.assert_frame_equal |
from collections import abc, deque
from decimal import Decimal
from io import StringIO
from warnings import catch_warnings
import numpy as np
from numpy.random import randn
import pytest
from pandas.core.dtypes.dtypes import CategoricalDtype
import pandas as pd
from pandas import (
Categorical,
DataFrame,
DatetimeIndex,
Index,
MultiIndex,
Series,
concat,
date_range,
read_csv,
)
import pandas._testing as tm
from pandas.core.arrays import SparseArray
from pandas.core.construction import create_series_with_explicit_dtype
from pandas.tests.extension.decimal import to_decimal
@pytest.fixture(params=[True, False])
def sort(request):
"""Boolean sort keyword for concat and DataFrame.append."""
return request.param
class TestConcatenate:
def test_concat_copy(self):
df = DataFrame(np.random.randn(4, 3))
df2 = DataFrame(np.random.randint(0, 10, size=4).reshape(4, 1))
df3 = DataFrame({5: "foo"}, index=range(4))
# These are actual copies.
result = concat([df, df2, df3], axis=1, copy=True)
for b in result._mgr.blocks:
assert b.values.base is None
# These are the same.
result = concat([df, df2, df3], axis=1, copy=False)
for b in result._mgr.blocks:
if b.is_float:
assert b.values.base is df._mgr.blocks[0].values.base
elif b.is_integer:
assert b.values.base is df2._mgr.blocks[0].values.base
elif b.is_object:
assert b.values.base is not None
# Float block was consolidated.
df4 = DataFrame(np.random.randn(4, 1))
result = concat([df, df2, df3, df4], axis=1, copy=False)
for b in result._mgr.blocks:
if b.is_float:
assert b.values.base is None
elif b.is_integer:
assert b.values.base is df2._mgr.blocks[0].values.base
elif b.is_object:
assert b.values.base is not None
def test_concat_with_group_keys(self):
df = DataFrame(np.random.randn(4, 3))
df2 = DataFrame(np.random.randn(4, 4))
# axis=0
df = DataFrame(np.random.randn(3, 4))
df2 = DataFrame(np.random.randn(4, 4))
result = concat([df, df2], keys=[0, 1])
exp_index = MultiIndex.from_arrays(
[[0, 0, 0, 1, 1, 1, 1], [0, 1, 2, 0, 1, 2, 3]]
)
expected = DataFrame(np.r_[df.values, df2.values], index=exp_index)
tm.assert_frame_equal(result, expected)
result = concat([df, df], keys=[0, 1])
exp_index2 = MultiIndex.from_arrays([[0, 0, 0, 1, 1, 1], [0, 1, 2, 0, 1, 2]])
expected = DataFrame(np.r_[df.values, df.values], index=exp_index2)
tm.assert_frame_equal(result, expected)
# axis=1
df = DataFrame(np.random.randn(4, 3))
df2 = DataFrame(np.random.randn(4, 4))
result = concat([df, df2], keys=[0, 1], axis=1)
expected = DataFrame(np.c_[df.values, df2.values], columns=exp_index)
tm.assert_frame_equal(result, expected)
result = concat([df, df], keys=[0, 1], axis=1)
expected = DataFrame(np.c_[df.values, df.values], columns=exp_index2)
tm.assert_frame_equal(result, expected)
def test_concat_keys_specific_levels(self):
df = DataFrame(np.random.randn(10, 4))
pieces = [df.iloc[:, [0, 1]], df.iloc[:, [2]], df.iloc[:, [3]]]
level = ["three", "two", "one", "zero"]
result = concat(
pieces,
axis=1,
keys=["one", "two", "three"],
levels=[level],
names=["group_key"],
)
tm.assert_index_equal(result.columns.levels[0], Index(level, name="group_key"))
tm.assert_index_equal(result.columns.levels[1], Index([0, 1, 2, 3]))
assert result.columns.names == ["group_key", None]
def test_concat_dataframe_keys_bug(self, sort):
t1 = DataFrame(
{"value": Series([1, 2, 3], index=Index(["a", "b", "c"], name="id"))}
)
t2 = DataFrame({"value": Series([7, 8], index=Index(["a", "b"], name="id"))})
# it works
result = concat([t1, t2], axis=1, keys=["t1", "t2"], sort=sort)
assert list(result.columns) == [("t1", "value"), ("t2", "value")]
def test_concat_series_partial_columns_names(self):
# GH10698
foo = Series([1, 2], name="foo")
bar = Series([1, 2])
baz = Series([4, 5])
result = concat([foo, bar, baz], axis=1)
expected = DataFrame(
{"foo": [1, 2], 0: [1, 2], 1: [4, 5]}, columns=["foo", 0, 1]
)
tm.assert_frame_equal(result, expected)
result = concat([foo, bar, baz], axis=1, keys=["red", "blue", "yellow"])
expected = DataFrame(
{"red": [1, 2], "blue": [1, 2], "yellow": [4, 5]},
columns=["red", "blue", "yellow"],
)
tm.assert_frame_equal(result, expected)
result = concat([foo, bar, baz], axis=1, ignore_index=True)
expected = DataFrame({0: [1, 2], 1: [1, 2], 2: [4, 5]})
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("mapping", ["mapping", "dict"])
def test_concat_mapping(self, mapping, non_dict_mapping_subclass):
constructor = dict if mapping == "dict" else non_dict_mapping_subclass
frames = constructor(
{
"foo": DataFrame(np.random.randn(4, 3)),
"bar": DataFrame(np.random.randn(4, 3)),
"baz": DataFrame(np.random.randn(4, 3)),
"qux": DataFrame(np.random.randn(4, 3)),
}
)
sorted_keys = list(frames.keys())
result = concat(frames)
expected = concat([frames[k] for k in sorted_keys], keys=sorted_keys)
tm.assert_frame_equal(result, expected)
result = concat(frames, axis=1)
expected = concat([frames[k] for k in sorted_keys], keys=sorted_keys, axis=1)
tm.assert_frame_equal(result, expected)
keys = ["baz", "foo", "bar"]
result = concat(frames, keys=keys)
expected = concat([frames[k] for k in keys], keys=keys)
tm.assert_frame_equal(result, expected)
def test_concat_ignore_index(self, sort):
frame1 = DataFrame(
{"test1": ["a", "b", "c"], "test2": [1, 2, 3], "test3": [4.5, 3.2, 1.2]}
)
frame2 = DataFrame({"test3": [5.2, 2.2, 4.3]})
frame1.index = Index(["x", "y", "z"])
frame2.index = Index(["x", "y", "q"])
v1 = concat([frame1, frame2], axis=1, ignore_index=True, sort=sort)
nan = np.nan
expected = DataFrame(
[
[nan, nan, nan, 4.3],
["a", 1, 4.5, 5.2],
["b", 2, 3.2, 2.2],
["c", 3, 1.2, nan],
],
index=Index(["q", "x", "y", "z"]),
)
if not sort:
expected = expected.loc[["x", "y", "z", "q"]]
tm.assert_frame_equal(v1, expected)
@pytest.mark.parametrize(
"name_in1,name_in2,name_in3,name_out",
[
("idx", "idx", "idx", "idx"),
("idx", "idx", None, None),
("idx", None, None, None),
("idx1", "idx2", None, None),
("idx1", "idx1", "idx2", None),
("idx1", "idx2", "idx3", None),
(None, None, None, None),
],
)
def test_concat_same_index_names(self, name_in1, name_in2, name_in3, name_out):
# GH13475
indices = [
Index(["a", "b", "c"], name=name_in1),
Index(["b", "c", "d"], name=name_in2),
Index(["c", "d", "e"], name=name_in3),
]
frames = [
DataFrame({c: [0, 1, 2]}, index=i) for i, c in zip(indices, ["x", "y", "z"])
]
result = pd.concat(frames, axis=1)
exp_ind = Index(["a", "b", "c", "d", "e"], name=name_out)
expected = DataFrame(
{
"x": [0, 1, 2, np.nan, np.nan],
"y": [np.nan, 0, 1, 2, np.nan],
"z": [np.nan, np.nan, 0, 1, 2],
},
index=exp_ind,
)
tm.assert_frame_equal(result, expected)
def test_concat_multiindex_with_keys(self):
index = MultiIndex(
levels=[["foo", "bar", "baz", "qux"], ["one", "two", "three"]],
codes=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3], [0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=["first", "second"],
)
frame = DataFrame(
np.random.randn(10, 3),
index=index,
columns=Index(["A", "B", "C"], name="exp"),
)
result = concat([frame, frame], keys=[0, 1], names=["iteration"])
assert result.index.names == ("iteration",) + index.names
tm.assert_frame_equal(result.loc[0], frame)
tm.assert_frame_equal(result.loc[1], frame)
assert result.index.nlevels == 3
def test_concat_multiindex_with_none_in_index_names(self):
# GH 15787
index = pd.MultiIndex.from_product([[1], range(5)], names=["level1", None])
df = DataFrame({"col": range(5)}, index=index, dtype=np.int32)
result = concat([df, df], keys=[1, 2], names=["level2"])
index = pd.MultiIndex.from_product(
[[1, 2], [1], range(5)], names=["level2", "level1", None]
)
expected = DataFrame({"col": list(range(5)) * 2}, index=index, dtype=np.int32)
tm.assert_frame_equal(result, expected)
result = concat([df, df[:2]], keys=[1, 2], names=["level2"])
level2 = [1] * 5 + [2] * 2
level1 = [1] * 7
no_name = list(range(5)) + list(range(2))
tuples = list(zip(level2, level1, no_name))
index = pd.MultiIndex.from_tuples(tuples, names=["level2", "level1", None])
expected = DataFrame({"col": no_name}, index=index, dtype=np.int32)
tm.assert_frame_equal(result, expected)
def test_concat_keys_and_levels(self):
df = DataFrame(np.random.randn(1, 3))
df2 = DataFrame(np.random.randn(1, 4))
levels = [["foo", "baz"], ["one", "two"]]
names = ["first", "second"]
result = concat(
[df, df2, df, df2],
keys=[("foo", "one"), ("foo", "two"), ("baz", "one"), ("baz", "two")],
levels=levels,
names=names,
)
expected = concat([df, df2, df, df2])
exp_index = MultiIndex(
levels=levels + [[0]],
codes=[[0, 0, 1, 1], [0, 1, 0, 1], [0, 0, 0, 0]],
names=names + [None],
)
expected.index = exp_index
tm.assert_frame_equal(result, expected)
# no names
result = concat(
[df, df2, df, df2],
keys=[("foo", "one"), ("foo", "two"), ("baz", "one"), ("baz", "two")],
levels=levels,
)
assert result.index.names == (None,) * 3
# no levels
result = concat(
[df, df2, df, df2],
keys=[("foo", "one"), ("foo", "two"), ("baz", "one"), ("baz", "two")],
names=["first", "second"],
)
assert result.index.names == ("first", "second", None)
tm.assert_index_equal(
result.index.levels[0], Index(["baz", "foo"], name="first")
)
def test_concat_keys_levels_no_overlap(self):
# GH #1406
df = DataFrame(np.random.randn(1, 3), index=["a"])
df2 = DataFrame(np.random.randn(1, 4), index=["b"])
msg = "Values not found in passed level"
with pytest.raises(ValueError, match=msg):
concat([df, df], keys=["one", "two"], levels=[["foo", "bar", "baz"]])
msg = "Key one not in level"
with pytest.raises(ValueError, match=msg):
concat([df, df2], keys=["one", "two"], levels=[["foo", "bar", "baz"]])
def test_concat_rename_index(self):
a = DataFrame(
np.random.rand(3, 3),
columns=list("ABC"),
index=Index(list("abc"), name="index_a"),
)
b = DataFrame(
np.random.rand(3, 3),
columns=list("ABC"),
index=Index(list("abc"), name="index_b"),
)
result = concat([a, b], keys=["key0", "key1"], names=["lvl0", "lvl1"])
exp = concat([a, b], keys=["key0", "key1"], names=["lvl0"])
names = list(exp.index.names)
names[1] = "lvl1"
exp.index.set_names(names, inplace=True)
tm.assert_frame_equal(result, exp)
assert result.index.names == exp.index.names
def test_crossed_dtypes_weird_corner(self):
columns = ["A", "B", "C", "D"]
df1 = DataFrame(
{
"A": np.array([1, 2, 3, 4], dtype="f8"),
"B": np.array([1, 2, 3, 4], dtype="i8"),
"C": np.array([1, 2, 3, 4], dtype="f8"),
"D": np.array([1, 2, 3, 4], dtype="i8"),
},
columns=columns,
)
df2 = DataFrame(
{
"A": np.array([1, 2, 3, 4], dtype="i8"),
"B": np.array([1, 2, 3, 4], dtype="f8"),
"C": np.array([1, 2, 3, 4], dtype="i8"),
"D": np.array([1, 2, 3, 4], dtype="f8"),
},
columns=columns,
)
appended = df1.append(df2, ignore_index=True)
expected = DataFrame(
np.concatenate([df1.values, df2.values], axis=0), columns=columns
)
tm.assert_frame_equal(appended, expected)
df = DataFrame(np.random.randn(1, 3), index=["a"])
df2 = DataFrame(np.random.randn(1, 4), index=["b"])
result = concat([df, df2], keys=["one", "two"], names=["first", "second"])
assert result.index.names == ("first", "second")
def test_dups_index(self):
# GH 4771
# single dtypes
df = DataFrame(
np.random.randint(0, 10, size=40).reshape(10, 4),
columns=["A", "A", "C", "C"],
)
result = concat([df, df], axis=1)
tm.assert_frame_equal(result.iloc[:, :4], df)
tm.assert_frame_equal(result.iloc[:, 4:], df)
result = concat([df, df], axis=0)
tm.assert_frame_equal(result.iloc[:10], df)
tm.assert_frame_equal(result.iloc[10:], df)
# multi dtypes
df = concat(
[
DataFrame(np.random.randn(10, 4), columns=["A", "A", "B", "B"]),
DataFrame(
np.random.randint(0, 10, size=20).reshape(10, 2), columns=["A", "C"]
),
],
axis=1,
)
result = concat([df, df], axis=1)
tm.assert_frame_equal(result.iloc[:, :6], df)
tm.assert_frame_equal(result.iloc[:, 6:], df)
result = concat([df, df], axis=0)
tm.assert_frame_equal(result.iloc[:10], df)
tm.assert_frame_equal(result.iloc[10:], df)
# append
result = df.iloc[0:8, :].append(df.iloc[8:])
tm.assert_frame_equal(result, df)
#Calculate the Linear Regression between Market Caps
import pandas as pd
import numpy as np
import datetime as date
today = date.datetime.now().strftime('%Y-%m-%d')
from plotly.subplots import make_subplots
import plotly.graph_objects as go
import plotly.io as pio
pio.renderers.default = "browser"
from checkonchain.general.coinmetrics_api import *
from checkonchain.btconchain.btc_add_metrics import *
from checkonchain.dcronchain.dcr_add_metrics import *
from checkonchain.general.regression_analysis import *
#Pull Coinmetrics Data for Coins
BTC = btc_add_metrics().btc_coin()
LTC = Coinmetrics_api('ltc',"2011-10-07",today).convert_to_pd()
BCH = Coinmetrics_api('bch',"2017-08-01",today).convert_to_pd()
DAS = Coinmetrics_api('dash',"2014-01-19",today).convert_to_pd()
DCR = dcr_add_metrics().dcr_coin()
XMR = Coinmetrics_api('xmr',"2014-04-18",today).convert_to_pd()
ZEC = Coinmetrics_api('zec',"2016-10-28",today).convert_to_pd()
ETH = Coinmetrics_api('eth',"2015-07-30",today).convert_to_pd()
XRP = Coinmetrics_api('xrp',"2013-01-01",today).convert_to_pd()
#Reduce dataset down to date and a single metric
metric="CapMrktCurUSD"
BTC2 =BTC[['date',metric]]
LTC2 =LTC[['date',metric]]
BCH2 =BCH[['date',metric]]
DAS2 =DAS[['date',metric]]
DCR2 =DCR[['date',metric]]
XMR2 =XMR[['date',metric]]
ZEC2 =ZEC[['date',metric]]
ETH2 =ETH[['date',metric]]
#XRP2 =XRP[['date',metric]]
#Rename all columns
prefix = 'Cap_'
BTC2.columns =['date',prefix+'BTC']
LTC2.columns =['date',prefix+'LTC']
BCH2.columns =['date',prefix+'BCH']
DAS2.columns=['date',prefix+'DAS']
DCR2.columns =['date',prefix+'DCR']
XMR2.columns =['date',prefix+'XMR']
ZEC2.columns =['date',prefix+'ZEC']
ETH2.columns =['date',prefix+'ETH']
#XRP2.columns =['date',prefix+'XRP']  # XRP2 is commented out above, so skip its rename as well
#Compile into a single dataframe with all coins
BTC_data = BTC2.dropna(axis=0)
BTC_data = pd.merge_asof(BTC_data, LTC2, on='date')
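#Sketch (not in the original snippet): fold the remaining coins into the same frame with
#repeated merge_asof calls, mirroring the LTC merge above (XRP stays excluded)
for frame2 in [BCH2, DAS2, DCR2, XMR2, ZEC2, ETH2]:
    BTC_data = pd.merge_asof(BTC_data, frame2, on='date')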
# -*- coding: utf-8 -*-
# @Time : 2018/10/3 2:36 PM
# @Author : yidxue
import pandas as pd
from common.util_function import *
df1 = pd.DataFrame(data={'name': ['a', 'b', 'c', 'd'], 'gender': ['male', 'male', 'female', 'female']})
df2 = pd.DataFrame(data={'name': ['a', 'b', 'c', 'e'], 'age': [21, 22, 23, 20]})
print_line("inner join")
print_br(pd.merge(df1, df2, on=['name'], how='inner'))
print_line("inner join")
print_br(df1.merge(df2, how='inner', on=['name']))
print_line("left join")
print_br(pd.merge(df1, df2, on=['name'], how='left'))
print_line("outer join")
print_br(pd.merge(df1, df2, on=['name'], how='outer'))
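# For completeness, a right join (not in the original example) follows the same pattern:
print_line("right join")
print_br(pd.merge(df1, df2, on=['name'], how='right'))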
import numpy as np
import pandas as pd
import pytest
@pytest.fixture(scope="module")
def df_vartypes():
data = {
"Name": ["tom", "nick", "krish", "jack"],
"City": ["London", "Manchester", "Liverpool", "Bristol"],
"Age": [20, 21, 19, 18],
"Marks": [0.9, 0.8, 0.7, 0.6],
"dob": pd.date_range("2020-02-24", periods=4, freq="T"),
}
df = pd.DataFrame(data)
    return df
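# Hypothetical usage sketch (not part of the original file): pytest injects the fixture
# by name, so a test simply declares `df_vartypes` as an argument.
def test_df_vartypes_shape(df_vartypes):
    assert df_vartypes.shape == (4, 5)
    assert str(df_vartypes["dob"].dtype) == "datetime64[ns]"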
# -*- coding: utf-8 -*-
# pylint: disable-msg=W0612,E1101
import itertools
import warnings
from warnings import catch_warnings
from datetime import datetime
from pandas.types.common import (is_integer_dtype,
is_float_dtype,
is_scalar)
from pandas.compat import range, lrange, lzip, StringIO, lmap
from pandas.tslib import NaT
from numpy import nan
from numpy.random import randn
import numpy as np
import pandas as pd
from pandas import option_context
from pandas.core.indexing import _non_reducing_slice, _maybe_numeric_slice
from pandas.core.api import (DataFrame, Index, Series, Panel, isnull,
MultiIndex, Timestamp, Timedelta, UInt64Index)
from pandas.formats.printing import pprint_thing
from pandas import concat
from pandas.core.common import PerformanceWarning
from pandas.tests.indexing.common import _mklbl
import pandas.util.testing as tm
from pandas import date_range
_verbose = False
# ------------------------------------------------------------------------
# Indexing test cases
def _generate_indices(f, values=False):
""" generate the indicies
if values is True , use the axis values
is False, use the range
"""
axes = f.axes
if values:
axes = [lrange(len(a)) for a in axes]
return itertools.product(*axes)
def _get_value(f, i, values=False):
""" return the value for the location i """
# check against values
if values:
return f.values[i]
# this is equiv of f[col][row].....
# v = f
# for a in reversed(i):
# v = v.__getitem__(a)
# return v
with catch_warnings(record=True):
return f.ix[i]
def _get_result(obj, method, key, axis):
""" return the result for this obj with this key and this axis """
if isinstance(key, dict):
key = key[axis]
# use an artificial conversion to map the key as integers to the labels
# so ix can work for comparisons
if method == 'indexer':
method = 'ix'
key = obj._get_axis(axis)[key]
# in case we actually want 0 index slicing
try:
xp = getattr(obj, method).__getitem__(_axify(obj, key, axis))
except:
xp = getattr(obj, method).__getitem__(key)
return xp
def _axify(obj, key, axis):
# create a tuple accessor
axes = [slice(None)] * obj.ndim
axes[axis] = key
return tuple(axes)
class TestIndexing(tm.TestCase):
_objs = set(['series', 'frame', 'panel'])
_typs = set(['ints', 'uints', 'labels', 'mixed',
'ts', 'floats', 'empty', 'ts_rev'])
def setUp(self):
self.series_ints = Series(np.random.rand(4), index=lrange(0, 8, 2))
self.frame_ints = DataFrame(np.random.randn(4, 4),
index=lrange(0, 8, 2),
columns=lrange(0, 12, 3))
self.panel_ints = Panel(np.random.rand(4, 4, 4),
items=lrange(0, 8, 2),
major_axis=lrange(0, 12, 3),
minor_axis=lrange(0, 16, 4))
self.series_uints = Series(np.random.rand(4),
index=UInt64Index(lrange(0, 8, 2)))
self.frame_uints = DataFrame(np.random.randn(4, 4),
index=UInt64Index(lrange(0, 8, 2)),
columns=UInt64Index(lrange(0, 12, 3)))
self.panel_uints = Panel(np.random.rand(4, 4, 4),
items=UInt64Index(lrange(0, 8, 2)),
major_axis=UInt64Index(lrange(0, 12, 3)),
minor_axis=UInt64Index(lrange(0, 16, 4)))
self.series_labels = Series(np.random.randn(4), index=list('abcd'))
self.frame_labels = DataFrame(np.random.randn(4, 4),
index=list('abcd'), columns=list('ABCD'))
self.panel_labels = Panel(np.random.randn(4, 4, 4),
items=list('abcd'),
major_axis=list('ABCD'),
minor_axis=list('ZYXW'))
self.series_mixed = Series(np.random.randn(4), index=[2, 4, 'null', 8])
self.frame_mixed = DataFrame(np.random.randn(4, 4),
index=[2, 4, 'null', 8])
self.panel_mixed = Panel(np.random.randn(4, 4, 4),
items=[2, 4, 'null', 8])
self.series_ts = Series(np.random.randn(4),
index=date_range('20130101', periods=4))
self.frame_ts = DataFrame(np.random.randn(4, 4),
index=date_range('20130101', periods=4))
self.panel_ts = Panel(np.random.randn(4, 4, 4),
items=date_range('20130101', periods=4))
dates_rev = (date_range('20130101', periods=4)
.sort_values(ascending=False))
self.series_ts_rev = Series(np.random.randn(4),
index=dates_rev)
self.frame_ts_rev = DataFrame(np.random.randn(4, 4),
index=dates_rev)
self.panel_ts_rev = Panel(np.random.randn(4, 4, 4),
items=dates_rev)
self.frame_empty = DataFrame({})
self.series_empty = Series({})
self.panel_empty = Panel({})
# form agglomerates
for o in self._objs:
d = dict()
for t in self._typs:
d[t] = getattr(self, '%s_%s' % (o, t), None)
setattr(self, o, d)
def check_values(self, f, func, values=False):
if f is None:
return
axes = f.axes
indicies = itertools.product(*axes)
for i in indicies:
result = getattr(f, func)[i]
# check against values
if values:
expected = f.values[i]
else:
expected = f
for a in reversed(i):
expected = expected.__getitem__(a)
tm.assert_almost_equal(result, expected)
def check_result(self, name, method1, key1, method2, key2, typs=None,
objs=None, axes=None, fails=None):
def _eq(t, o, a, obj, k1, k2):
""" compare equal for these 2 keys """
if a is not None and a > obj.ndim - 1:
return
def _print(result, error=None):
if error is not None:
error = str(error)
v = ("%-16.16s [%-16.16s]: [typ->%-8.8s,obj->%-8.8s,"
"key1->(%-4.4s),key2->(%-4.4s),axis->%s] %s" %
(name, result, t, o, method1, method2, a, error or ''))
if _verbose:
pprint_thing(v)
try:
rs = getattr(obj, method1).__getitem__(_axify(obj, k1, a))
try:
xp = _get_result(obj, method2, k2, a)
except:
result = 'no comp'
_print(result)
return
detail = None
try:
if is_scalar(rs) and is_scalar(xp):
self.assertEqual(rs, xp)
elif xp.ndim == 1:
tm.assert_series_equal(rs, xp)
elif xp.ndim == 2:
tm.assert_frame_equal(rs, xp)
elif xp.ndim == 3:
tm.assert_panel_equal(rs, xp)
result = 'ok'
except AssertionError as e:
detail = str(e)
result = 'fail'
# reverse the checks
if fails is True:
if result == 'fail':
result = 'ok (fail)'
_print(result)
if not result.startswith('ok'):
raise AssertionError(detail)
except AssertionError:
raise
except Exception as detail:
# if we are in fails, the ok, otherwise raise it
if fails is not None:
if isinstance(detail, fails):
result = 'ok (%s)' % type(detail).__name__
_print(result)
return
result = type(detail).__name__
raise AssertionError(_print(result, error=detail))
if typs is None:
typs = self._typs
if objs is None:
objs = self._objs
if axes is not None:
if not isinstance(axes, (tuple, list)):
axes = [axes]
else:
axes = list(axes)
else:
axes = [0, 1, 2]
# check
for o in objs:
if o not in self._objs:
continue
d = getattr(self, o)
for a in axes:
for t in typs:
if t not in self._typs:
continue
obj = d[t]
if obj is not None:
obj = obj.copy()
k2 = key2
_eq(t, o, a, obj, key1, k2)
def test_ix_deprecation(self):
# GH 15114
df = DataFrame({'A': [1, 2, 3]})
with tm.assert_produces_warning(DeprecationWarning,
check_stacklevel=False):
df.ix[1, 'A']
def test_indexer_caching(self):
# GH5727
# make sure that indexers are in the _internal_names_set
n = 1000001
arrays = [lrange(n), lrange(n)]
index = MultiIndex.from_tuples(lzip(*arrays))
s = Series(np.zeros(n), index=index)
str(s)
# setitem
expected = Series(np.ones(n), index=index)
s = Series(np.zeros(n), index=index)
s[s == 0] = 1
tm.assert_series_equal(s, expected)
def test_at_and_iat_get(self):
def _check(f, func, values=False):
if f is not None:
indicies = _generate_indices(f, values)
for i in indicies:
result = getattr(f, func)[i]
expected = _get_value(f, i, values)
tm.assert_almost_equal(result, expected)
for o in self._objs:
d = getattr(self, o)
# iat
for f in [d['ints'], d['uints']]:
_check(f, 'iat', values=True)
for f in [d['labels'], d['ts'], d['floats']]:
if f is not None:
self.assertRaises(ValueError, self.check_values, f, 'iat')
# at
for f in [d['ints'], d['uints'], d['labels'],
d['ts'], d['floats']]:
_check(f, 'at')
def test_at_and_iat_set(self):
def _check(f, func, values=False):
if f is not None:
indicies = _generate_indices(f, values)
for i in indicies:
getattr(f, func)[i] = 1
expected = _get_value(f, i, values)
tm.assert_almost_equal(expected, 1)
for t in self._objs:
d = getattr(self, t)
# iat
for f in [d['ints'], d['uints']]:
_check(f, 'iat', values=True)
for f in [d['labels'], d['ts'], d['floats']]:
if f is not None:
self.assertRaises(ValueError, _check, f, 'iat')
# at
for f in [d['ints'], d['uints'], d['labels'],
d['ts'], d['floats']]:
_check(f, 'at')
def test_at_iat_coercion(self):
# as timestamp is not a tuple!
dates = date_range('1/1/2000', periods=8)
df = DataFrame(randn(8, 4), index=dates, columns=['A', 'B', 'C', 'D'])
s = df['A']
result = s.at[dates[5]]
xp = s.values[5]
self.assertEqual(result, xp)
# GH 7729
# make sure we are boxing the returns
s = Series(['2014-01-01', '2014-02-02'], dtype='datetime64[ns]')
expected = Timestamp('2014-02-02')
for r in [lambda: s.iat[1], lambda: s.iloc[1]]:
result = r()
self.assertEqual(result, expected)
s = Series(['1 days', '2 days'], dtype='timedelta64[ns]')
expected = Timedelta('2 days')
for r in [lambda: s.iat[1], lambda: s.iloc[1]]:
result = r()
self.assertEqual(result, expected)
def test_iat_invalid_args(self):
pass
def test_imethods_with_dups(self):
# GH6493
# iat/iloc with dups
s = Series(range(5), index=[1, 1, 2, 2, 3], dtype='int64')
result = s.iloc[2]
self.assertEqual(result, 2)
result = s.iat[2]
self.assertEqual(result, 2)
self.assertRaises(IndexError, lambda: s.iat[10])
self.assertRaises(IndexError, lambda: s.iat[-10])
result = s.iloc[[2, 3]]
expected = Series([2, 3], [2, 2], dtype='int64')
tm.assert_series_equal(result, expected)
df = s.to_frame()
result = df.iloc[2]
expected = Series(2, index=[0], name=2)
tm.assert_series_equal(result, expected)
result = df.iat[2, 0]
expected = 2
self.assertEqual(result, 2)
def test_repeated_getitem_dups(self):
# GH 5678
# repeated getitems on a dup index returning an ndarray
df = DataFrame(
np.random.random_sample((20, 5)),
index=['ABCDE'[x % 5] for x in range(20)])
expected = df.loc['A', 0]
result = df.loc[:, 0].loc['A']
tm.assert_series_equal(result, expected)
def test_iloc_exceeds_bounds(self):
# GH6296
# iloc should allow indexers that exceed the bounds
df = DataFrame(np.random.random_sample((20, 5)), columns=list('ABCDE'))
expected = df
# lists of positions should raise IndexError!
with tm.assertRaisesRegexp(IndexError,
'positional indexers are out-of-bounds'):
df.iloc[:, [0, 1, 2, 3, 4, 5]]
self.assertRaises(IndexError, lambda: df.iloc[[1, 30]])
self.assertRaises(IndexError, lambda: df.iloc[[1, -30]])
self.assertRaises(IndexError, lambda: df.iloc[[100]])
s = df['A']
self.assertRaises(IndexError, lambda: s.iloc[[100]])
self.assertRaises(IndexError, lambda: s.iloc[[-100]])
# still raise on a single indexer
msg = 'single positional indexer is out-of-bounds'
with tm.assertRaisesRegexp(IndexError, msg):
df.iloc[30]
self.assertRaises(IndexError, lambda: df.iloc[-30])
# GH10779
# single positive/negative indexer exceeding Series bounds should raise
# an IndexError
with tm.assertRaisesRegexp(IndexError, msg):
s.iloc[30]
self.assertRaises(IndexError, lambda: s.iloc[-30])
# slices are ok
result = df.iloc[:, 4:10] # 0 < start < len < stop
expected = df.iloc[:, 4:]
tm.assert_frame_equal(result, expected)
result = df.iloc[:, -4:-10] # stop < 0 < start < len
expected = df.iloc[:, :0]
tm.assert_frame_equal(result, expected)
result = df.iloc[:, 10:4:-1] # 0 < stop < len < start (down)
expected = df.iloc[:, :4:-1]
tm.assert_frame_equal(result, expected)
#!/usr/bin/env python
# coding: utf-8
# # Benchmark Results
# This notebook visualizes the output from the different models on different classification problems
# In[1]:
import collections
import glob
import json
import os
import numpy as np
import pandas as pd
from plotnine import *
from saged.utils import split_sample_names, create_dataset_stat_df, get_dataset_stats, parse_map_file
# ## Set Up Functions and Get Metadata
# In[3]:
def return_unlabeled():
# For use in a defaultdict
return 'unlabeled'
# In[4]:
data_dir = '../../data/'
map_file = os.path.join(data_dir, 'sample_classifications.pkl')
sample_to_label = parse_map_file(map_file)
sample_to_label = collections.defaultdict(return_unlabeled, sample_to_label)
# In[ ]:
metadata_path = os.path.join(data_dir, 'aggregated_metadata.json')
metadata = None
with open(metadata_path) as json_file:
metadata = json.load(json_file)
sample_metadata = metadata['samples']
# In[ ]:
experiments = metadata['experiments']
sample_to_study = {}
for study in experiments:
for accession in experiments[study]['sample_accession_codes']:
sample_to_study[accession] = study
# ## Sepsis classification
# In[8]:
in_files = glob.glob('../../results/single_label.*')
in_files = [file for file in in_files if 'be_corrected' not in file]
print(in_files[:5])
# In[9]:
sepsis_metrics = pd.DataFrame()
for path in in_files:
new_df = pd.read_csv(path, sep='\t')
model_info = path.strip('.tsv').split('sepsis.')[-1]
model_info = model_info.split('.')
if len(model_info) == 4:
unsupervised_model = model_info[0]
supervised_model = model_info[1]
else:
unsupervised_model = 'untransformed'
supervised_model = model_info[0]
new_df['unsupervised'] = unsupervised_model
new_df['supervised'] = supervised_model
sepsis_metrics = pd.concat([sepsis_metrics, new_df])
sepsis_metrics
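# The per-file parsing above is repeated in nearly every cell below; a hedged helper that
# factors out the shared pattern (the name and keyword are suggestions, not part of the
# original notebook):
def load_metrics(glob_pattern, split_token, be_corrected=None):
    """Load result TSVs and tag them with model metadata parsed from the filename."""
    paths = glob.glob(glob_pattern)
    if be_corrected is True:
        paths = [p for p in paths if 'be_corrected' in p]
    elif be_corrected is False:
        paths = [p for p in paths if 'be_corrected' not in p]
    frames = []
    for path in paths:
        new_df = pd.read_csv(path, sep='\t')
        # Slicing off the suffix avoids str.strip('.tsv'), which strips characters, not a suffix
        model_info = path[:-len('.tsv')].split(split_token)[-1].split('.')
        if len(model_info) == 4:
            new_df['unsupervised'], new_df['supervised'] = model_info[0], model_info[1]
        else:
            new_df['unsupervised'], new_df['supervised'] = 'untransformed', model_info[0]
        frames.append(new_df)
    return pd.concat(frames, ignore_index=True)
# e.g. sepsis_metrics = load_metrics('../../results/single_label.*', 'sepsis.', be_corrected=False)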
# In[10]:
plot = ggplot(sepsis_metrics, aes(x='supervised', y='accuracy', fill='unsupervised'))
plot += geom_violin()
plot += ggtitle('PCA vs untransformed data for classifying sepsis')
print(plot)
# In[11]:
plot = ggplot(sepsis_metrics, aes(x='supervised', y='accuracy', fill='unsupervised'))
plot += geom_jitter(size=3)
plot += ggtitle('PCA vs untransformed data for classifying sepsis')
print(plot)
# ## All labels
# In[12]:
in_files = glob.glob('../../results/all_labels.*')
in_files = [file for file in in_files if 'be_corrected' not in file]
print(in_files[:5])
# In[13]:
metrics = None
for path in in_files:
if metrics is None:
metrics = pd.read_csv(path, sep='\t')
model_info = path.strip('.tsv').split('all_labels.')[-1]
model_info = model_info.split('.')
if len(model_info) == 4:
unsupervised_model = model_info[0]
supervised_model = model_info[1]
else:
unsupervised_model = 'untransformed'
supervised_model = model_info[0]
metrics['unsupervised'] = unsupervised_model
metrics['supervised'] = supervised_model
else:
new_df = pd.read_csv(path, sep='\t')
model_info = path.strip('.tsv').split('all_labels.')[-1]
model_info = model_info.split('.')
if len(model_info) == 4:
unsupervised_model = model_info[0]
supervised_model = model_info[1]
else:
unsupervised_model = 'untransformed'
supervised_model = model_info[0]
new_df['unsupervised'] = unsupervised_model
new_df['supervised'] = supervised_model
metrics = pd.concat([metrics, new_df])
metrics
# In[14]:
plot = ggplot(metrics, aes(x='supervised', y='accuracy', fill='unsupervised'))
plot += geom_violin()
plot += ggtitle('PCA vs untransformed data for all label classification')
print(plot)
# In[15]:
plot = ggplot(metrics, aes(x='supervised', y='accuracy', fill='unsupervised'))
plot += geom_jitter(size=2)
plot += ggtitle('PCA vs untransformed data for all label classification')
print(plot)
# # Subsets of healthy labels
# In[16]:
in_files = glob.glob('../../results/subset_label.sepsis*')
in_files = [file for file in in_files if 'be_corrected' not in file]
print(in_files[:5])
# In[17]:
sepsis_metrics = pd.DataFrame()
for path in in_files:
new_df = pd.read_csv(path, sep='\t')
model_info = path.strip('.tsv').split('sepsis.')[-1]
model_info = model_info.split('.')
if len(model_info) == 4:
unsupervised_model = model_info[0]
supervised_model = model_info[1]
else:
unsupervised_model = 'untransformed'
supervised_model = model_info[0]
new_df['unsupervised'] = unsupervised_model
new_df['supervised'] = supervised_model
sepsis_metrics = pd.concat([sepsis_metrics, new_df])
sepsis_metrics = sepsis_metrics.rename({'fraction of healthy used': 'healthy_used'}, axis='columns')
sepsis_metrics['healthy_used'] = sepsis_metrics['healthy_used'].round(1)
sepsis_metrics
# In[18]:
print(sepsis_metrics[sepsis_metrics['healthy_used'] == 1])
# In[19]:
plot = ggplot(sepsis_metrics, aes(x='factor(healthy_used)', y='accuracy', ))
plot += geom_violin()
plot += ggtitle('Effect of subsetting healthy data on prediction accuracy')
print(plot)
# In[20]:
plot = ggplot(sepsis_metrics, aes(x='factor(healthy_used)', y='accuracy', fill='unsupervised'))
plot += geom_violin()
plot += ggtitle('Effect of subsetting healthy data on prediction accuracy')
print(plot)
# In[21]:
plot = ggplot(sepsis_metrics, aes(x='factor(healthy_used)', y='accuracy', fill='supervised'))
plot += geom_violin()
plot += ggtitle('Effect of subsetting healthy data on prediction accuracy')
print(plot)
# ## Same analysis, but with tb instead of sepsis
# In[22]:
in_files = glob.glob('../../results/subset_label.tb*')
in_files = [file for file in in_files if 'be_corrected' not in file]
print(in_files[:5])
# In[23]:
tuberculosis_metrics = pd.DataFrame()
for path in in_files:
new_df = pd.read_csv(path, sep='\t')
model_info = path.strip('.tsv').split('tb.')[-1]
model_info = model_info.split('.')
if len(model_info) == 4:
unsupervised_model = model_info[0]
supervised_model = model_info[1]
else:
unsupervised_model = 'untransformed'
supervised_model = model_info[0]
new_df['unsupervised'] = unsupervised_model
new_df['supervised'] = supervised_model
tuberculosis_metrics = pd.concat([tuberculosis_metrics, new_df])
tuberculosis_metrics = tuberculosis_metrics.rename({'fraction of healthy used': 'healthy_used'}, axis='columns')
tuberculosis_metrics['healthy_used'] = tuberculosis_metrics['healthy_used'].round(1)
tuberculosis_metrics
# In[24]:
print(tuberculosis_metrics[tuberculosis_metrics['healthy_used'] == 1])
# In[25]:
plot = ggplot(tuberculosis_metrics, aes(x='factor(healthy_used)', y='accuracy'))
plot += geom_violin()
plot += ggtitle('Effect of subsetting healthy data on prediction accuracy')
print(plot)
# In[26]:
plot = ggplot(tuberculosis_metrics, aes(x='factor(healthy_used)', y='accuracy', fill='unsupervised'))
plot += geom_violin()
plot += ggtitle('Effect of subsetting healthy data on prediction accuracy')
print(plot)
# In[27]:
plot = ggplot(tuberculosis_metrics, aes(x='factor(healthy_used)', y='accuracy', fill='supervised'))
plot += geom_violin()
plot += ggtitle('Effect of subsetting healthy data on prediction accuracy')
print(plot)
# ## Supervised Results Only
# The results above show that unsupervised learning mostly hurts performance rather than helping.
# The visualizations below compare each model based only on its supervised results.
# In[28]:
supervised_sepsis = sepsis_metrics[sepsis_metrics['unsupervised'] == 'untransformed']
# In[29]:
plot = ggplot(supervised_sepsis, aes(x='factor(healthy_used)', y='accuracy', fill='supervised'))
plot += geom_violin()
plot += ggtitle('Effect of subsetting healthy data on prediction accuracy')
print(plot)
# In[30]:
supervised_tb = tuberculosis_metrics[tuberculosis_metrics['unsupervised'] == 'untransformed']
# In[31]:
plot = ggplot(supervised_tb, aes(x='factor(healthy_used)', y='accuracy', fill='supervised'))
plot += geom_violin()
plot += ggtitle('Effect of subsetting healthy data on prediction accuracy')
print(plot)
# In[32]:
plot = ggplot(supervised_tb, aes(x='factor(healthy_used)', y='accuracy', fill='supervised'))
plot += geom_boxplot()
plot += ggtitle('Effect of subsetting healthy data on prediction accuracy')
print(plot)
# ## Batch Effect Correction
# In[33]:
in_files = glob.glob('../../results/subset_label.sepsis*be_corrected.tsv')
print(in_files[:5])
# In[34]:
sepsis_metrics = pd.DataFrame()
for path in in_files:
new_df = pd.read_csv(path, sep='\t')
model_info = path.strip('.tsv').split('sepsis.')[-1]
print(model_info)
model_info = model_info.split('.')
print(model_info)
supervised_model = model_info[0]
new_df['supervised'] = supervised_model
sepsis_metrics = pd.concat([sepsis_metrics, new_df])
sepsis_metrics = sepsis_metrics.rename({'fraction of healthy used': 'healthy_used'}, axis='columns')
sepsis_metrics['healthy_used'] = sepsis_metrics['healthy_used'].round(1)
sepsis_metrics
# In[35]:
plot = ggplot(sepsis_metrics, aes(x='factor(healthy_used)', y='accuracy', ))
plot += geom_violin()
plot += ggtitle('Effect of subsetting healthy data on prediction accuracy')
print(plot)
# In[36]:
plot = ggplot(sepsis_metrics, aes(x='factor(healthy_used)', y='accuracy', fill='supervised'))
plot += geom_violin()
plot += ggtitle('Effect of subsetting healthy data on prediction accuracy')
print(plot)
# ## TB Batch effect corrected
# In[37]:
in_files = glob.glob('../../results/subset_label.tb*be_corrected.tsv')
print(in_files[:5])
# In[38]:
tuberculosis_metrics = pd.DataFrame()
for path in in_files:
new_df = pd.read_csv(path, sep='\t')
model_info = path.strip('.tsv').split('tb.')[-1]
model_info = model_info.split('.')
supervised_model = model_info[0]
new_df['supervised'] = supervised_model
tuberculosis_metrics = pd.concat([tuberculosis_metrics, new_df])
tuberculosis_metrics = tuberculosis_metrics.rename({'fraction of healthy used': 'healthy_used'}, axis='columns')
tuberculosis_metrics['healthy_used'] = tuberculosis_metrics['healthy_used'].round(1)
tuberculosis_metrics
# In[39]:
plot = ggplot(tuberculosis_metrics, aes(x='factor(healthy_used)', y='accuracy'))
plot += geom_boxplot()
plot += ggtitle('Effect of subsetting healthy data on prediction accuracy')
print(plot)
# In[40]:
plot = ggplot(tuberculosis_metrics, aes(x='factor(healthy_used)', y='accuracy', fill='supervised'))
plot += geom_violin()
plot += ggtitle('Effect of subsetting healthy data on prediction accuracy')
print(plot)
# ## Better Metrics, Same Label Distribution in Train and Val sets
# In[11]:
in_files = glob.glob('../../results/keep_ratios.sepsis*be_corrected.tsv')
print(in_files[:5])
# In[12]:
sepsis_metrics = pd.DataFrame()
for path in in_files:
new_df = pd.read_csv(path, sep='\t')
model_info = path.strip('.tsv').split('sepsis.')[-1]
model_info = model_info.split('.')
supervised_model = model_info[0]
new_df['supervised'] = supervised_model
new_df['seed'] = model_info[-2]
sepsis_metrics = pd.concat([sepsis_metrics, new_df])
sepsis_metrics = sepsis_metrics.rename({'fraction of data used': 'healthy_used'}, axis='columns')
sepsis_metrics['healthy_used'] = sepsis_metrics['healthy_used'].round(1)
# Looking at the training curves, deep_net isn't actually training
# I need to fix it going forward, but for now I can clean up the visualizations by removing it
sepsis_metrics = sepsis_metrics[~(sepsis_metrics['supervised'] == 'deep_net')]
sepsis_metrics['supervised'] = sepsis_metrics['supervised'].str.replace('pytorch_supervised', 'three_layer_net')
sepsis_metrics
# In[13]:
plot = ggplot(sepsis_metrics, aes(x='factor(healthy_used)', y='accuracy'))
plot += geom_boxplot()
plot += ggtitle('Effect of subsetting healthy data on prediction accuracy')
print(plot)
# In[14]:
plot = ggplot(sepsis_metrics, aes(x='factor(healthy_used)', y='accuracy', fill='supervised'))
plot += geom_violin()
plot += ggtitle('Effect of subsetting healthy data on prediction accuracy')
print(plot)
# In[15]:
plot = ggplot(sepsis_metrics, aes(x='factor(healthy_used)', y='balanced_accuracy', fill='supervised'))
plot += geom_violin()
plot += ggtitle('Effect of subsetting healthy data on prediction accuracy')
print(plot)
# In[16]:
sepsis_stat_df = create_dataset_stat_df(sepsis_metrics,
sample_to_study,
sample_metadata,
sample_to_label,
'sepsis')
sepsis_stat_df.tail(5)
# In[17]:
ggplot(sepsis_stat_df, aes(x='train_val_diff',
y='balanced_accuracy',
color='val_disease_count')) + geom_point() + facet_grid('model ~ .')
# In[18]:
plot = ggplot(sepsis_metrics, aes(x='train sample count', y='balanced_accuracy', color='supervised'))
plot += geom_smooth()
plot += geom_point(alpha=.2)
plot += geom_hline(yintercept=.5, linetype='dashed')
plot += ggtitle('Effect of All Sepsis Data')
plot
# ## Same Distribution Tuberculosis
# In[19]:
in_files = glob.glob('../../results/keep_ratios.tb*be_corrected.tsv')
print(in_files[:5])
# In[20]:
tb_metrics = pd.DataFrame()
for path in in_files:
new_df = pd.read_csv(path, sep='\t')
model_info = path.strip('.tsv').split('tb.')[-1]
model_info = model_info.split('.')
supervised_model = model_info[0]
new_df['supervised'] = supervised_model
new_df['seed'] = model_info[-2]
tb_metrics = pd.concat([tb_metrics, new_df])
tb_metrics = tb_metrics.rename({'fraction of data used': 'healthy_used'}, axis='columns')
tb_metrics['healthy_used'] = tb_metrics['healthy_used'].round(1)
tb_metrics
# In[21]:
plot = ggplot(tb_metrics, aes(x='factor(healthy_used)', y='accuracy'))
plot += geom_boxplot()
plot += ggtitle('Effect of subsetting healthy data on prediction accuracy')
print(plot)
# In[22]:
plot = ggplot(tb_metrics, aes(x='factor(healthy_used)', y='accuracy', fill='supervised'))
plot += geom_violin()
plot += ggtitle('Effect of subsetting healthy data on prediction accuracy')
print(plot)
# In[23]:
plot = ggplot(tb_metrics, aes(x='factor(healthy_used)', y='balanced_accuracy', fill='supervised'))
plot += geom_violin()
plot += ggtitle('Effect of subsetting healthy data on prediction accuracy')
print(plot)
# In[24]:
tb_stat_df = create_dataset_stat_df(tb_metrics,
sample_to_study,
sample_metadata,
sample_to_label,
'tb')
tb_stat_df.tail(5)
# In[55]:
ggplot(tb_stat_df, aes(x='train_val_diff',
y='balanced_accuracy',
color='val_disease_count')) + geom_point() + facet_grid('model ~ .')
# In[25]:
plot = ggplot(tb_metrics, aes(x='train sample count', y='balanced_accuracy', color='supervised'))
plot += geom_smooth()
plot += geom_point(alpha=.2)
plot
# ## Results from Small Datasets
# In[57]:
in_files = glob.glob('../../results/small_subsets.sepsis*be_corrected.tsv')
print(in_files[:5])
# In[58]:
sepsis_metrics = pd.DataFrame()
for path in in_files:
new_df = pd.read_csv(path, sep='\t')
model_info = path.strip('.tsv').split('sepsis.')[-1]
model_info = model_info.split('.')
supervised_model = model_info[0]
new_df['supervised'] = supervised_model
new_df['seed'] = model_info[-2]
sepsis_metrics = pd.concat([sepsis_metrics, new_df])
sepsis_metrics['train_count'] = sepsis_metrics['train sample count']
# Looking at the training curves, deep_net isn't actually training
# I need to fix it going forward, but for now I can clean up the visualizations by removing it
sepsis_metrics = sepsis_metrics[~(sepsis_metrics['supervised'] == 'deep_net')]
sepsis_metrics['supervised'] = sepsis_metrics['supervised'].str.replace('pytorch_supervised', 'three_layer_net')
sepsis_metrics
# In[59]:
plot = ggplot(sepsis_metrics, aes(x='factor(train_count)', y='balanced_accuracy'))
plot += geom_boxplot()
plot += ggtitle('Sepsis Dataset Size Effects (equal label counts)')
print(plot)
# In[60]:
plot = ggplot(sepsis_metrics, aes(x='factor(train_count)', y='balanced_accuracy', fill='supervised'))
plot += geom_boxplot()
plot += ggtitle('Sepsis Dataset Size by Model (equal label counts)')
print(plot)
# In[61]:
plot = ggplot(sepsis_metrics, aes(x='train_count', y='balanced_accuracy', color='supervised'))
plot += geom_smooth()
plot += geom_point(alpha=.2)
plot += geom_hline(yintercept=.5, linetype='dashed')
plot += ggtitle('Sepsis Crossover Point')
plot
# ## Small Training Set TB
# In[62]:
in_files = glob.glob('../../results/small_subsets.tb*be_corrected.tsv')
print(in_files[:5])
# In[63]:
tb_metrics = pd.DataFrame()
for path in in_files:
new_df = pd.read_csv(path, sep='\t')
model_info = path.strip('.tsv').split('tb.')[-1]
model_info = model_info.split('.')
supervised_model = model_info[0]
new_df['supervised'] = supervised_model
new_df['seed'] = model_info[-2]
tb_metrics = pd.concat([tb_metrics, new_df])
tb_metrics['train_count'] = tb_metrics['train sample count']
# Looking at the training curves, deep_net isn't actually training
# I need to fix it going forward, but for now I can clean up the visualizations by removing it
tb_metrics = tb_metrics[~(tb_metrics['supervised'] == 'deep_net')]
tb_metrics['supervised'] = tb_metrics['supervised'].str.replace('pytorch_supervised', 'three_layer_net')
tb_metrics
# In[64]:
plot = ggplot(tb_metrics, aes(x='factor(train_count)', y='balanced_accuracy'))
plot += geom_boxplot()
plot += ggtitle('TB Dataset Size Effects (equal label counts)')
print(plot)
# In[65]:
plot = ggplot(tb_metrics, aes(x='factor(train_count)', y='balanced_accuracy', fill='supervised'))
plot += geom_boxplot()
plot += ggtitle('TB Dataset Size vs Models (equal label counts)')
print(plot)
# In[66]:
plot = ggplot(tb_metrics, aes(x='train_count', y='balanced_accuracy', color='supervised'))
plot += geom_smooth(method='loess')
plot += geom_point(alpha=.2)
plot += geom_hline(yintercept=.5, linetype='dashed')
plot += ggtitle('TB (lack of a) Crossover Point')
plot
# ## Small training sets without be correction
# In[67]:
in_files = glob.glob('../../results/small_subsets.sepsis*.tsv')
in_files = [file for file in in_files if 'be_corrected' not in file]
print(in_files[:5])
# In[68]:
sepsis_metrics = pd.DataFrame()
for path in in_files:
new_df = pd.read_csv(path, sep='\t')
model_info = path.strip('.tsv').split('sepsis.')[-1]
model_info = model_info.split('.')
supervised_model = model_info[0]
new_df['supervised'] = supervised_model
new_df['seed'] = model_info[-2]
sepsis_metrics = pd.concat([sepsis_metrics, new_df])
sepsis_metrics['train_count'] = sepsis_metrics['train sample count']
# Looking at the training curves, deep_net isn't actually training
# I need to fix it going forward, but for now I can clean up the visualizations by removing it
sepsis_metrics = sepsis_metrics[~(sepsis_metrics['supervised'] == 'deep_net')]
sepsis_metrics['supervised'] = sepsis_metrics['supervised'].str.replace('pytorch_supervised', 'three_layer_net')
sepsis_metrics
# In[69]:
plot = ggplot(sepsis_metrics, aes(x='factor(train_count)', y='balanced_accuracy'))
plot += geom_boxplot()
plot += ggtitle('Sepsis Dataset Size Effects (equal label counts)')
print(plot)
# In[70]:
plot = ggplot(sepsis_metrics, aes(x='factor(train_count)', y='balanced_accuracy', fill='supervised'))
plot += geom_boxplot()
plot += ggtitle('Sepsis Dataset Size by Model (equal label counts)')
print(plot)
# In[71]:
plot = ggplot(sepsis_metrics, aes(x='train_count', y='balanced_accuracy', color='supervised'))
plot += geom_smooth()
plot += geom_point(alpha=.2)
plot += geom_hline(yintercept=.5, linetype='dashed')
plot += ggtitle('Sepsis Crossover Point')
plot
# ## TB Not Batch Effect Corrected
# In[72]:
in_files = glob.glob('../../results/small_subsets.tb*.tsv')
in_files = [file for file in in_files if 'be_corrected' not in file]
print(in_files[:5])
# In[73]:
tb_metrics = pd.DataFrame()
for path in in_files:
new_df = pd.read_csv(path, sep='\t')
model_info = path.strip('.tsv').split('tb.')[-1]
model_info = model_info.split('.')
supervised_model = model_info[0]
new_df['supervised'] = supervised_model
new_df['seed'] = model_info[-2]
tb_metrics = pd.concat([tb_metrics, new_df])
tb_metrics['train_count'] = tb_metrics['train sample count']
# Looking at the training curves, deep_net isn't actually training
# I need to fix it going forward, but for now I can clean up the visualizations by removing it
tb_metrics = tb_metrics[~(tb_metrics['supervised'] == 'deep_net')]
tb_metrics['supervised'] = tb_metrics['supervised'].str.replace('pytorch_supervised', 'three_layer_net')
tb_metrics
# In[74]:
plot = ggplot(tb_metrics, aes(x='factor(train_count)', y='balanced_accuracy'))
plot += geom_boxplot()
plot += ggtitle('tb Dataset Size Effects (equal label counts)')
print(plot)
# In[75]:
plot = ggplot(tb_metrics, aes(x='factor(train_count)', y='balanced_accuracy', fill='supervised'))
plot += geom_boxplot()
plot += ggtitle('tb Dataset Size by Model (equal label counts)')
print(plot)
# In[76]:
plot = ggplot(tb_metrics, aes(x='train_count', y='balanced_accuracy', color='supervised'))
plot += geom_smooth()
plot += geom_point(alpha=.2)
plot += geom_hline(yintercept=.5, linetype='dashed')
plot += ggtitle('tb Crossover Point')
plot
# ## Large training sets without be correction
# In[6]:
in_files = glob.glob('../../results/keep_ratios.sepsis*.tsv')
in_files = [file for file in in_files if 'be_corrected' not in file]
print(in_files[:5])
# In[9]:
sepsis_metrics = pd.DataFrame()
for path in in_files:
new_df = pd.read_csv(path, sep='\t')
model_info = path.strip('.tsv').split('sepsis.')[-1]
model_info = model_info.split('.')
supervised_model = model_info[0]
new_df['supervised'] = supervised_model
new_df['seed'] = model_info[-2]
sepsis_metrics = pd.concat([sepsis_metrics, new_df])
sepsis_metrics['train_count'] = sepsis_metrics['train sample count']
sepsis_metrics['supervised'] = sepsis_metrics['supervised'].str.replace('pytorch_supervised', 'three_layer_net')
sepsis_metrics
# In[10]:
plot = ggplot(sepsis_metrics, aes(x='train_count', y='balanced_accuracy', color='supervised'))
plot += geom_smooth()
plot += geom_point(alpha=.2)
plot += geom_hline(yintercept=.5, linetype='dashed')
plot += ggtitle('Sepsis Crossover Point')
plot
# ## TB Not Batch Effect Corrected
# In[80]:
in_files = glob.glob('../../results/keep_ratios.tb*.tsv')
in_files = [file for file in in_files if 'be_corrected' not in file]
print(in_files[:5])
# In[81]:
tb_metrics = pd.DataFrame()
for path in in_files:
new_df = pd.read_csv(path, sep='\t')
model_info = path.strip('.tsv').split('tb.')[-1]
model_info = model_info.split('.')
supervised_model = model_info[0]
new_df['supervised'] = supervised_model
new_df['seed'] = model_info[-2]
tb_metrics = pd.concat([tb_metrics, new_df])
tb_metrics['train_count'] = tb_metrics['train sample count']
# Looking at the training curves, deep_net isn't actually training
# I need to fix it going forward, but for now I can clean up the visualizations by removing it
tb_metrics = tb_metrics[~(tb_metrics['supervised'] == 'deep_net')]
tb_metrics['supervised'] = tb_metrics['supervised'].str.replace('pytorch_supervised', 'three_layer_net')
tb_metrics
# In[82]:
plot = ggplot(tb_metrics, aes(x='train_count', y='balanced_accuracy', color='supervised'))
plot += geom_smooth()
plot += geom_point(alpha=.2)
plot += geom_hline(yintercept=.5, linetype='dashed')
plot += ggtitle('tb Crossover Point')
plot
# ## Lupus Analyses
# In[83]:
in_files = glob.glob('../../results/keep_ratios.lupus*.tsv')
in_files = [file for file in in_files if 'be_corrected' in file]
print(in_files[:5])
# In[84]:
lupus_metrics = pd.DataFrame()
for path in in_files:
new_df = pd.read_csv(path, sep='\t')
model_info = path.strip('.tsv').split('lupus.')[-1]
model_info = model_info.split('.')
supervised_model = model_info[0]
new_df['supervised'] = supervised_model
new_df['seed'] = model_info[-2]
lupus_metrics = pd.concat([lupus_metrics, new_df])
lupus_metrics['train_count'] = lupus_metrics['train sample count']
# Looking at the training curves, deep_net isn't actually training
# I need to fix it going forward, but for now I can clean up the visualizations by removing it
lupus_metrics = lupus_metrics[~(lupus_metrics['supervised'] == 'deep_net')]
lupus_metrics['supervised'] = lupus_metrics['supervised'].str.replace('pytorch_supervised', 'three_layer_net')
lupus_metrics
# In[85]:
plot = ggplot(lupus_metrics, aes(x='train_count', y='balanced_accuracy', color='supervised'))
plot += geom_smooth()
plot += geom_point(alpha=.2)
plot += geom_hline(yintercept=.5, linetype='dashed')
plot += ggtitle('lupus Crossover Point')
plot
# ## Lupus Not Batch Effect Corrected
# In[86]:
in_files = glob.glob('../../results/keep_ratios.lupus*.tsv')
in_files = [file for file in in_files if 'be_corrected' not in file]
print(in_files[:5])
# In[87]:
lupus_metrics = pd.DataFrame()
for path in in_files:
new_df = pd.read_csv(path, sep='\t')
model_info = path.strip('.tsv').split('lupus.')[-1]
model_info = model_info.split('.')
supervised_model = model_info[0]
new_df['supervised'] = supervised_model
new_df['seed'] = model_info[-2]
lupus_metrics = pd.concat([lupus_metrics, new_df])
lupus_metrics['train_count'] = lupus_metrics['train sample count']
# Looking at the training curves, deep_net isn't actually training
# I need to fix it going forward, but for now I can clean up the visualizations by removing it
lupus_metrics = lupus_metrics[~(lupus_metrics['supervised'] == 'deep_net')]
lupus_metrics['supervised'] = lupus_metrics['supervised'].str.replace('pytorch_supervised', 'three_layer_net')
lupus_metrics
# In[88]:
plot = ggplot(lupus_metrics, aes(x='train_count', y='balanced_accuracy', color='supervised'))
plot += geom_smooth()
plot += geom_point(alpha=.2)
plot += geom_hline(yintercept=.5, linetype='dashed')
plot += ggtitle('lupus Crossover Point')
plot
# ## Tissue Prediction
# In[2]:
in_files = glob.glob('../../results/Blood.Breast.*.tsv')
in_files = [f for f in in_files if 'be_corrected' not in f]
print(in_files[:5])
# In[3]:
tissue_metrics = pd.DataFrame()
for path in in_files:
new_df = pd.read_csv(path, sep='\t')
model_info = path.strip('.tsv').split('Breast.')[-1]
model_info = model_info.split('_')
supervised_model = '_'.join(model_info[:2])
new_df['supervised'] = supervised_model
new_df['seed'] = model_info[-1]
tissue_metrics = pd.concat([tissue_metrics, new_df])
tissue_metrics['train_count'] = tissue_metrics['train sample count']
tissue_metrics['supervised'] = tissue_metrics['supervised'].str.replace('pytorch_supervised', 'three_layer_net')
tissue_metrics
# In[4]:
plot = ggplot(tissue_metrics, aes(x='train_count', y='balanced_accuracy', color='supervised'))
plot += geom_smooth()
plot += geom_point(alpha=.2)
plot += geom_hline(yintercept=.5, linetype='dashed')
plot += ggtitle('Blood vs Breast Tissue Prediction')
plot
# ### BE Corrected binary tissue classification
# In[5]:
in_files = glob.glob('../../results/Blood.Breast.*.tsv')
in_files = [f for f in in_files if 'be_corrected' in f]
print(in_files[:5])
# In[6]:
tissue_metrics = pd.DataFrame()
for path in in_files:
new_df = pd.read_csv(path, sep='\t')
model_info = path.strip('.tsv').split('Breast.')[-1]
model_info = model_info.split('_')
supervised_model = '_'.join(model_info[:2])
new_df['supervised'] = supervised_model
new_df['seed'] = model_info[-1]
tissue_metrics = pd.concat([tissue_metrics, new_df])
tissue_metrics['train_count'] = tissue_metrics['train sample count']
tissue_metrics['supervised'] = tissue_metrics['supervised'].str.replace('pytorch_supervised', 'three_layer_net')
tissue_metrics
# In[7]:
plot = ggplot(tissue_metrics, aes(x='train_count', y='balanced_accuracy', color='supervised'))
plot += geom_smooth()
plot += geom_point(alpha=.2)
plot += geom_hline(yintercept=.5, linetype='dashed')
plot += ggtitle('Blood vs Breast Tissue Prediction')
plot
# ### All Tissue Predictions
# In[8]:
in_files = glob.glob('../../results/all-tissue.*.tsv')
in_files = [f for f in in_files if 'be_corrected' not in f]
print(in_files[:5])
# In[9]:
tissue_metrics = pd.DataFrame()
for path in in_files:
new_df = pd.read_csv(path, sep='\t')
model_info = path.strip('.tsv').split('all-tissue.')[-1]
model_info = model_info.split('_')
supervised_model = '_'.join(model_info[:-1])
new_df['supervised'] = supervised_model
new_df['seed'] = model_info[-1]
tissue_metrics = pd.concat([tissue_metrics, new_df])
tissue_metrics['train_count'] = tissue_metrics['train sample count']
tissue_metrics['supervised'] = tissue_metrics['supervised'].str.replace('pytorch_supervised', 'three_layer_net')
tissue_metrics['supervised'] = tissue_metrics['supervised'].str.replace('deep_net', 'five_layer_net')
tissue_metrics
# In[10]:
plot = ggplot(tissue_metrics, aes(x='train_count', y='balanced_accuracy', color='supervised'))
plot += geom_smooth()
plot += geom_point(alpha=.2)
plot += ggtitle('All Tissue Prediction')
plot
# ## Imputation pretraining
# In[11]:
in_files = glob.glob('../../results/tissue_impute.*.tsv')
print(in_files[:5])
# In[12]:
tissue_metrics = pd.DataFrame()
for path in in_files:
new_df = pd.read_csv(path, sep='\t')
model_info = path.strip('.tsv').split('tissue_impute.')[-1]
model_info = model_info.split('_')
supervised_model = '_'.join(model_info[:2])
new_df['supervised'] = supervised_model
new_df['seed'] = model_info[-1]
tissue_metrics = pd.concat([tissue_metrics, new_df])
tissue_metrics['train_count'] = tissue_metrics['train sample count']
tissue_metrics = tissue_metrics.rename({'impute_samples': 'pretraining_sample_count'}, axis='columns')
tissue_metrics
# In[13]:
plot = ggplot(tissue_metrics, aes(x='train_count', y='balanced_accuracy', color='factor(supervised)'))
plot += geom_smooth()
plot += geom_point(alpha=.2)
plot += ggtitle('Effects of Imputation on Multiclass Tissue Prediction')
plot += facet_grid('pretraining_sample_count ~ .')
plot
# In[14]:
plot = ggplot(tissue_metrics, aes(x='train_count', y='balanced_accuracy', color='factor(pretraining_sample_count)'))
plot += geom_smooth()
plot += geom_point(alpha=.2)
plot += ggtitle('Effects of Imputation on Multiclass Tissue Prediction')
plot
# ## Adding BioBERT Embeddings
# In[15]:
in_files = glob.glob('../../results/all-tissue-biobert*.tsv')
print(in_files[:5])
# In[16]:
tissue_metrics = pd.DataFrame()
for path in in_files:
new_df = pd.read_csv(path, sep='\t')
model_info = path.strip('.tsv').split('biobert.')[-1]
model_info = model_info.split('_')
supervised_model = '_'.join(model_info[:2])
new_df['supervised'] = supervised_model
new_df['seed'] = model_info[-1]
tissue_metrics = pd.concat([tissue_metrics, new_df])
import pandas as pd
import pandas.testing as pdt
import pytest
from cape_privacy.pandas.transformations import ReversibleTokenizer
from cape_privacy.pandas.transformations import Tokenizer
from cape_privacy.pandas.transformations import TokenReverser
def test_tokenizer():
transform = Tokenizer(key="secret_key")
df = pd.DataFrame({"name": ["Alice", "Bob"]})
expected = pd.DataFrame(
{
"name": [
"<KEY>",
"<KEY>",
]
}
)
df["name"] = transform(df["name"])
pdt.assert_frame_equal(df, expected)
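# Sketch of a round-trip check using the ReversibleTokenizer / TokenReverser pair imported
# above. It assumes both accept the same `key` and are applied column-wise like Tokenizer;
# the call style mirrors test_tokenizer rather than behaviour verified against the library.
def test_reversible_tokenizer_round_trip():
    df = pd.DataFrame({"name": ["Alice", "Bob"]})
    tokenizer = ReversibleTokenizer(key="secret_key")
    reverser = TokenReverser(key="secret_key")
    recovered = reverser(tokenizer(df["name"]))
    pdt.assert_series_equal(recovered, df["name"], check_names=False)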
import pytest
import numpy as np
import pandas as pd
from delphi_jhu.geo import geo_map, add_county_pop, INCIDENCE_BASE
from delphi_utils import GeoMapper
from delphi_jhu.geo import geo_map, INCIDENCE_BASE
class TestGeoMap:
def test_incorrect_geo(self, jhu_confirmed_test_data):
df = jhu_confirmed_test_data
with pytest.raises(ValueError):
geo_map(df, "département", "cumulative_prop")
def test_fips(self, jhu_confirmed_test_data):
test_df = jhu_confirmed_test_data
fips_df = geo_map(test_df, "county", "cumulative_prop")
test_df = fips_df.loc[(fips_df.geo_id == "01001") & (fips_df.timestamp == "2020-09-15")]
gmpr = GeoMapper()
fips_pop = gmpr.get_crosswalk("fips", "pop")
pop01001 = float(fips_pop.loc[fips_pop.fips == "01001", "pop"])
expected_df = pd.DataFrame({
"geo_id": "01001",
"timestamp": pd.Timestamp("2020-09-15"),
"cumulative_counts": 1463.0,
"new_counts": 1463.0,
"population": pop01001,
"incidence": 1463 / pop01001 * INCIDENCE_BASE,
"cumulative_prop": 1463 / pop01001 * INCIDENCE_BASE
}, index=[36])
pd.testing.assert_frame_equal(test_df, expected_df)
# Make sure the prop signals don't have inf values
assert not fips_df["incidence"].eq(np.inf).any()
assert not fips_df["cumulative_prop"].eq(np.inf).any()
# make sure no megafips reported
assert not any(i[0].endswith("000") for i in fips_df.geo_id)
def test_state_hhs_nation(self, jhu_confirmed_test_data):
df = jhu_confirmed_test_data
state_df = geo_map(df, "state", "cumulative_prop")
test_df = state_df.loc[(state_df.geo_id == "al") & (state_df.timestamp == "2020-09-15")]
gmpr = GeoMapper()
state_pop = gmpr.get_crosswalk("state_id", "pop")
al_pop = float(state_pop.loc[state_pop.state_id == "al", "pop"])
expected_df = pd.DataFrame({
"timestamp": pd.Timestamp("2020-09-15"),
"geo_id": "al",
"cumulative_counts": 140160.0,
"new_counts": 140160.0,
"population": al_pop,
"incidence": 140160 / al_pop * INCIDENCE_BASE,
"cumulative_prop": 140160 / al_pop * INCIDENCE_BASE
}, index=[1])
        pd.testing.assert_frame_equal(test_df, expected_df)
#!/usr/bin/env python
# coding: utf-8
import os
import sys
import git
import shutil
import logging
import argparse
parser = argparse.ArgumentParser()
import time
from datetime import timedelta, datetime
from dateutil import tz
from jinja2 import Environment, FileSystemLoader
import yaml
import json
import pandas
import math
import requests
import ephem
from pyorbital.orbital import Orbital
from pyorbital import tlefile
from geographiclib.geodesic import Geodesic
import osgeo.ogr
import osgeo.osr
from osgeo import ogr
import simplekml
import folium
from folium import plugins
from colour import Color
parser.add_argument('--configuration', dest='configuration', default='config.yaml',help='ground station configuration')
parser.add_argument('--start', dest='start', help='start time YYYY-MM-DDTHH:MM:SS format')
parser.add_argument('--period', dest='period', help='ground station configuration')
parser.add_argument('--output_path', dest='output_path', help='ground station configuration')
args = parser.parse_args()
def getredtoblack(number):
"""
    Return a list of colours for a given number of samples
"""
if number < 256:
numbercolours = 256
else:
numbercolours = number
rangevalue = int((numbercolours)/4)
if number == 1:
return(['red'])
if number == 2:
return(['red', 'black'])
red = Color("red")
orange = Color("orange")
yellow = Color("yellow")
white = Color("white")
black = Color("black")
redorange = tuple(red.range_to(orange, rangevalue))
orangeyellow = tuple(orange.range_to(yellow, rangevalue+1))
yellowwhite = tuple(yellow.range_to(white,rangevalue+1))
whiteblack = tuple(white.range_to(black,rangevalue+1))
redtoblack = redorange + orangeyellow[1:] + yellowwhite[1:] + whiteblack[1:]
redtoblacklist = list(redtoblack)
colours = []
position = 0
increment = int(len(redtoblacklist)/(number-1))
while (position < len(redtoblacklist)):
colours.append(redtoblacklist[position])
position = position + increment
colours[len(colours)-1] = black
return(colours)
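# A minimal usage sketch of the colour ramp helper above, mirroring how the schedule
# rendering calls it (one extra slot, as in getredtoblack(len(schedule)+1)); the
# five-pass count and the function name here are illustrative assumptions.
def _example_colour_ramp(number_of_passes=5):
    pass_colours = getredtoblack(number_of_passes + 1)
    # colour.Color objects stringify to a colour name or hex string, e.g. 'red' ... 'black'
    return [str(c) for c in pass_colours]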
def local_time(utc, local):
"""Return a local time representation of UTC"""
to_zone = tz.gettz(local)
from_zone = tz.tzutc()
# Set UTC datetime object to UTC
utc = utc.replace(tzinfo=from_zone)
# Convert time zone
return utc.astimezone(to_zone)
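# Minimal sketch of the timezone helper above; the zone name is an illustrative
# assumption (any IANA zone string accepted by dateutil works).
def _example_local_time():
    aos_utc = datetime.utcnow()
    return local_time(aos_utc, 'Australia/Sydney')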
def download_file(url):
"""Return local filename from input URL"""
local_filename = url.split('/')[-1]
r = requests.get(url, stream=True)
with open(local_filename, 'wb') as f:
for chunk in r.iter_content(chunk_size=1024):
if chunk: # filter out keep-alive new chunks
f.write(chunk)
return local_filename
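# Minimal sketch of the download helper above; the URL is illustrative and stands in
# for one of the TLE sources listed in the ground station configuration.
def _example_download_tle():
    return download_file('https://celestrak.com/NORAD/elements/weather.txt')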
def get_tles():
""" Return a list of tuples of kepler parameters for each satellite"""
tle_file_basenames = []
for url in tle_files:
tle_file_basename = os.path.basename(url)
tle_file_basenames.append(tle_file_basename)
try:
os.remove(tle_file_basename)
except OSError:
pass
try:
download_file(url)
logging.info("Downloading URL from configuration sources:" + tle_file_basename)
except OSError:
logging.info("Process could not download file:" + tle_file_basename)
return ()
with open('tles.txt', 'w') as outfile:
for fname in tle_file_basenames:
with open(fname) as infile:
for line in infile:
outfile.write(line)
return clean_combine_tles()
def clean_combine_tles():
tles = open('tles.txt', 'r').readlines()
logging.info("Combining TLE from configuration sources")
# strip off the header tokens and newlines
tles = [item.strip() for item in tles]
# clean up the lines
tles = [(tles[i], tles[i + 1], tles[i + 2]) for i in range(0, len(tles) - 2, 3)]
return(tles)
def check_tle_is_current():
"""Returns whether the TLE file was retrieved less than 24hrs ago and initiates download"""
# Check if TLE exists and get it if not
if os.path.exists('tles.txt'):
tle_retrieve_time = datetime.fromtimestamp(os.path.getmtime('tles.txt'))
else:
get_tles()
logging.info('tle.txt does not exist')
tle_retrieve_time = datetime.utcnow()
# check for stale tle and update if required
tle_timesinceretrieve = datetime.now() - tle_retrieve_time
# Compare TLE age against daily update requirement
if tle_timesinceretrieve > timedelta(hours=24):
tle_retrieve_time = datetime.utcnow()
get_tles()
logging.info("Stale TLE detected and replaced at" + str(tle_retrieve_time))
else:
logging.info("TLE file current and does not require update")
logging.info("HH:MM:SS since TLE retrieval:"+ str(tle_timesinceretrieve))
return clean_combine_tles()
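# Minimal sketch of feeding one cleaned TLE triplet into pyorbital; it assumes that
# tles.txt is present (or that the configured TLE sources are reachable) and that at
# least one satellite was parsed. The function name is illustrative.
def _example_orbital_from_tle():
    tles = check_tle_is_current()
    name, line1, line2 = tles[0]
    return Orbital(name, line1=line1, line2=line2)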
def update_points_crossing_antimeridian(listofpoints):
"""Return list of points which account for crossing of antimeridian"""
if len(listofpoints) == 0:
return(listofpoints)
antimeridianlistofpoints = []
diff = 325.0
referencepointlon = listofpoints[0]['lon2']
crossingeasttowest = False
for point in listofpoints:
# Confirm no crossing of antimeridian between points
if (abs(referencepointlon - point['lon2']) <= diff):
antimeridianlistofpoints.append(point)
referencepointlon = listofpoints[0]['lon2']
else:
# if crossing antimeridian west to east add 360 i.e. diff will be negative
if ((referencepointlon - point['lon2']) >= diff):
point['lon2'] = point['lon2']+360
antimeridianlistofpoints.append(point)
referencepointlon = point['lon2']
# if crossing antimeridian east to west minus 360 i.e. diff will be negative
if ((referencepointlon - point['lon2']) <= (diff*-1)):
point['lon2'] = point['lon2']-360
antimeridianlistofpoints.append(point)
referencepointlon = point['lon2']
# Crossing east to west
crossingeasttowest = True
if crossingeasttowest == True:
for point in antimeridianlistofpoints:
point['lon2'] = point['lon2']+360
return(antimeridianlistofpoints)
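# Minimal sketch of the antimeridian handling above: two consecutive points that
# straddle 180 degrees longitude get unwrapped onto a continuous range. The
# coordinates are illustrative only.
def _example_antimeridian_unwrap():
    pts = [{'lon2': 179.5, 'lat2': -10.0, 'alt2': 800.0},
           {'lon2': -179.5, 'lat2': -10.5, 'alt2': 800.0}]
    unwrapped = update_points_crossing_antimeridian(pts)
    return [p['lon2'] for p in unwrapped]  # expected roughly [179.5, 180.5]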
def get_vector_file(attributes, input_points, poly_or_line, ogr_output, ogr_format):
""" Returns spatial layer built on inputs - attributes, points, polygon or line, output in specified format"""
input_points = update_points_crossing_antimeridian(input_points)
spatialReference = osgeo.osr.SpatialReference()
spatialReference.ImportFromProj4('+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs')
# if no points passed for ogr build return
if len(input_points) == 0:
return ()
try:
os.remove(ogr_output)
except OSError:
pass
ogr.UseExceptions()
driver = ogr.GetDriverByName(ogr_format)
if os.path.exists(ogr_output):
driver.DeleteDataSource(ogr_output)
ds = driver.CreateDataSource(ogr_output)
if poly_or_line == 'polygon':
geomtype = ogr.wkbPolygon
if poly_or_line == 'line':
geomtype = ogr.wkbLineString
if poly_or_line == 'point':
geomtype = ogr.wkbPoint
if ds is None:
logging.info("Process could not create file")
sys.exit(1)
layer = ds.CreateLayer(attributes['Satellite name'], geom_type=geomtype)
field_definition= ogr.FieldDefn('Satellite :', ogr.OFTString)
field_definition.SetWidth(30)
layer.CreateField(field_definition)
field_definition = ogr.FieldDefn('Sensor :', ogr.OFTString)
field_definition.SetWidth(30)
layer.CreateField(field_definition)
field_definition = ogr.FieldDefn('Orbit height :', ogr.OFTString)
field_definition.SetWidth(30)
layer.CreateField(field_definition)
layer.CreateField(ogr.FieldDefn('Orbit number :', ogr.OFTInteger))
'''
field_definition = ogr.FieldDefn('Current UTC time :', ogr.OFTString)
field_definition.SetWidth(30)
layer.CreateField(field_definition)
field_definition = ogr.FieldDefn('Minutes to horizon :', ogr.OFTString)
field_definition.SetWidth(30)
layer.CreateField(field_definition)
'''
field_definition = ogr.FieldDefn('Acquisition of Signal Local :', ogr.OFTString)
field_definition.SetWidth(30)
layer.CreateField(field_definition)
field_definition = ogr.FieldDefn('Acquisition of Signal UTC :', ogr.OFTString)
field_definition.SetWidth(30)
layer.CreateField(field_definition)
field_definition = ogr.FieldDefn('Loss of Signal UTC :', ogr.OFTString)
field_definition.SetWidth(30)
layer.CreateField(field_definition)
field_definition = ogr.FieldDefn('Transit time :', ogr.OFTString)
field_definition.SetWidth(30)
layer.CreateField(field_definition)
field_definition = ogr.FieldDefn('Node :', ogr.OFTString)
field_definition.SetWidth(30)
layer.CreateField(field_definition)
feature_definition = layer.GetLayerDefn()
feature = ogr.Feature(feature_definition)
feature.SetField('Satellite :', attributes['Satellite name'])
feature.SetField('Sensor :', attributes['Sensor code'])
feature.SetField('Orbit height :', attributes['Orbit height'])
feature.SetField('Orbit number :', attributes['Orbit'])
'''
feature.SetField('Current UTC time :', str(attributes['Current time']))
feature.SetField('Minutes to horizon :', attributes['Minutes to horizon'])
'''
feature.SetField('Acquisition of Signal Local :', attributes['Local time'])
feature.SetField('Acquisition of Signal UTC :', str(attributes['AOS time']))
feature.SetField('Loss of Signal UTC :', str(attributes['LOS time']))
feature.SetField('Transit time :', str(attributes['Transit time']))
feature.SetField('Node :', attributes['Node'])
if poly_or_line == 'point':
point = ogr.Geometry(ogr.wkbPoint)
for x in input_points:
point.AddPoint(x['lon2'], x['lat2'], x['alt2'])
feature.SetGeometry(point)
layer.CreateFeature(feature)
point.Destroy()
if poly_or_line == 'line':
line = ogr.Geometry(type=ogr.wkbLineString)
for x in input_points:
line.AddPoint(x['lon2'], x['lat2'], x['alt2'])
feature.SetGeometry(line)
layer.CreateFeature(feature)
line.Destroy()
if poly_or_line == 'polygon':
ring = ogr.Geometry(ogr.wkbLinearRing)
#input_points = update_points_crossing_antimeridian(input_points, ogr_format, 'antimeridian.geojson')
for x in input_points:
ring.AddPoint(x['lon2'], x['lat2'])
poly = ogr.Geometry(ogr.wkbPolygon)
ring.color = "red"
poly.AddGeometry(ring)
feature.SetGeometry(poly)
layer.CreateFeature(feature)
ring.Destroy()
poly.Destroy()
feature.Destroy()
ds.Destroy()
# for KML - Add altitude to GeoJSON if ogr_format=="GeoJSON" and change colour of track to yellow
if ogr_format == "GeoJSON":
if poly_or_line == 'line':
replace_string_in_file(ogr_output, '<LineString>', '<LineString><altitudeMode>absolute</altitudeMode>')
replace_string_in_file(ogr_output, 'ff0000ff', 'ffffffff')
if poly_or_line == 'point':
replace_string_in_file(ogr_output, '<Point>', '<Point><altitudeMode>absolute</altitudeMode>')
if poly_or_line == 'polygon':
replace_string_in_file(ogr_output, '<PolyStyle><fill>0</fill>',
'<PolyStyle><color>7f0000ff</color><fill>1</fill>')
return ()
def replace_string_in_file(infile, text_to_find, text_to_insert):
in_file = open(infile, 'r')
temporary = open(os.path.join(output_path, 'tmp.txt'), 'w')
for line in in_file:
temporary.write(line.replace(text_to_find, text_to_insert))
in_file.close()
temporary.close()
os.remove(infile)
shutil.move(os.path.join(output_path, 'tmp.txt'), infile)
return ()
def get_effective_heading(satellite, oi_deg, latitude, longitude, tle_orbit_radius, daily_revolutions):
"""Returns the effective heading of the satellite"""
lat_rad = math.radians(latitude) # Latitude in radians
oi_rad = math.radians(oi_deg) # Orbital Inclination (OI) [radians]
orbit_radius = tle_orbit_radius * 1000.0 # Orbit Radius (R) [m]
    # Nodal Period [sec]; derived from the daily revolution count rather than the
    # previously hard-coded 5925.816
    nodal_period = (24 * 60 * 60) / daily_revolutions
    av = 2 * math.pi / nodal_period  # Angular Velocity (V0) [rad/sec]
#sr = 0 # Sensor Roll (r) [degrees] = 0
# TODO put earth parameters into a dict and add support for other spheroids GRS1980 etc.
# Earth Stuff (WGS84)
one_on_f = 298.257223563 # Inverse flattening 1/f = 298.257223563
#f = 1 / one_on_f # flattening
r = 6378137 # Radius (a) [m] = 6378137
e = 1 - math.pow((1 - 1 / one_on_f), 2) # Eccentricity (e^2) = 0.00669438 =1-(1-1/I5)^2
wO = 0.000072722052 # rotation (w0) [rad/sec] = 7.2722052E-05
xfac = math.sqrt(1 - e * (2 - e) * (math.pow(math.sin(math.radians(latitude)), 2)))
phi_rad = math.asin((1 - e) * math.sin(math.radians(latitude)) / xfac) # Phi0' (Geocentric latitude)
# phi_deg = math.degrees(phi_rad) # Phi0' (Degrees)
n = r / math.sqrt(1 - e * (math.pow(math.sin(math.radians(latitude)), 2))) # N
altphi_rad = latitude - 180 * math.asin(
n * e * math.sin(lat_rad) * math.cos(lat_rad) / orbit_radius) / math.pi # Alt Phi0'(Radians)
rho_rad = math.acos(math.sin(altphi_rad * math.pi / 180) / math.sin(oi_rad)) # Rho (Radians)
beta = -1 * (math.atan(1 / (math.tan(oi_rad) * math.sin(rho_rad))) * 180 / math.pi) # Heading Beta (degrees)
#xn = n * xfac # Xn
#altitude = (orbit_radius - xn) / 1000 # altitude
#altitude_ = (orbit_radius * math.cos(altphi_rad / 180 * math.pi) / math.cos(lat_rad) - n) / 1000
rotation = math.atan((wO * math.cos(phi_rad) * math.cos(beta * math.pi / 180)) / (
av + wO * math.cos(phi_rad) * math.sin(beta * math.pi / 180))) * 180 / math.pi
eh = beta + rotation
alpha12 = eh
#s = 0.5 * 185000 # s = distance in metres
effective_heading = alpha12
return effective_heading
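# Minimal sketch of the heading calculation above, using orbit parameters roughly
# representative of a sun-synchronous LEO satellite; all numbers are illustrative
# assumptions, not values read from a TLE.
def _example_effective_heading():
    return get_effective_heading('NOAA-19', 98.7, -35.3, 149.1, 7200.0, 14.12)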
def folium_timespan_geojson_html(schedule, satname):
timespan_map = folium.Map(location=[-26, 132], tiles='OpenStreetMap',zoom_start=3)
    lines = []
    polygons = []
    #polygontuples = ()
colorindex = 0
colorlut = getredtoblack(len(schedule)+1)
for i in schedule:
pointlist = []
polygonlist = []
#timeslist = []
datelist = []
for x in i['Orbit line']:
pointlist.append([x['lon2'], x['lat2']])
datelist.append(str(x['time']).replace(" ","T"))
            # folium expects a time for each point; reuse the per-point times recorded by the
            # function that built the schedule (alternatively, times could be derived from the
            # point count and the time period)
lines.append({'coordinates': pointlist, 'dates': datelist, 'color': str(colorlut[colorindex]),'weight': 2})
datelist = []
for x in i['Swath polygon']:
#polygonlist.append([x['lat2'],x['lon2']])
pointtuple = (x['lon2'],x['lat2'])
polygonlist.append(pointtuple)
datelist.append(str(x['time']).replace(" ","T"))
polygons.append({'coordinates': [(tuple(polygonlist),)], 'dates': datelist, 'color': str(colorlut[colorindex]),'weight': 2})
colorindex = colorindex +1
features = [
{
'type': 'Feature',
'geometry': {
'type': 'MultiPolygon',
'coordinates': polygon['coordinates'],
},
'properties': {
'times': polygon['dates'],
'style': {
'color': polygon['color'],
'opacity': 0.1,
'weight': polygon['weight'] if 'weight' in polygon else 5
}
}
}
#for line in lines
for polygon in polygons
]
featureslines = [
{
'type': 'Feature',
'geometry': {
'type': 'LineString',
'coordinates': line['coordinates'],
},
'properties': {
'times': line['dates'],
'style': {
'color': line['color'],
'dash-array': '[4]',
'weight': line['weight'] if 'weight' in line else 5
}
}
}
for line in lines
#for polygon in polygons
]
for featureline in featureslines:
features.append(featureline)
plugins.TimestampedGeoJson({
'type': 'FeatureCollection',
'features': features,
}, period='PT1M', duration=None,add_last_point=False, auto_play=True, transition_time=1, time_slider_drag_update=True).add_to(timespan_map)
plugins.Fullscreen(
position='topright',
title='Expand me',
title_cancel='Exit me',
force_separate_button=True
).add_to(timespan_map)
timespan_map
'''
colorindex = 0
for i in schedule:
pointlist = []
polygonlist = []
#timeslist = []
datelist = []
for x in i['Orbit line']:
pointlist.append([x['lon2'], x['lat2']])
datelist.append(str(x['time']).replace(" ","T"))
# folium expects a time for each point - could use iterate for len of points and time period to get times per point - or add to dict from original function which uses time
polygons.append({'coordinates': pointlist, 'dates': datelist, 'color': colorlut[colorindex],'weight': 2})
colorindex = colorindex +1
datelist = []
for x in i['Swath polygon']:
#polygonlist.append([x['lat2'],x['lon2']])
pointtuple = (x['lon2'],x['lat2'])
polygonlist.append(pointtuple)
datelist.append(str(x['time']).replace(" ","T"))
polygons.append({'coordinates': [(tuple(polygonlist),)], 'dates': datelist, 'color': colorlut[colorindex],'weight': 2})
#colorindex = colorindex +1
#print(polygonlist)
features = [
{
'type': 'Feature',
'geometry': {
#'type': 'LineString',
#'coordinates': line['coordinates'],
'type': 'MultiPolygon',
'coordinates': polygon['coordinates'],
},
'properties': {
#'times': line['dates'],
#'style': {
# 'color': line['color'],
# 'weight': line['weight'] if 'weight' in line else 5
'times': polygon['dates'],
'style': {
'color': polygon['color'],
'weight': polygon['weight'] if 'weight' in polygon else 5
}
}
}
#for line in lines
for polygon in polygons
]
#print(features)
#plugins.TimestampedGeoJson({
# 'type': 'FeatureCollection',
# 'features': features,
#}, period='PT1M', duration=None,add_last_point=False, auto_play=True, transition_time=1).add_to(timespan_map)
style_function = lambda x: {'fillColor': '#00ffff'}
gj = folium.GeoJson({
'type': 'FeatureCollection',
'features': features,
}, style_function=style_function)
#gj.add_child(folium.GeoJsonTooltip(fields=["Satellite :", "Sensor :", \
# "Orbit height :", "Orbit number :", \
# "Acquisition of Signal Local :", "Acquisition of Signal UTC :", \
# "Loss of Signal UTC :", "Transit time :", \
# "Node :"]))
gj.add_to(timespan_map)
#timespan_map.add_child(folium.GeoJsonTooltip(fields=["Satellite :", "Sensor :", \
# "Orbit height :", "Orbit number :", \
# "Acquisition of Signal Local :", "Acquisition of Signal UTC :", \
# "Loss of Signal UTC :", "Transit time :", \
# "Node :"]))
plugins.Fullscreen(
position='topright',
title='Expand me',
title_cancel='Exit me',
force_separate_button=True
).add_to(timespan_map)
'''
foliumtimespanhtml = os.path.join(output_path, satname + "." + ground_station_name + ".timespan.map.html")
timespan_map.save(foliumtimespanhtml)
def add_layer_to_map(swathfile, layername, color):
"""Adds the input layer to the folium map"""
if not os.path.isfile(swathfile):
return ()
geojsonlayer = json.loads(open(swathfile).read())
    df = pandas.DataFrame(geojsonlayer)
# coding=utf-8
# pylint: disable-msg=E1101,W0612
from datetime import datetime, timedelta
import operator
from itertools import product, starmap
from numpy import nan, inf
import numpy as np
import pandas as pd
from pandas import (Index, Series, DataFrame, isnull, bdate_range,
NaT, date_range, timedelta_range,
_np_version_under1p8)
from pandas.tseries.index import Timestamp
from pandas.tseries.tdi import Timedelta
import pandas.core.nanops as nanops
from pandas.compat import range, zip
from pandas import compat
from pandas.util.testing import assert_series_equal, assert_almost_equal
import pandas.util.testing as tm
from .common import TestData
class TestSeriesOperators(TestData, tm.TestCase):
_multiprocess_can_split_ = True
def test_comparisons(self):
left = np.random.randn(10)
right = np.random.randn(10)
left[:3] = np.nan
result = nanops.nangt(left, right)
with np.errstate(invalid='ignore'):
expected = (left > right).astype('O')
expected[:3] = np.nan
assert_almost_equal(result, expected)
s = Series(['a', 'b', 'c'])
s2 = Series([False, True, False])
# it works!
exp = Series([False, False, False])
tm.assert_series_equal(s == s2, exp)
tm.assert_series_equal(s2 == s, exp)
def test_op_method(self):
def check(series, other, check_reverse=False):
simple_ops = ['add', 'sub', 'mul', 'floordiv', 'truediv', 'pow']
if not compat.PY3:
simple_ops.append('div')
for opname in simple_ops:
op = getattr(Series, opname)
                if opname == 'div':
alt = operator.truediv
else:
alt = getattr(operator, opname)
result = op(series, other)
expected = alt(series, other)
tm.assert_almost_equal(result, expected)
if check_reverse:
rop = getattr(Series, "r" + opname)
result = rop(series, other)
expected = alt(other, series)
tm.assert_almost_equal(result, expected)
check(self.ts, self.ts * 2)
check(self.ts, self.ts[::2])
check(self.ts, 5, check_reverse=True)
check(tm.makeFloatSeries(), tm.makeFloatSeries(), check_reverse=True)
def test_neg(self):
assert_series_equal(-self.series, -1 * self.series)
def test_invert(self):
assert_series_equal(-(self.series < 0), ~(self.series < 0))
def test_div(self):
with np.errstate(all='ignore'):
# no longer do integer div for any ops, but deal with the 0's
p = DataFrame({'first': [3, 4, 5, 8], 'second': [0, 0, 0, 3]})
result = p['first'] / p['second']
expected = Series(
p['first'].values.astype(float) / p['second'].values,
dtype='float64')
expected.iloc[0:3] = np.inf
assert_series_equal(result, expected)
result = p['first'] / 0
expected = Series(np.inf, index=p.index, name='first')
assert_series_equal(result, expected)
p = p.astype('float64')
result = p['first'] / p['second']
expected = Series(p['first'].values / p['second'].values)
assert_series_equal(result, expected)
p = DataFrame({'first': [3, 4, 5, 8], 'second': [1, 1, 1, 1]})
result = p['first'] / p['second']
assert_series_equal(result, p['first'].astype('float64'),
check_names=False)
self.assertTrue(result.name is None)
self.assertFalse(np.array_equal(result, p['second'] / p['first']))
# inf signing
s = Series([np.nan, 1., -1.])
result = s / 0
expected = Series([np.nan, np.inf, -np.inf])
assert_series_equal(result, expected)
# float/integer issue
# GH 7785
p = DataFrame({'first': (1, 0), 'second': (-0.01, -0.02)})
expected = Series([-0.01, -np.inf])
result = p['second'].div(p['first'])
assert_series_equal(result, expected, check_names=False)
result = p['second'] / p['first']
assert_series_equal(result, expected)
# GH 9144
s = Series([-1, 0, 1])
result = 0 / s
expected = Series([0.0, nan, 0.0])
assert_series_equal(result, expected)
result = s / 0
expected = Series([-inf, nan, inf])
assert_series_equal(result, expected)
result = s // 0
expected = Series([-inf, nan, inf])
assert_series_equal(result, expected)
def test_operators(self):
def _check_op(series, other, op, pos_only=False,
check_dtype=True):
left = np.abs(series) if pos_only else series
right = np.abs(other) if pos_only else other
cython_or_numpy = op(left, right)
python = left.combine(right, op)
tm.assert_series_equal(cython_or_numpy, python,
check_dtype=check_dtype)
def check(series, other):
simple_ops = ['add', 'sub', 'mul', 'truediv', 'floordiv', 'mod']
for opname in simple_ops:
_check_op(series, other, getattr(operator, opname))
_check_op(series, other, operator.pow, pos_only=True)
_check_op(series, other, lambda x, y: operator.add(y, x))
_check_op(series, other, lambda x, y: operator.sub(y, x))
_check_op(series, other, lambda x, y: operator.truediv(y, x))
_check_op(series, other, lambda x, y: operator.floordiv(y, x))
_check_op(series, other, lambda x, y: operator.mul(y, x))
_check_op(series, other, lambda x, y: operator.pow(y, x),
pos_only=True)
_check_op(series, other, lambda x, y: operator.mod(y, x))
check(self.ts, self.ts * 2)
check(self.ts, self.ts * 0)
check(self.ts, self.ts[::2])
check(self.ts, 5)
def check_comparators(series, other, check_dtype=True):
_check_op(series, other, operator.gt, check_dtype=check_dtype)
_check_op(series, other, operator.ge, check_dtype=check_dtype)
_check_op(series, other, operator.eq, check_dtype=check_dtype)
_check_op(series, other, operator.lt, check_dtype=check_dtype)
_check_op(series, other, operator.le, check_dtype=check_dtype)
check_comparators(self.ts, 5)
check_comparators(self.ts, self.ts + 1, check_dtype=False)
def test_operators_empty_int_corner(self):
s1 = Series([], [], dtype=np.int32)
s2 = Series({'x': 0.})
tm.assert_series_equal(s1 * s2, Series([np.nan], index=['x']))
def test_operators_timedelta64(self):
# invalid ops
self.assertRaises(Exception, self.objSeries.__add__, 1)
self.assertRaises(Exception, self.objSeries.__add__,
np.array(1, dtype=np.int64))
self.assertRaises(Exception, self.objSeries.__sub__, 1)
self.assertRaises(Exception, self.objSeries.__sub__,
np.array(1, dtype=np.int64))
        # series ops
v1 = date_range('2012-1-1', periods=3, freq='D')
v2 = date_range('2012-1-2', periods=3, freq='D')
rs = Series(v2) - Series(v1)
xp = Series(1e9 * 3600 * 24,
rs.index).astype('int64').astype('timedelta64[ns]')
assert_series_equal(rs, xp)
self.assertEqual(rs.dtype, 'timedelta64[ns]')
df = DataFrame(dict(A=v1))
td = Series([timedelta(days=i) for i in range(3)])
self.assertEqual(td.dtype, 'timedelta64[ns]')
# series on the rhs
result = df['A'] - df['A'].shift()
self.assertEqual(result.dtype, 'timedelta64[ns]')
result = df['A'] + td
self.assertEqual(result.dtype, 'M8[ns]')
# scalar Timestamp on rhs
maxa = df['A'].max()
tm.assertIsInstance(maxa, Timestamp)
resultb = df['A'] - df['A'].max()
self.assertEqual(resultb.dtype, 'timedelta64[ns]')
# timestamp on lhs
result = resultb + df['A']
values = [Timestamp('20111230'), Timestamp('20120101'),
Timestamp('20120103')]
expected = Series(values, name='A')
assert_series_equal(result, expected)
# datetimes on rhs
result = df['A'] - datetime(2001, 1, 1)
expected = Series(
[timedelta(days=4017 + i) for i in range(3)], name='A')
assert_series_equal(result, expected)
self.assertEqual(result.dtype, 'm8[ns]')
d = datetime(2001, 1, 1, 3, 4)
resulta = df['A'] - d
self.assertEqual(resulta.dtype, 'm8[ns]')
# roundtrip
resultb = resulta + d
assert_series_equal(df['A'], resultb)
# timedeltas on rhs
td = timedelta(days=1)
resulta = df['A'] + td
resultb = resulta - td
assert_series_equal(resultb, df['A'])
self.assertEqual(resultb.dtype, 'M8[ns]')
# roundtrip
td = timedelta(minutes=5, seconds=3)
resulta = df['A'] + td
resultb = resulta - td
assert_series_equal(df['A'], resultb)
self.assertEqual(resultb.dtype, 'M8[ns]')
# inplace
value = rs[2] + np.timedelta64(timedelta(minutes=5, seconds=1))
rs[2] += np.timedelta64(timedelta(minutes=5, seconds=1))
self.assertEqual(rs[2], value)
def test_operator_series_comparison_zerorank(self):
# GH 13006
result = np.float64(0) > pd.Series([1, 2, 3])
expected = 0.0 > pd.Series([1, 2, 3])
self.assert_series_equal(result, expected)
result = pd.Series([1, 2, 3]) < np.float64(0)
expected = pd.Series([1, 2, 3]) < 0.0
self.assert_series_equal(result, expected)
result = np.array([0, 1, 2])[0] > pd.Series([0, 1, 2])
expected = 0.0 > pd.Series([1, 2, 3])
self.assert_series_equal(result, expected)
def test_timedeltas_with_DateOffset(self):
# GH 4532
# operate with pd.offsets
s = Series([Timestamp('20130101 9:01'), Timestamp('20130101 9:02')])
result = s + pd.offsets.Second(5)
result2 = pd.offsets.Second(5) + s
expected = Series([Timestamp('20130101 9:01:05'), Timestamp(
'20130101 9:02:05')])
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
result = s - pd.offsets.Second(5)
result2 = -pd.offsets.Second(5) + s
expected = Series([Timestamp('20130101 9:00:55'), Timestamp(
'20130101 9:01:55')])
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
result = s + pd.offsets.Milli(5)
result2 = pd.offsets.Milli(5) + s
expected = Series([Timestamp('20130101 9:01:00.005'), Timestamp(
'20130101 9:02:00.005')])
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
result = s + pd.offsets.Minute(5) + pd.offsets.Milli(5)
expected = Series([Timestamp('20130101 9:06:00.005'), Timestamp(
'20130101 9:07:00.005')])
assert_series_equal(result, expected)
# operate with np.timedelta64 correctly
result = s + np.timedelta64(1, 's')
result2 = np.timedelta64(1, 's') + s
expected = Series([Timestamp('20130101 9:01:01'), Timestamp(
'20130101 9:02:01')])
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
result = s + np.timedelta64(5, 'ms')
result2 = np.timedelta64(5, 'ms') + s
expected = Series([Timestamp('20130101 9:01:00.005'), Timestamp(
'20130101 9:02:00.005')])
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
# valid DateOffsets
for do in ['Hour', 'Minute', 'Second', 'Day', 'Micro', 'Milli',
'Nano']:
op = getattr(pd.offsets, do)
s + op(5)
op(5) + s
def test_timedelta_series_ops(self):
# GH11925
s = Series(timedelta_range('1 day', periods=3))
ts = Timestamp('2012-01-01')
expected = Series(date_range('2012-01-02', periods=3))
assert_series_equal(ts + s, expected)
assert_series_equal(s + ts, expected)
expected2 = Series(date_range('2011-12-31', periods=3, freq='-1D'))
assert_series_equal(ts - s, expected2)
assert_series_equal(ts + (-s), expected2)
def test_timedelta64_operations_with_DateOffset(self):
# GH 10699
td = Series([timedelta(minutes=5, seconds=3)] * 3)
result = td + pd.offsets.Minute(1)
expected = Series([timedelta(minutes=6, seconds=3)] * 3)
assert_series_equal(result, expected)
result = td - pd.offsets.Minute(1)
expected = Series([timedelta(minutes=4, seconds=3)] * 3)
assert_series_equal(result, expected)
result = td + Series([pd.offsets.Minute(1), pd.offsets.Second(3),
pd.offsets.Hour(2)])
expected = Series([timedelta(minutes=6, seconds=3), timedelta(
minutes=5, seconds=6), timedelta(hours=2, minutes=5, seconds=3)])
assert_series_equal(result, expected)
result = td + pd.offsets.Minute(1) + pd.offsets.Second(12)
expected = Series([timedelta(minutes=6, seconds=15)] * 3)
assert_series_equal(result, expected)
# valid DateOffsets
for do in ['Hour', 'Minute', 'Second', 'Day', 'Micro', 'Milli',
'Nano']:
op = getattr(pd.offsets, do)
td + op(5)
op(5) + td
td - op(5)
op(5) - td
def test_timedelta64_operations_with_timedeltas(self):
# td operate with td
td1 = Series([timedelta(minutes=5, seconds=3)] * 3)
td2 = timedelta(minutes=5, seconds=4)
result = td1 - td2
expected = Series([timedelta(seconds=0)] * 3) - Series([timedelta(
seconds=1)] * 3)
self.assertEqual(result.dtype, 'm8[ns]')
assert_series_equal(result, expected)
result2 = td2 - td1
expected = (Series([timedelta(seconds=1)] * 3) - Series([timedelta(
seconds=0)] * 3))
assert_series_equal(result2, expected)
# roundtrip
assert_series_equal(result + td2, td1)
# Now again, using pd.to_timedelta, which should build
# a Series or a scalar, depending on input.
td1 = Series(pd.to_timedelta(['00:05:03'] * 3))
td2 = pd.to_timedelta('00:05:04')
result = td1 - td2
expected = Series([timedelta(seconds=0)] * 3) - Series([timedelta(
seconds=1)] * 3)
self.assertEqual(result.dtype, 'm8[ns]')
assert_series_equal(result, expected)
result2 = td2 - td1
expected = (Series([timedelta(seconds=1)] * 3) - Series([timedelta(
seconds=0)] * 3))
assert_series_equal(result2, expected)
# roundtrip
assert_series_equal(result + td2, td1)
def test_timedelta64_operations_with_integers(self):
# GH 4521
# divide/multiply by integers
startdate = Series(date_range('2013-01-01', '2013-01-03'))
enddate = Series(date_range('2013-03-01', '2013-03-03'))
s1 = enddate - startdate
s1[2] = np.nan
s2 = Series([2, 3, 4])
expected = Series(s1.values.astype(np.int64) / s2, dtype='m8[ns]')
expected[2] = np.nan
result = s1 / s2
assert_series_equal(result, expected)
s2 = Series([20, 30, 40])
expected = Series(s1.values.astype(np.int64) / s2, dtype='m8[ns]')
expected[2] = np.nan
result = s1 / s2
assert_series_equal(result, expected)
result = s1 / 2
expected = Series(s1.values.astype(np.int64) / 2, dtype='m8[ns]')
expected[2] = np.nan
assert_series_equal(result, expected)
s2 = Series([20, 30, 40])
expected = Series(s1.values.astype(np.int64) * s2, dtype='m8[ns]')
expected[2] = np.nan
result = s1 * s2
assert_series_equal(result, expected)
for dtype in ['int32', 'int16', 'uint32', 'uint64', 'uint32', 'uint16',
'uint8']:
s2 = Series([20, 30, 40], dtype=dtype)
expected = Series(
s1.values.astype(np.int64) * s2.astype(np.int64),
dtype='m8[ns]')
expected[2] = np.nan
result = s1 * s2
assert_series_equal(result, expected)
result = s1 * 2
expected = Series(s1.values.astype(np.int64) * 2, dtype='m8[ns]')
expected[2] = np.nan
assert_series_equal(result, expected)
result = s1 * -1
expected = Series(s1.values.astype(np.int64) * -1, dtype='m8[ns]')
expected[2] = np.nan
assert_series_equal(result, expected)
# invalid ops
assert_series_equal(s1 / s2.astype(float),
Series([Timedelta('2 days 22:48:00'), Timedelta(
'1 days 23:12:00'), Timedelta('NaT')]))
assert_series_equal(s1 / 2.0,
Series([Timedelta('29 days 12:00:00'), Timedelta(
'29 days 12:00:00'), Timedelta('NaT')]))
for op in ['__add__', '__sub__']:
sop = getattr(s1, op, None)
if sop is not None:
self.assertRaises(TypeError, sop, 1)
self.assertRaises(TypeError, sop, s2.values)
def test_timedelta64_conversions(self):
startdate = Series(date_range('2013-01-01', '2013-01-03'))
enddate = Series(date_range('2013-03-01', '2013-03-03'))
s1 = enddate - startdate
s1[2] = np.nan
for m in [1, 3, 10]:
for unit in ['D', 'h', 'm', 's', 'ms', 'us', 'ns']:
# op
expected = s1.apply(lambda x: x / np.timedelta64(m, unit))
result = s1 / np.timedelta64(m, unit)
assert_series_equal(result, expected)
if m == 1 and unit != 'ns':
# astype
result = s1.astype("timedelta64[{0}]".format(unit))
assert_series_equal(result, expected)
# reverse op
expected = s1.apply(
lambda x: Timedelta(np.timedelta64(m, unit)) / x)
result = np.timedelta64(m, unit) / s1
# astype
s = Series(date_range('20130101', periods=3))
result = s.astype(object)
self.assertIsInstance(result.iloc[0], datetime)
self.assertTrue(result.dtype == np.object_)
result = s1.astype(object)
self.assertIsInstance(result.iloc[0], timedelta)
self.assertTrue(result.dtype == np.object_)
def test_timedelta64_equal_timedelta_supported_ops(self):
ser = Series([Timestamp('20130301'), Timestamp('20130228 23:00:00'),
Timestamp('20130228 22:00:00'), Timestamp(
'20130228 21:00:00')])
intervals = 'D', 'h', 'm', 's', 'us'
# TODO: unused
# npy16_mappings = {'D': 24 * 60 * 60 * 1000000,
# 'h': 60 * 60 * 1000000,
# 'm': 60 * 1000000,
# 's': 1000000,
# 'us': 1}
def timedelta64(*args):
return sum(starmap(np.timedelta64, zip(args, intervals)))
for op, d, h, m, s, us in product([operator.add, operator.sub],
*([range(2)] * 5)):
nptd = timedelta64(d, h, m, s, us)
pytd = timedelta(days=d, hours=h, minutes=m, seconds=s,
microseconds=us)
lhs = op(ser, nptd)
rhs = op(ser, pytd)
try:
assert_series_equal(lhs, rhs)
except:
                raise AssertionError(
                    "invalid comparison [op->{0},d->{1},h->{2},m->{3},"
"s->{4},us->{5}]\n{6}\n{7}\n".format(op, d, h, m, s,
us, lhs, rhs))
def test_operators_datetimelike(self):
def run_ops(ops, get_ser, test_ser):
# check that we are getting a TypeError
# with 'operate' (from core/ops.py) for the ops that are not
# defined
for op_str in ops:
op = getattr(get_ser, op_str, None)
with tm.assertRaisesRegexp(TypeError, 'operate'):
op(test_ser)
# ## timedelta64 ###
td1 = Series([timedelta(minutes=5, seconds=3)] * 3)
td1.iloc[2] = np.nan
td2 = timedelta(minutes=5, seconds=4)
ops = ['__mul__', '__floordiv__', '__pow__', '__rmul__',
'__rfloordiv__', '__rpow__']
run_ops(ops, td1, td2)
td1 + td2
td2 + td1
td1 - td2
td2 - td1
td1 / td2
td2 / td1
# ## datetime64 ###
dt1 = Series([Timestamp('20111230'), Timestamp('20120101'),
Timestamp('20120103')])
dt1.iloc[2] = np.nan
dt2 = Series([Timestamp('20111231'), Timestamp('20120102'),
Timestamp('20120104')])
ops = ['__add__', '__mul__', '__floordiv__', '__truediv__', '__div__',
'__pow__', '__radd__', '__rmul__', '__rfloordiv__',
'__rtruediv__', '__rdiv__', '__rpow__']
run_ops(ops, dt1, dt2)
dt1 - dt2
dt2 - dt1
        # ## datetime64 with timedelta ###
ops = ['__mul__', '__floordiv__', '__truediv__', '__div__', '__pow__',
'__rmul__', '__rfloordiv__', '__rtruediv__', '__rdiv__',
'__rpow__']
run_ops(ops, dt1, td1)
dt1 + td1
td1 + dt1
dt1 - td1
# TODO: Decide if this ought to work.
# td1 - dt1
        # ## timedelta with datetime64 ###
ops = ['__sub__', '__mul__', '__floordiv__', '__truediv__', '__div__',
'__pow__', '__rmul__', '__rfloordiv__', '__rtruediv__',
'__rdiv__', '__rpow__']
run_ops(ops, td1, dt1)
td1 + dt1
dt1 + td1
# 8260, 10763
# datetime64 with tz
ops = ['__mul__', '__floordiv__', '__truediv__', '__div__', '__pow__',
'__rmul__', '__rfloordiv__', '__rtruediv__', '__rdiv__',
'__rpow__']
tz = 'US/Eastern'
dt1 = Series(date_range('2000-01-01 09:00:00', periods=5,
tz=tz), name='foo')
dt2 = dt1.copy()
dt2.iloc[2] = np.nan
td1 = Series(timedelta_range('1 days 1 min', periods=5, freq='H'))
td2 = td1.copy()
td2.iloc[1] = np.nan
run_ops(ops, dt1, td1)
result = dt1 + td1[0]
exp = (dt1.dt.tz_localize(None) + td1[0]).dt.tz_localize(tz)
assert_series_equal(result, exp)
result = dt2 + td2[0]
exp = (dt2.dt.tz_localize(None) + td2[0]).dt.tz_localize(tz)
assert_series_equal(result, exp)
# odd numpy behavior with scalar timedeltas
if not _np_version_under1p8:
result = td1[0] + dt1
exp = (dt1.dt.tz_localize(None) + td1[0]).dt.tz_localize(tz)
assert_series_equal(result, exp)
result = td2[0] + dt2
exp = (dt2.dt.tz_localize(None) + td2[0]).dt.tz_localize(tz)
assert_series_equal(result, exp)
result = dt1 - td1[0]
exp = (dt1.dt.tz_localize(None) - td1[0]).dt.tz_localize(tz)
assert_series_equal(result, exp)
self.assertRaises(TypeError, lambda: td1[0] - dt1)
result = dt2 - td2[0]
exp = (dt2.dt.tz_localize(None) - td2[0]).dt.tz_localize(tz)
assert_series_equal(result, exp)
self.assertRaises(TypeError, lambda: td2[0] - dt2)
result = dt1 + td1
exp = (dt1.dt.tz_localize(None) + td1).dt.tz_localize(tz)
assert_series_equal(result, exp)
result = dt2 + td2
exp = (dt2.dt.tz_localize(None) + td2).dt.tz_localize(tz)
assert_series_equal(result, exp)
result = dt1 - td1
exp = (dt1.dt.tz_localize(None) - td1).dt.tz_localize(tz)
assert_series_equal(result, exp)
result = dt2 - td2
exp = (dt2.dt.tz_localize(None) - td2).dt.tz_localize(tz)
assert_series_equal(result, exp)
self.assertRaises(TypeError, lambda: td1 - dt1)
self.assertRaises(TypeError, lambda: td2 - dt2)
def test_sub_single_tz(self):
# GH12290
s1 = Series([pd.Timestamp('2016-02-10', tz='America/Sao_Paulo')])
s2 = Series([pd.Timestamp('2016-02-08', tz='America/Sao_Paulo')])
result = s1 - s2
expected = Series([Timedelta('2days')])
assert_series_equal(result, expected)
result = s2 - s1
expected = Series([Timedelta('-2days')])
assert_series_equal(result, expected)
def test_ops_nat(self):
# GH 11349
timedelta_series = Series([NaT, Timedelta('1s')])
datetime_series = Series([NaT, Timestamp('19900315')])
nat_series_dtype_timedelta = Series(
[NaT, NaT], dtype='timedelta64[ns]')
nat_series_dtype_timestamp = Series([NaT, NaT], dtype='datetime64[ns]')
single_nat_dtype_datetime = Series([NaT], dtype='datetime64[ns]')
single_nat_dtype_timedelta = Series([NaT], dtype='timedelta64[ns]')
# subtraction
assert_series_equal(timedelta_series - NaT, nat_series_dtype_timedelta)
assert_series_equal(-NaT + timedelta_series,
nat_series_dtype_timedelta)
assert_series_equal(timedelta_series - single_nat_dtype_timedelta,
nat_series_dtype_timedelta)
assert_series_equal(-single_nat_dtype_timedelta + timedelta_series,
nat_series_dtype_timedelta)
assert_series_equal(datetime_series - NaT, nat_series_dtype_timestamp)
assert_series_equal(-NaT + datetime_series, nat_series_dtype_timestamp)
assert_series_equal(datetime_series - single_nat_dtype_datetime,
nat_series_dtype_timedelta)
with tm.assertRaises(TypeError):
-single_nat_dtype_datetime + datetime_series
assert_series_equal(datetime_series - single_nat_dtype_timedelta,
nat_series_dtype_timestamp)
assert_series_equal(-single_nat_dtype_timedelta + datetime_series,
nat_series_dtype_timestamp)
# without a Series wrapping the NaT, it is ambiguous
# whether it is a datetime64 or timedelta64
# defaults to interpreting it as timedelta64
assert_series_equal(nat_series_dtype_timestamp - NaT,
nat_series_dtype_timestamp)
assert_series_equal(-NaT + nat_series_dtype_timestamp,
nat_series_dtype_timestamp)
assert_series_equal(nat_series_dtype_timestamp -
single_nat_dtype_datetime,
nat_series_dtype_timedelta)
with tm.assertRaises(TypeError):
-single_nat_dtype_datetime + nat_series_dtype_timestamp
assert_series_equal(nat_series_dtype_timestamp -
single_nat_dtype_timedelta,
nat_series_dtype_timestamp)
assert_series_equal(-single_nat_dtype_timedelta +
nat_series_dtype_timestamp,
nat_series_dtype_timestamp)
with tm.assertRaises(TypeError):
timedelta_series - single_nat_dtype_datetime
# addition
assert_series_equal(nat_series_dtype_timestamp + NaT,
nat_series_dtype_timestamp)
assert_series_equal(NaT + nat_series_dtype_timestamp,
nat_series_dtype_timestamp)
assert_series_equal(nat_series_dtype_timestamp +
single_nat_dtype_timedelta,
nat_series_dtype_timestamp)
assert_series_equal(single_nat_dtype_timedelta +
nat_series_dtype_timestamp,
nat_series_dtype_timestamp)
assert_series_equal(nat_series_dtype_timedelta + NaT,
nat_series_dtype_timedelta)
assert_series_equal(NaT + nat_series_dtype_timedelta,
nat_series_dtype_timedelta)
assert_series_equal(nat_series_dtype_timedelta +
single_nat_dtype_timedelta,
nat_series_dtype_timedelta)
assert_series_equal(single_nat_dtype_timedelta +
nat_series_dtype_timedelta,
nat_series_dtype_timedelta)
assert_series_equal(timedelta_series + NaT, nat_series_dtype_timedelta)
assert_series_equal(NaT + timedelta_series, nat_series_dtype_timedelta)
assert_series_equal(timedelta_series + single_nat_dtype_timedelta,
nat_series_dtype_timedelta)
assert_series_equal(single_nat_dtype_timedelta + timedelta_series,
nat_series_dtype_timedelta)
assert_series_equal(nat_series_dtype_timestamp + NaT,
nat_series_dtype_timestamp)
assert_series_equal(NaT + nat_series_dtype_timestamp,
nat_series_dtype_timestamp)
assert_series_equal(nat_series_dtype_timestamp +
single_nat_dtype_timedelta,
nat_series_dtype_timestamp)
assert_series_equal(single_nat_dtype_timedelta +
nat_series_dtype_timestamp,
nat_series_dtype_timestamp)
assert_series_equal(nat_series_dtype_timedelta + NaT,
nat_series_dtype_timedelta)
assert_series_equal(NaT + nat_series_dtype_timedelta,
nat_series_dtype_timedelta)
assert_series_equal(nat_series_dtype_timedelta +
single_nat_dtype_timedelta,
nat_series_dtype_timedelta)
assert_series_equal(single_nat_dtype_timedelta +
nat_series_dtype_timedelta,
nat_series_dtype_timedelta)
assert_series_equal(nat_series_dtype_timedelta +
single_nat_dtype_datetime,
nat_series_dtype_timestamp)
assert_series_equal(single_nat_dtype_datetime +
nat_series_dtype_timedelta,
nat_series_dtype_timestamp)
# multiplication
assert_series_equal(nat_series_dtype_timedelta * 1.0,
nat_series_dtype_timedelta)
assert_series_equal(1.0 * nat_series_dtype_timedelta,
nat_series_dtype_timedelta)
assert_series_equal(timedelta_series * 1, timedelta_series)
assert_series_equal(1 * timedelta_series, timedelta_series)
assert_series_equal(timedelta_series * 1.5,
Series([NaT, Timedelta('1.5s')]))
assert_series_equal(1.5 * timedelta_series,
Series([NaT, Timedelta('1.5s')]))
assert_series_equal(timedelta_series * nan, nat_series_dtype_timedelta)
assert_series_equal(nan * timedelta_series, nat_series_dtype_timedelta)
with tm.assertRaises(TypeError):
datetime_series * 1
with tm.assertRaises(TypeError):
nat_series_dtype_timestamp * 1
with tm.assertRaises(TypeError):
datetime_series * 1.0
with tm.assertRaises(TypeError):
nat_series_dtype_timestamp * 1.0
# division
assert_series_equal(timedelta_series / 2,
Series([NaT, Timedelta('0.5s')]))
assert_series_equal(timedelta_series / 2.0,
Series([NaT, Timedelta('0.5s')]))
assert_series_equal(timedelta_series / nan, nat_series_dtype_timedelta)
with tm.assertRaises(TypeError):
nat_series_dtype_timestamp / 1.0
with tm.assertRaises(TypeError):
nat_series_dtype_timestamp / 1
def test_ops_datetimelike_align(self):
# GH 7500
# datetimelike ops need to align
dt = Series(date_range('2012-1-1', periods=3, freq='D'))
dt.iloc[2] = np.nan
dt2 = dt[::-1]
expected = Series([timedelta(0), timedelta(0), pd.NaT])
# name is reset
result = dt2 - dt
assert_series_equal(result, expected)
expected = Series(expected, name=0)
result = (dt2.to_frame() - dt.to_frame())[0]
assert_series_equal(result, expected)
def test_object_comparisons(self):
s = Series(['a', 'b', np.nan, 'c', 'a'])
result = s == 'a'
expected = Series([True, False, False, False, True])
assert_series_equal(result, expected)
result = s < 'a'
expected = Series([False, False, False, False, False])
assert_series_equal(result, expected)
result = s != 'a'
expected = -(s == 'a')
assert_series_equal(result, expected)
def test_comparison_tuples(self):
# GH11339
# comparisons vs tuple
s = Series([(1, 1), (1, 2)])
result = s == (1, 2)
expected = Series([False, True])
assert_series_equal(result, expected)
result = s != (1, 2)
expected = Series([True, False])
assert_series_equal(result, expected)
result = s == (0, 0)
expected = Series([False, False])
assert_series_equal(result, expected)
result = s != (0, 0)
expected = Series([True, True])
assert_series_equal(result, expected)
s = Series([(1, 1), (1, 1)])
result = s == (1, 1)
expected = Series([True, True])
assert_series_equal(result, expected)
result = s != (1, 1)
expected = Series([False, False])
assert_series_equal(result, expected)
s = Series([frozenset([1]), frozenset([1, 2])])
result = s == frozenset([1])
expected = Series([True, False])
assert_series_equal(result, expected)
def test_comparison_operators_with_nas(self):
s = Series(bdate_range('1/1/2000', periods=10), dtype=object)
s[::2] = np.nan
# test that comparisons work
ops = ['lt', 'le', 'gt', 'ge', 'eq', 'ne']
for op in ops:
val = s[5]
f = getattr(operator, op)
result = f(s, val)
expected = f(s.dropna(), val).reindex(s.index)
if op == 'ne':
expected = expected.fillna(True).astype(bool)
else:
expected = expected.fillna(False).astype(bool)
assert_series_equal(result, expected)
            # TODO: reversed comparisons (scalar on the left-hand side) are not exercised here
# result = f(val, s)
# expected = f(val, s.dropna()).reindex(s.index)
# assert_series_equal(result, expected)
# boolean &, |, ^ should work with object arrays and propagate NAs
ops = ['and_', 'or_', 'xor']
mask = s.isnull()
for bool_op in ops:
f = getattr(operator, bool_op)
filled = s.fillna(s[0])
result = f(s < s[9], s > s[3])
expected = f(filled < filled[9], filled > filled[3])
expected[mask] = False
assert_series_equal(result, expected)
def test_comparison_object_numeric_nas(self):
s = Series(np.random.randn(10), dtype=object)
shifted = s.shift(2)
ops = ['lt', 'le', 'gt', 'ge', 'eq', 'ne']
for op in ops:
f = getattr(operator, op)
result = f(s, shifted)
expected = f(s.astype(float), shifted.astype(float))
assert_series_equal(result, expected)
def test_comparison_invalid(self):
# GH4968
# invalid date/int comparisons
s = Series(range(5))
s2 = Series(date_range('20010101', periods=5))
for (x, y) in [(s, s2), (s2, s)]:
self.assertRaises(TypeError, lambda: x == y)
self.assertRaises(TypeError, lambda: x != y)
self.assertRaises(TypeError, lambda: x >= y)
self.assertRaises(TypeError, lambda: x > y)
self.assertRaises(TypeError, lambda: x < y)
self.assertRaises(TypeError, lambda: x <= y)
def test_more_na_comparisons(self):
for dtype in [None, object]:
left = Series(['a', np.nan, 'c'], dtype=dtype)
right = Series(['a', np.nan, 'd'], dtype=dtype)
result = left == right
expected = Series([True, False, False])
assert_series_equal(result, expected)
result = left != right
expected = Series([False, True, True])
assert_series_equal(result, expected)
result = left == np.nan
expected = Series([False, False, False])
assert_series_equal(result, expected)
result = left != np.nan
expected = Series([True, True, True])
assert_series_equal(result, expected)
def test_nat_comparisons(self):
data = [([pd.Timestamp('2011-01-01'), pd.NaT,
pd.Timestamp('2011-01-03')],
[pd.NaT, pd.NaT, pd.Timestamp('2011-01-03')]),
([pd.Timedelta('1 days'), pd.NaT,
pd.Timedelta('3 days')],
[pd.NaT, pd.NaT, pd.Timedelta('3 days')]),
([pd.Period('2011-01', freq='M'), pd.NaT,
pd.Period('2011-03', freq='M')],
[pd.NaT, pd.NaT, pd.Period('2011-03', freq='M')])]
# add lhs / rhs switched data
data = data + [(r, l) for l, r in data]
for l, r in data:
for dtype in [None, object]:
left = Series(l, dtype=dtype)
# Series, Index
for right in [Series(r, dtype=dtype), Index(r, dtype=dtype)]:
expected = Series([False, False, True])
assert_series_equal(left == right, expected)
expected = Series([True, True, False])
assert_series_equal(left != right, expected)
expected = Series([False, False, False])
assert_series_equal(left < right, expected)
expected = Series([False, False, False])
assert_series_equal(left > right, expected)
expected = Series([False, False, True])
assert_series_equal(left >= right, expected)
expected = Series([False, False, True])
assert_series_equal(left <= right, expected)
def test_nat_comparisons_scalar(self):
data = [[pd.Timestamp('2011-01-01'), pd.NaT,
pd.Timestamp('2011-01-03')],
[pd.Timedelta('1 days'), pd.NaT, pd.Timedelta('3 days')],
[pd.Period('2011-01', freq='M'), pd.NaT,
pd.Period('2011-03', freq='M')]]
for l in data:
for dtype in [None, object]:
left = Series(l, dtype=dtype)
expected = Series([False, False, False])
assert_series_equal(left == pd.NaT, expected)
assert_series_equal(pd.NaT == left, expected)
expected = Series([True, True, True])
assert_series_equal(left != pd.NaT, expected)
assert_series_equal(pd.NaT != left, expected)
expected = Series([False, False, False])
assert_series_equal(left < pd.NaT, expected)
assert_series_equal(pd.NaT > left, expected)
assert_series_equal(left <= pd.NaT, expected)
assert_series_equal(pd.NaT >= left, expected)
assert_series_equal(left > pd.NaT, expected)
assert_series_equal(pd.NaT < left, expected)
assert_series_equal(left >= pd.NaT, expected)
assert_series_equal(pd.NaT <= left, expected)
def test_comparison_different_length(self):
a = Series(['a', 'b', 'c'])
b = Series(['b', 'a'])
self.assertRaises(ValueError, a.__lt__, b)
a = Series([1, 2])
b = Series([2, 3, 4])
self.assertRaises(ValueError, a.__eq__, b)
def test_comparison_label_based(self):
# GH 4947
# comparisons should be label based
a = Series([True, False, True], list('bca'))
b = Series([False, True, False], list('abc'))
expected = Series([False, True, False], list('abc'))
result = a & b
assert_series_equal(result, expected)
expected = Series([True, True, False], list('abc'))
result = a | b
assert_series_equal(result, expected)
expected = Series([True, False, False], list('abc'))
result = a ^ b
assert_series_equal(result, expected)
# rhs is bigger
a = Series([True, False, True], list('bca'))
b = Series([False, True, False, True], list('abcd'))
expected = Series([False, True, False, False], list('abcd'))
result = a & b
assert_series_equal(result, expected)
expected = Series([True, True, False, False], list('abcd'))
result = a | b
assert_series_equal(result, expected)
# filling
# vs empty
result = a & Series([])
expected = Series([False, False, False], list('bca'))
assert_series_equal(result, expected)
result = a | Series([])
expected = Series([True, False, True], list('bca'))
assert_series_equal(result, expected)
# vs non-matching
result = a & Series([1], ['z'])
expected = Series([False, False, False, False], list('abcz'))
assert_series_equal(result, expected)
result = a | Series([1], ['z'])
expected = Series([True, True, False, False], list('abcz'))
assert_series_equal(result, expected)
# identity
# we would like s[s|e] == s to hold for any e, whether empty or not
for e in [Series([]), Series([1], ['z']),
Series(np.nan, b.index), Series(np.nan, a.index)]:
result = a[a | e]
assert_series_equal(result, a[a])
for e in [Series(['z'])]:
if compat.PY3:
with tm.assert_produces_warning(RuntimeWarning):
result = a[a | e]
else:
result = a[a | e]
assert_series_equal(result, a[a])
# vs scalars
index = list('bca')
t = Series([True, False, True])
for v in [True, 1, 2]:
result = Series([True, False, True], index=index) | v
expected = Series([True, True, True], index=index)
assert_series_equal(result, expected)
for v in [np.nan, 'foo']:
self.assertRaises(TypeError, lambda: t | v)
for v in [False, 0]:
result = Series([True, False, True], index=index) | v
expected = Series([True, False, True], index=index)
assert_series_equal(result, expected)
for v in [True, 1]:
result = Series([True, False, True], index=index) & v
expected = Series([True, False, True], index=index)
assert_series_equal(result, expected)
for v in [False, 0]:
result = Series([True, False, True], index=index) & v
expected = Series([False, False, False], index=index)
assert_series_equal(result, expected)
for v in [np.nan]:
self.assertRaises(TypeError, lambda: t & v)
def test_comparison_flex_basic(self):
left = pd.Series(np.random.randn(10))
right = pd.Series(np.random.randn(10))
tm.assert_series_equal(left.eq(right), left == right)
tm.assert_series_equal(left.ne(right), left != right)
        tm.assert_series_equal(left.le(right), left <= right)
        tm.assert_series_equal(left.lt(right), left < right)
tm.assert_series_equal(left.gt(right), left > right)
tm.assert_series_equal(left.ge(right), left >= right)
# axis
for axis in [0, None, 'index']:
tm.assert_series_equal(left.eq(right, axis=axis), left == right)
tm.assert_series_equal(left.ne(right, axis=axis), left != right)
            tm.assert_series_equal(left.le(right, axis=axis), left <= right)
            tm.assert_series_equal(left.lt(right, axis=axis), left < right)
tm.assert_series_equal(left.gt(right, axis=axis), left > right)
tm.assert_series_equal(left.ge(right, axis=axis), left >= right)
#
msg = 'No axis named 1 for object type'
        for op in ['eq', 'ne', 'le', 'lt', 'gt', 'ge']:
with tm.assertRaisesRegexp(ValueError, msg):
getattr(left, op)(right, axis=1)
def test_comparison_flex_alignment(self):
left = Series([1, 3, 2], index=list('abc'))
right = Series([2, 2, 2], index=list('bcd'))
exp = pd.Series([False, False, True, False], index=list('abcd'))
tm.assert_series_equal(left.eq(right), exp)
exp = pd.Series([True, True, False, True], index=list('abcd'))
tm.assert_series_equal(left.ne(right), exp)
exp = pd.Series([False, False, True, False], index=list('abcd'))
tm.assert_series_equal(left.le(right), exp)
exp = pd.Series([False, False, False, False], index=list('abcd'))
tm.assert_series_equal(left.lt(right), exp)
exp = pd.Series([False, True, True, False], index=list('abcd'))
tm.assert_series_equal(left.ge(right), exp)
exp = pd.Series([False, True, False, False], index=list('abcd'))
tm.assert_series_equal(left.gt(right), exp)
def test_comparison_flex_alignment_fill(self):
left = Series([1, 3, 2], index=list('abc'))
right = Series([2, 2, 2], index=list('bcd'))
exp = pd.Series([False, False, True, True], index=list('abcd'))
tm.assert_series_equal(left.eq(right, fill_value=2), exp)
exp = pd.Series([True, True, False, False], index=list('abcd'))
tm.assert_series_equal(left.ne(right, fill_value=2), exp)
exp = pd.Series([False, False, True, True], index=list('abcd'))
tm.assert_series_equal(left.le(right, fill_value=0), exp)
exp = pd.Series([False, False, False, True], index=list('abcd'))
tm.assert_series_equal(left.lt(right, fill_value=0), exp)
exp = pd.Series([True, True, True, False], index=list('abcd'))
tm.assert_series_equal(left.ge(right, fill_value=0), exp)
exp = pd.Series([True, True, False, False], index=list('abcd'))
tm.assert_series_equal(left.gt(right, fill_value=0), exp)
def test_operators_bitwise(self):
# GH 9016: support bitwise op for integer types
index = list('bca')
s_tft = Series([True, False, True], index=index)
s_fff = Series([False, False, False], index=index)
s_tff = Series([True, False, False], index=index)
s_empty = Series([])
# TODO: unused
# s_0101 = Series([0, 1, 0, 1])
s_0123 = Series(range(4), dtype='int64')
s_3333 = Series([3] * 4)
s_4444 = Series([4] * 4)
res = s_tft & s_empty
expected = s_fff
assert_series_equal(res, expected)
res = s_tft | s_empty
expected = s_tft
assert_series_equal(res, expected)
res = s_0123 & s_3333
expected = Series(range(4), dtype='int64')
assert_series_equal(res, expected)
res = s_0123 | s_4444
expected = Series(range(4, 8), dtype='int64')
assert_series_equal(res, expected)
s_a0b1c0 = Series([1], list('b'))
res = s_tft & s_a0b1c0
expected = s_tff.reindex(list('abc'))
assert_series_equal(res, expected)
res = s_tft | s_a0b1c0
expected = s_tft.reindex(list('abc'))
assert_series_equal(res, expected)
n0 = 0
res = s_tft & n0
expected = s_fff
assert_series_equal(res, expected)
res = s_0123 & n0
expected = Series([0] * 4)
assert_series_equal(res, expected)
n1 = 1
res = s_tft & n1
expected = s_tft
assert_series_equal(res, expected)
res = s_0123 & n1
expected = Series([0, 1, 0, 1])
assert_series_equal(res, expected)
s_1111 = Series([1] * 4, dtype='int8')
res = s_0123 & s_1111
expected = Series([0, 1, 0, 1], dtype='int64')
assert_series_equal(res, expected)
res = s_0123.astype(np.int16) | s_1111.astype(np.int32)
expected = Series([1, 1, 3, 3], dtype='int32')
assert_series_equal(res, expected)
self.assertRaises(TypeError, lambda: s_1111 & 'a')
self.assertRaises(TypeError, lambda: s_1111 & ['a', 'b', 'c', 'd'])
self.assertRaises(TypeError, lambda: s_0123 & np.NaN)
self.assertRaises(TypeError, lambda: s_0123 & 3.14)
self.assertRaises(TypeError, lambda: s_0123 & [0.1, 4, 3.14, 2])
# s_0123 will be all false now because of reindexing like s_tft
if compat.PY3:
# unable to sort incompatible object via .union.
exp = Series([False] * 7, index=['b', 'c', 'a', 0, 1, 2, 3])
with tm.assert_produces_warning(RuntimeWarning):
assert_series_equal(s_tft & s_0123, exp)
else:
exp = Series([False] * 7, index=[0, 1, 2, 3, 'a', 'b', 'c'])
assert_series_equal(s_tft & s_0123, exp)
# s_tft will be all false now because of reindexing like s_0123
if compat.PY3:
# unable to sort incompatible object via .union.
exp = Series([False] * 7, index=[0, 1, 2, 3, 'b', 'c', 'a'])
with tm.assert_produces_warning(RuntimeWarning):
assert_series_equal(s_0123 & s_tft, exp)
else:
exp = Series([False] * 7, index=[0, 1, 2, 3, 'a', 'b', 'c'])
assert_series_equal(s_0123 & s_tft, exp)
assert_series_equal(s_0123 & False, Series([False] * 4))
assert_series_equal(s_0123 ^ False, Series([False, True, True, True]))
assert_series_equal(s_0123 & [False], Series([False] * 4))
assert_series_equal(s_0123 & (False), Series([False] * 4))
assert_series_equal(s_0123 & Series([False, np.NaN, False, False]),
Series([False] * 4))
s_ftft = Series([False, True, False, True])
assert_series_equal(s_0123 & Series([0.1, 4, -3.14, 2]), s_ftft)
s_abNd = Series(['a', 'b', np.NaN, 'd'])
res = s_0123 & s_abNd
expected = s_ftft
assert_series_equal(res, expected)
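# Editor's note (illustrative, not from the original suite): when both operands
# already share an integer dtype the bitwise result stays numeric, e.g.
#   Series([0, 1, 2, 3]) & Series([1, 1, 1, 1]) -> Series([0, 1, 0, 1])
# while mixed bool/int operands are first aligned on the union of their
# indexes, which is what makes the reindexed results above come out all False.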
def test_scalar_na_cmp_corners(self):
s = Series([2, 3, 4, 5, 6, 7, 8, 9, 10])
def tester(a, b):
return a & b
self.assertRaises(TypeError, tester, s, datetime(2005, 1, 1))
s = Series([2, 3, 4, 5, 6, 7, 8, 9, datetime(2005, 1, 1)])
s[::2] = np.nan
expected = Series(True, index=s.index)
expected[::2] = False
assert_series_equal(tester(s, list(s)), expected)
d = DataFrame({'A': s})
# TODO: fix this exception (see GH5035); previously this was a TypeError
# because Series returned NotImplemented
# this is an alignment issue; these are equivalent
# https://github.com/pydata/pandas/issues/5284
self.assertRaises(ValueError, lambda: d.__and__(s, axis='columns'))
self.assertRaises(ValueError, tester, s, d)
# this is wrong as its not a boolean result
# result = d.__and__(s,axis='index')
def test_operators_corner(self):
series = self.ts
empty = Series([], index=Index([]))
result = series + empty
self.assertTrue(np.isnan(result).all())
result = empty + Series([], index=Index([]))
self.assertEqual(len(result), 0)
# TODO: this returned NotImplemented earlier, what to do?
# deltas = Series([timedelta(1)] * 5, index=np.arange(5))
# sub_deltas = deltas[::2]
# deltas5 = deltas * 5
# deltas = deltas + sub_deltas
# float + int
int_ts = self.ts.astype(int)[:-5]
added = self.ts + int_ts
expected = Series(self.ts.values[:-5] + int_ts.values,
index=self.ts.index[:-5], name='ts')
self.assert_series_equal(added[:-5], expected)
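# Editor's aside (not part of the original test): adding an int64 Series to a
# float64 Series upcasts the result to float64, e.g.
#   Series([1, 2]) + Series([0.5, 0.5]) -> Series([1.5, 2.5], dtype='float64')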
def test_operators_reverse_object(self):
# GH 56
arr = Series(np.random.randn(10), index=np.arange(10), dtype=object)
def _check_op(arr, op):
result = op(1., arr)
expected = op(1., arr.astype(float))
assert_series_equal(result.astype(float), expected)
_check_op(arr, operator.add)
_check_op(arr, operator.sub)
_check_op(arr, operator.mul)
_check_op(arr, operator.truediv)
_check_op(arr, operator.floordiv)
def test_arith_ops_df_compat(self):
# GH 1134
s1 = pd.Series([1, 2, 3], index=list('ABC'), name='x')
s2 = pd.Series([2, 2, 2], index=list('ABD'), name='x')
exp = pd.Series([3.0, 4.0, np.nan, np.nan],
index=list('ABCD'), name='x')
tm.assert_series_equal(s1 + s2, exp)
tm.assert_series_equal(s2 + s1, exp)
exp = pd.DataFrame({'x': [3.0, 4.0, np.nan, np.nan]},
index=list('ABCD'))
tm.assert_frame_equal(s1.to_frame() + s2.to_frame(), exp)
tm.assert_frame_equal(s2.to_frame() + s1.to_frame(), exp)
# different length
s3 = pd.Series([1, 2, 3], index=list('ABC'), name='x')
s4 = pd.Series([2, 2, 2, 2], index=list('ABCD'), name='x')
exp = pd.Series([3, 4, 5, np.nan],
index=list('ABCD'), name='x')
tm.assert_series_equal(s3 + s4, exp)
tm.assert_series_equal(s4 + s3, exp)
exp = pd.DataFrame({'x': [3, 4, 5, np.nan]},
index=list('ABCD'))
tm.assert_frame_equal(s3.to_frame() + s4.to_frame(), exp)
tm.assert_frame_equal(s4.to_frame() + s3.to_frame(), exp)
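# Editor's aside (illustrative): Series arithmetic aligns on the union of the
# two indexes and fills unmatched labels with NaN, which is why 'C' and 'D'
# become NaN above, e.g.
#   Series([1], index=['A']) + Series([1], index=['B']) -> A: NaN, B: NaN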
def test_comp_ops_df_compat(self):
# GH 1134
s1 = pd.Series([1, 2, 3], index=list('ABC'), name='x')
s2 = pd.Series([2, 2, 2], index=list('ABD'), name='x')
s3 = pd.Series([1, 2, 3], index=list('ABC'), name='x')
s4 = pd.Series([2, 2, 2, 2], index=list('ABCD'), name='x')
for l, r in [(s1, s2), (s2, s1), (s3, s4), (s4, s3)]:
msg = "Can only compare identically-labeled Series objects"
with tm.assertRaisesRegexp(ValueError, msg):
l == r
with tm.assertRaisesRegexp(ValueError, msg):
l != r
with | tm.assertRaisesRegexp(ValueError, msg) | pandas.util.testing.assertRaisesRegexp |
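# Editor's aside (illustrative): unlike the == / != operators checked above,
# the flex methods align on the union of the labels instead of raising, e.g.
#   s1.eq(s2) returns a boolean Series indexed 'ABCD' rather than a ValueError.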
# import pandas plus the feature_engine and scikit-learn classes used below
import pandas as pd
from feature_engine.encoding import OneHotEncoder
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.feature_selection import SelectKBest, chi2, mutual_info_classif
from feature_engine.discretisation import EqualFrequencyDiscretiser as efd
pd.set_option('display.width', 75)
pd.set_option('display.max_columns', 20)
pd.set_option('display.max_rows', 100)
pd.options.display.float_format = '{:,.3f}'.format
nls97compba = pd.read_csv("data/nls97compba.csv")
feature_cols = ['gender','satverbal','satmath','gpascience',
'gpaenglish','gpamath','gpaoverall','motherhighgrade',
'fatherhighgrade','parentincome']
# separate NLS data into train and test datasets
X_train, X_test, y_train, y_test = \
train_test_split(nls97compba[feature_cols],\
nls97compba[['completedba']], test_size=0.3, random_state=0)
# encode the gender feature and scale the other features
ohe = OneHotEncoder(drop_last=True, variables=['gender'])
X_train_enc = ohe.fit_transform(X_train)
scaler = StandardScaler()
standcols = X_train_enc.iloc[:,:-1].columns
X_train_enc = \
pd.DataFrame(scaler.fit_transform(X_train_enc[standcols]),
columns=standcols, index=X_train_enc.index).\
join(X_train_enc[['gender_Female']])
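# Editor's sketch (hypothetical, not in the original script): the same fitted
# encoder and scaler would normally be reused on the test fold, e.g.
# X_test_enc = ohe.transform(X_test)
# X_test_enc = pd.DataFrame(scaler.transform(X_test_enc[standcols]),
#     columns=standcols, index=X_test_enc.index).join(X_test_enc[['gender_Female']])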
# select 5 best features for predicting college completion using mutual information
ksel = SelectKBest(score_func=mutual_info_classif, k=5)
ksel.fit(X_train_enc, y_train.values.ravel())
selcols = X_train_enc.columns[ksel.get_support()]
selcols
| pd.DataFrame({'score': ksel.scores_,
'feature': X_train_enc.columns},
columns=['feature','score']) | pandas.DataFrame |
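# Editor's aside (hypothetical next step, not in the original script): the
# selected columns could then be used to subset the feature matrices, e.g.
# X_train_sel = X_train_enc[selcols]
# with the test matrix subset the same way after encoding and scaling.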
"""
@author: <NAME>
file: main_queue.py
"""
from __future__ import print_function
from scoop import futures
import multiprocessing
import numpy as np
import pandas as pd
import timeit
import ZIPapliences as A_ZIP
class load_generation:
""" Class prepares the system for generating load
Attributes
----------
START_TIME_Q (pandas datetime): start time to generate load data
END_TIME_Q (pandas datetime): end time to generate load data
Queue_type (int): 0=inf; 1=C; 2=Ct
P_U_B (int): percentage upper bound --> e.g. 2 = 200% of the reference
physical_machine (int): 1 = single node 2 = multiple nodes
NUM_WORKERS (int): number of workers used when generating load in a single node
NUM_HOMES (int): number of homes being generated
OUT_PUT_FILE_NAME_pre (str): file path to write output
OUT_PUT_FILE_NAME (str): prefix of file name to be written
OUT_PUT_FILE_NAME_end (str): end of file name
OUT_PUT_FILE_NAME_summary_pre (str): file path to write output
OUT_PUT_FILE_NAME_summary (str): prefix of summary file name to be written
TIME_DELT (pandas timedelta): 1 minute
TIME_DELT_FH (pandas timedelta): 1 hour
TIME_DELT_FD (pandas timedelta): 1 day
base_max (float): rescaling load reference upper bound
base_min (float): rescaling load reference lower bound
ref_load (pandas series): reference load
DF_A (pandas dataframe): appliances characteristics
DF_ZIP_summer (pandas dataframe): appliances participation during the summer
DF_ZIP_winter (pandas dataframe): appliances participation during the winter
DF_ZIP_spring (pandas dataframe): appliances participation during the spring
APP_parameter_list (list): input parameters
[(float) p.u. percentage of schedulable appliances 0.5=50%,
(int) appliance set size,
(int) average power rating in Watts,
(int) standard deviation of the power rating in Watts,
(float) average duration in hours,
(float) standard deviation of the duration in hours,
(float) average duration of the scheduling window in hours,
(float) standard deviation of the scheduling window duration in hours]
Methods
-------
__init__ : create object with the parameters for the load generation
read_data : load input data
"""
def __init__(self,ST,ET,T,P,M,NW,NH):
""" Create load_generation object
Parameters
----------
ST (str): start time to generate load data e.g. '2014-01-01 00:00:00'
ET (str): end time to generate load data
T (int): 0=inf; 1=C; 2=Ct
P (int): percentage upper bound --> e.g. 2 = 200% of the reference
M (int): 1 = single node 2 = multiple nodes
NW (int): number of workers used when generating load in a single node
NH (int): number of homes being generated
"""
self.START_TIME_Q = pd.to_datetime(ST)
self.END_TIME_Q = pd.to_datetime(ET)
self.Queue_type = T
self.P_U_B = P
self.physical_machine = M
self.NUM_WORKERS = NW
self.NUM_HOMES = NH
self.OUT_PUT_FILE_NAME_pre = 'outputdata/multy/'
self.OUT_PUT_FILE_NAME = 'multHDF'
self.OUT_PUT_FILE_NAME_end = '.h5'
self.OUT_PUT_FILE_NAME_summary_pre = 'outputdata/summary/'
self.OUT_PUT_FILE_NAME_summary = 'summaryHDF'
#Auxiliary variables
self.TIME_DELT = pd.to_timedelta('0 days 00:01:00')
self.TIME_DELT_FH = pd.to_timedelta('0 days 01:00:00')
self.TIME_DELT_FD = pd.to_timedelta('1 days 00:00:00')
self.base_max = 5000.0
self.base_min = 100.0
#From data
self.ref_load = None
self.DF_A = None
self.DF_ZIP_summer = None
self.DF_ZIP_winter = None
self.DF_ZIP_spring = None
#DEFINITIONS APPLIANCES
self.APP_parameter_list = [0.5,100,500,100,0.5,0.25,6.0,2.0]
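# Editor's note (illustrative names only): positionally, APP_parameter_list is
#   [sched_fraction, set_size, power_mean_W, power_std_W,
#    duration_mean_h, duration_std_h, window_mean_h, window_std_h]
# mirroring the ordering documented in the class docstring.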
def read_data(self,IF='inputdata/'):
""" Load reference load and appliance data
Parameters
----------
IF (str): folder of input data
"""
# Reference Energy
sys_load = pd.read_hdf(IF+'load_data.h5')
sys_load = sys_load['load']
sys_load = sys_load[self.START_TIME_Q:self.END_TIME_Q+self.TIME_DELT_FD]#*1e6 #DATA IS IN HOURS
sys_load = sys_load.resample(self.TIME_DELT_FH).max().ffill()#fix empty locations
scale_min = sys_load[self.START_TIME_Q:self.END_TIME_Q].min()
scale_max = sys_load[self.START_TIME_Q:self.END_TIME_Q].max()
ref = sys_load
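# min-max rescale the reference load into roughly [base_min, base_max] watts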
ref = self.base_min+((ref-scale_min)/(scale_max-scale_min))*(self.base_max-self.base_min)
ref.name = 'Load [W]'
ref = ref.resample(self.TIME_DELT).max().interpolate(method='polynomial', order=0,limit_direction='forward')
self.ref_load = ref
# ZIP load
self.DF_A = pd.read_csv(IF+'ZIP_appliances.csv')
self.DF_ZIP_summer = pd.read_csv(IF+'ZIP_summer.csv')
self.DF_ZIP_winter = pd.read_csv(IF+'ZIP_winter.csv')
self.DF_ZIP_spring = | pd.read_csv(IF+'ZIP_spring.csv') | pandas.read_csv |
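# Editor's sketch of intended usage (argument values are hypothetical):
# gen = load_generation('2014-01-01 00:00:00', '2014-01-07 00:00:00',
#                       T=0, P=2, M=1, NW=4, NH=100)
# gen.read_data('inputdata/')
# print(gen.ref_load.head())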