prompt (string, length 19-1.03M) | completion (string, length 4-2.12k) | api (string, length 8-90) |
---|---|---|
"""
<NAME>017
PanCancer Classifier
scripts/pancancer_classifier.py
Usage: Run from the command line with the required argument:
python pancancer_classifier.py --genes $GENES
where GENES is a comma-separated string of gene symbols. There are also optional arguments:
--diseases comma separated string of disease types for classifier
default: Auto (will pick diseases from filter args)
--folds number of cross validation folds
default: 5
--drop drop the input genes from the X matrix
default: False if flag omitted
--copy_number optional flag to supplement copy number to define Y
default: False if flag omitted
--filter_count int of low count of mutation to include disease
default: 15
--filter_prop float of low proportion of mutated samples per disease
default: 0.05
--num_features int of number of genes to include in classifier
default: 8000
--alphas comma separated string of alphas to test in pipeline
default: '0.1,0.15,0.2,0.5,0.8,1'
--l1_ratios comma separated string of l1 parameters to test
default: '0,0.1,0.15,0.18,0.2,0.3'
--alt_genes comma separated string of alternative genes to test
default: None
--alt_diseases comma separated string of alternative diseases to test
default: Auto
--alt_filter_count int of low count of mutations to include alt_diseases
default: 15
--alt_filter_prop float of low proportion of mutated samples alt_disease
default: 0.05
--alt_folder string of where to save the classifier figures
default: Auto
--remove_hyper store_true: remove hypermutated samples
default: False if flag omitted
--keep_intermediate store_true: keep intermediate roc curve items
default: False if flag omitted
--x_matrix string of which feature matrix to use
default: raw
Output:
ROC curves, AUROC across diseases, and classifier coefficients
"""
import os
import sys
import warnings
import pandas as pd
import csv
import argparse
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.linear_model import SGDClassifier
from sklearn.model_selection import train_test_split, cross_val_predict
from dask_searchcv import GridSearchCV
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler
from statsmodels.robust.scale import mad
sys.path.insert(0, os.path.join('scripts', 'util'))
from tcga_util import get_args, get_threshold_metrics, integrate_copy_number
from tcga_util import shuffle_columns
# Load command arguments
args = get_args()
genes = args.genes.split(',')
diseases = args.diseases.split(',')
folds = int(args.folds)
drop = args.drop
drop_rasopathy = args.drop_rasopathy
copy_number = args.copy_number
filter_count = int(args.filter_count)
filter_prop = float(args.filter_prop)
num_features_kept = args.num_features
alphas = [float(x) for x in args.alphas.split(',')]
l1_ratios = [float(x) for x in args.l1_ratios.split(',')]
alt_genes = args.alt_genes.split(',')
alt_filter_count = int(args.alt_filter_count)
alt_filter_prop = float(args.alt_filter_prop)
alt_diseases = args.alt_diseases.split(',')
alt_folder = args.alt_folder
remove_hyper = args.remove_hyper
keep_inter = args.keep_intermediate
x_matrix = args.x_matrix
shuffled = args.shuffled
shuffled_before_training = args.shuffled_before_training
no_mutation = args.no_mutation
drop_expression = args.drop_expression
drop_covariates = args.drop_covariates
warnings.filterwarnings('ignore',
message='Changing the shape of non-C contiguous array')
# Generate file names for output
genes_folder = args.genes.replace(',', '_')
base_folder = os.path.join('classifiers', genes_folder)
if alt_folder != 'Auto':
base_folder = alt_folder
if not os.path.exists(base_folder):
os.makedirs(base_folder)
else:
warnings.warn('Classifier may have already been built! Classifier results'
' will be overwritten!', category=Warning)
disease_folder = os.path.join(base_folder, 'disease')
if not os.path.exists(disease_folder):
os.makedirs(disease_folder)
count_table_file = os.path.join(base_folder, 'summary_counts.csv')
cv_heatmap_file = os.path.join(base_folder, 'cv_heatmap.pdf')
full_roc_file = os.path.join(base_folder, 'all_disease_roc.pdf')
full_pr_file = os.path.join(base_folder, 'all_disease_pr.pdf')
disease_roc_file = os.path.join(base_folder, 'disease', 'classifier_roc_')
disease_pr_file = os.path.join(base_folder, 'disease', 'classifier_pr_')
dis_summary_auroc_file = os.path.join(base_folder, 'disease_auroc.pdf')
dis_summary_aupr_file = os.path.join(base_folder, 'disease_aupr.pdf')
classifier_file = os.path.join(base_folder, 'classifier_coefficients.tsv')
roc_results_file = os.path.join(base_folder, 'pancan_roc_results.tsv')
alt_gene_base = 'alt_gene_{}_alt_disease_{}'.format(
args.alt_genes.replace(',', '_'),
args.alt_diseases.replace(',', '_'))
alt_count_table_file = os.path.join(base_folder, 'alt_summary_counts.csv')
alt_gene_auroc_file = os.path.join(base_folder,
'{}_auroc_bar.pdf'.format(alt_gene_base))
alt_gene_aupr_file = os.path.join(base_folder,
'{}_aupr_bar.pdf'.format(alt_gene_base))
alt_gene_summary_file = os.path.join(base_folder,
'{}_summary.tsv'.format(alt_gene_base))
# Load Datasets
if x_matrix == 'raw':
expr_file = os.path.join('data', 'pancan_rnaseq_freeze.tsv.gz')
else:
expr_file = x_matrix
mut_file = os.path.join('data', 'pancan_mutation_freeze.tsv.gz')
mut_burden_file = os.path.join('data', 'mutation_burden_freeze.tsv')
sample_freeze_file = os.path.join('data', 'sample_freeze.tsv')
rnaseq_full_df = pd.read_table(expr_file, index_col=0)
mutation_df = pd.read_table(mut_file, index_col=0)
sample_freeze = pd.read_table(sample_freeze_file, index_col=0)
mut_burden = pd.read_table(mut_burden_file)
# Construct data for classifier
common_genes = set(mutation_df.columns).intersection(genes)
if x_matrix == 'raw':
common_genes = list(common_genes.intersection(rnaseq_full_df.columns))
else:
common_genes = list(common_genes)
y = mutation_df[common_genes]
missing_genes = set(genes).difference(common_genes)
if len(common_genes) != len(genes):
warnings.warn('Not all input genes were found in the data. The missing genes '
'are {}'.format(missing_genes), category=Warning)
if drop:
if x_matrix == 'raw':
rnaseq_full_df.drop(common_genes, axis=1, inplace=True)
if drop_rasopathy:
rasopathy_genes = set(['BRAF', 'CBL', 'HRAS', 'KRAS', 'MAP2K1', 'MAP2K2',
'NF1', 'NRAS', 'PTPN11', 'RAF1', 'SHOC2', 'SOS1',
'SPRED1', 'RIT1'])
rasopathy_drop = list(rasopathy_genes.intersection(rnaseq_full_df.columns))
rnaseq_full_df.drop(rasopathy_drop, axis=1, inplace=True)
# Incorporate copy number for gene activation/inactivation
if copy_number:
# Load copy number matrices
copy_loss_file = os.path.join('data', 'copy_number_loss_status.tsv.gz')
copy_loss_df = pd.read_table(copy_loss_file, index_col=0)
copy_gain_file = os.path.join('data', 'copy_number_gain_status.tsv.gz')
copy_gain_df = pd.read_table(copy_gain_file, index_col=0)
# Load cancer gene classification table
vogel_file = os.path.join('data', 'vogelstein_cancergenes.tsv')
cancer_genes = pd.read_table(vogel_file)
y = integrate_copy_number(y=y, cancer_genes_df=cancer_genes,
genes=common_genes, loss_df=copy_loss_df,
gain_df=copy_gain_df,
include_mutation=no_mutation)
# Process y matrix
y = y.assign(total_status=y.max(axis=1))
y = y.reset_index().merge(sample_freeze,
how='left').set_index('SAMPLE_BARCODE')
count_df = y.groupby('DISEASE').sum()
prop_df = count_df.divide(y['DISEASE'].value_counts(sort=False).sort_index(),
axis=0)
count_table = count_df.merge(prop_df, left_index=True, right_index=True,
suffixes=('_count', '_proportion'))
count_table.to_csv(count_table_file)
# Filter diseases
mut_count = count_df['total_status']
prop = prop_df['total_status']
if diseases[0] == 'Auto':
filter_disease = (mut_count > filter_count) & (prop > filter_prop)
diseases = filter_disease.index[filter_disease].tolist()
# Load mutation burden and process covariates
y_df = y[y.DISEASE.isin(diseases)].total_status
common_samples = list(set(y_df.index) & set(rnaseq_full_df.index))
y_df = y_df.loc[common_samples]
rnaseq_df = rnaseq_full_df.loc[y_df.index, :]
if remove_hyper:
burden_filter = mut_burden['log10_mut'] < 5 * mut_burden['log10_mut'].std()
mut_burden = mut_burden[burden_filter]
y_matrix = mut_burden.merge( | pd.DataFrame(y_df) | pandas.DataFrame |
import logging
import time
from logging import Logger
from typing import Sequence, Dict
import numpy as np
import pandas as pd
from pandas.core.generic import NDFrame
from scipy.stats import trim_mean
from active_learning_ratio_estimation.model.ratio_model import calibrated_param_scan, param_scan, exact_param_scan
from active_learning_ratio_estimation.dataset import SinglyParameterizedRatioDataset, ParamGrid, ParamIterator
from active_learning_ratio_estimation.model import DenseClassifier, SinglyParameterizedRatioModel, FlipoutClassifier
from util.distributions import triple_mixture
def create_models(
theta_0: float,
hyperparams: Dict,
) -> Dict[str, SinglyParameterizedRatioModel]:
# regular, uncalibrated model
regular_estimator = DenseClassifier(activation='tanh', **hyperparams)
regular_uncalibrated = SinglyParameterizedRatioModel(theta_0=theta_0, clf=regular_estimator)
# bayesian, uncalibrated model
bayesian_estimator = FlipoutClassifier(activation='relu', **hyperparams)
bayesian_uncalibrated = SinglyParameterizedRatioModel(theta_0=theta_0, clf=bayesian_estimator)
models = {
'Regular': regular_uncalibrated,
'Bayesian': bayesian_uncalibrated,
}
return models
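# Usage sketch (hypothetical values; the hyperparameter keys accepted by DenseClassifier /
# FlipoutClassifier are assumptions here, not taken from the library docs):
#   models = create_models(theta_0=0.05, hyperparams=dict(n_hidden=(10, 10), epochs=10))
#   models['Regular'], models['Bayesian']  # two uncalibrated ratio models sharing theta_0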
def fit_models(
models: Dict[str, SinglyParameterizedRatioModel],
train_dataset: SinglyParameterizedRatioDataset,
logger: Logger
) -> pd.DataFrame:
for model_name, model in models.items():
logger.info(f'Fitting {model_name} model.')
model.fit(train_dataset.x, train_dataset.theta_1s, train_dataset.y)
history = | pd.DataFrame(model.clf.history_.history) | pandas.DataFrame |
import os
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from matplotlib.patches import Ellipse
import matplotlib.transforms as transforms
from sklearn.model_selection import train_test_split
def confidence_ellipse(x, y, ax, n_std=3.0, facecolor='none', **kwargs):
"""
Create a plot of the covariance confidence ellipse of *x* and *y*.
Parameters
----------
x, y : array-like, shape (n, )
Input data.
ax : matplotlib.axes.Axes
The axes object to draw the ellipse into.
n_std : float
The number of standard deviations used to determine the ellipse's radii.
**kwargs
Forwarded to `~matplotlib.patches.Ellipse`
Returns
-------
matplotlib.patches.Ellipse
"""
if x.size != y.size:
raise ValueError("x and y must be the same size")
cov = np.cov(x, y)
pearson = cov[0, 1] / np.sqrt(cov[0, 0] * cov[1, 1])
# Using a special case to obtain the eigenvalues of this
# two-dimensional dataset.
ell_radius_x = np.sqrt(1 + pearson)
ell_radius_y = np.sqrt(1 - pearson)
ellipse = Ellipse((0, 0), width=ell_radius_x * 2, height=ell_radius_y * 2,
facecolor=facecolor, **kwargs)
# Calculating the standard deviation of x from
# the square root of the variance and multiplying
# by the given number of standard deviations.
scale_x = np.sqrt(cov[0, 0]) * n_std
mean_x = np.mean(x)
# calculating the standard deviation of y ...
scale_y = np.sqrt(cov[1, 1]) * n_std
mean_y = np.mean(y)
transf = transforms.Affine2D().rotate_deg(45).scale(scale_x, scale_y).translate(mean_x, mean_y)
ellipse.set_transform(transf + ax.transData)
return ax.add_patch(ellipse)
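# Usage sketch for confidence_ellipse (synthetic data, for illustration only):
#   rng = np.random.default_rng(0)
#   x_demo = rng.normal(size=500)
#   y_demo = 0.8 * x_demo + rng.normal(scale=0.5, size=500)
#   fig, ax = plt.subplots()
#   ax.scatter(x_demo, y_demo, s=3)
#   confidence_ellipse(x_demo, y_demo, ax, n_std=2.0, edgecolor='red')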
# read data and create dataframes
length = 3100
coord_list = ['all', 'x', 'y', 'z']
# create global variables to store x,y,z and xyz data
for i in range(4):
globals()[f'df_UR5_{coord_list[i]}'] = pd.DataFrame()
home = "data/Kernels/5_7_2022"
for folder in os.listdir(home):
# if "_ex" in folder:
if os.path.isdir(f"{home}/{folder}"):
for file in os.listdir(f"{home}/{folder}"):
if '.csv' in file:
df = | pd.read_csv(f"{home}/{folder}/{file}") | pandas.read_csv |
#!/usr/bin/env python
# coding: utf-8
import numpy as np
import pandas as pd
import os
import _pickle as pickle
import matplotlib.pyplot as plt
from matplotlib import cm
import seaborn as sns
from scipy.signal import savgol_filter
from scipy.signal import find_peaks
from scipy.interpolate import interp1d
def get_peak_parameters_of_ITD_curve(x,y):
_ITD = x
_FR = y
# interp
_x = _ITD
_y = _FR
_f = interp1d(_x, _y)
_dt = 0.1
_ITD_interp = np.arange(_x.min(),_x.max()+_dt,_dt)
_ITD_interp = np.clip(_ITD_interp,_x.min(),_x.max())
_FR_interp = _f(_ITD_interp)
_smoothing_w_length = int(len(_FR_interp)/25)+1
_smoothed_FR = savgol_filter(_FR_interp,_smoothing_w_length, 2)
# find peaks
_peaks, _ = find_peaks(_smoothed_FR, prominence=1, width=20)
_peak_heights = _smoothed_FR[_peaks]
_peak = _peaks[_peak_heights==_peak_heights.max()]
_ref_height = _smoothed_FR[_peak] * 0.5
_half_width_seg = np.arange(len(_smoothed_FR))[_smoothed_FR>_ref_height][[0,-1]]
_peak_ITD = _ITD_interp[_peak][0]
_peak_FR = _smoothed_FR[_peak][0]
_half_width = _ITD_interp[_half_width_seg][1] -_ITD_interp[_half_width_seg][0]
return _peak_ITD, _peak_FR,_half_width
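# Usage sketch (synthetic ITD tuning curve, for illustration only):
#   itd_us = np.linspace(-1000, 1000, 41)              # ITD in microseconds
#   fr = 50 * np.exp(-((itd_us - 200) / 300) ** 2)     # firing rate peaking near +200 us
#   peak_itd, peak_fr, half_width = get_peak_parameters_of_ITD_curve(itd_us, fr)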
def interp_and_smooth_ITD_curve(x,y,scale = 25):
_ITD = x
_FR = y
# interp
_x = _ITD
_y = _FR
_f = interp1d(_x, _y)
_dt = 0.1
_ITD_interp = np.arange(_x.min(),_x.max()+_dt,_dt)
_ITD_interp = np.clip(_ITD_interp,_x.min(),_x.max())
_FR_interp = _f(_ITD_interp)
_smoothing_w_length = int(len(_FR_interp)/scale)+1
_smoothed_FR = savgol_filter(_FR_interp,_smoothing_w_length, 2)
return _ITD_interp,_smoothed_FR
def run_data_visualization():
print('>> Compute peak parameters and plot figures')
# load config
_config_filename = 'config.pckl'
with open(_config_filename, 'rb') as _file:
_config = pickle.load(_file)
import_path = _config['basic']['result_path']
export_path = _config['basic']['fig_path']
os.makedirs(export_path,exist_ok=True)
os.makedirs(import_path,exist_ok=True)
_dir_pckl = os.listdir(import_path)
_sort_str = '.pckl'
_dir_pckl = [x for x in _dir_pckl if x[-len(_sort_str):]==_sort_str]
# list files
df_files = pd.DataFrame(_dir_pckl,columns=['filename'])
df_files['cate_name'] = df_files['filename'].apply(lambda x:x.split('.')[0])
df_files['seed'] = df_files['cate_name'].apply(lambda x:int(x.split('_')[-1]))
# load ITD dataset
print('> loading ITD dataset')
_stimuli_dir = _config['basic']['ANF_path']
_list_input_files = os.listdir(_stimuli_dir)
_list_input_files_seed = [x.split('.')[0].split('_')[-1] for x in _list_input_files]
_dict_df_ITD = {}
for _seed,_file in zip(_list_input_files_seed,_list_input_files):
_load_filename =_stimuli_dir+_file
f = open(_load_filename, 'rb')
_load_profile_data = pickle.load(f)
f.close()
_dict_df_ITD[int(_seed)] = _load_profile_data[-1]
_df_ITD = _load_profile_data[-1]
fs_input_sounds = _load_profile_data[6]
_sti_duration = np.mean(_df_ITD['End_R'] - _df_ITD['Start_R'] )/fs_input_sounds
# overall iterations for ITD curves, peaks, and accuracy
print('> start computing ITD tuning curve parameters')
_df_decoded = []
for _i_file,_row in list(df_files.iterrows()):
# load data
_load_name = import_path+_row['filename']
print('> processing file:',_load_name)
with open(_load_name, 'rb') as fp:
_loaded_list = pickle.load(fp)
_spike_count_list,acc_set,df_sensitivity = _loaded_list
_acc,_conf,_df_decoding_error,_MSE,_dict_class_info = acc_set
_sensitivity_acc = df_sensitivity[df_sensitivity['ITD']==10]['Accuracy'].values[0]
_df_decoding_error['Matched'] = _df_decoding_error['Target'] == _df_decoding_error['Predicted']
_acc = _df_decoding_error.Matched.mean()
_df = _row.copy()
_df['Accuracy'] = np.round(_acc,5)
_df['MSE'] = np.round(_MSE,5)
_df['PairedAcc'] = np.round(_sensitivity_acc,5)
_df_info = _df.copy()
### compute peak information
_seed = _row['seed']
_df_spike_count = pd.DataFrame(dict(zip(['SpikeCount','Neuron','Sequence'],_spike_count_list)))
_df_ITD = _dict_df_ITD[int(_seed)].copy()
_df_spike_count = _df_spike_count.merge(_df_ITD, on ='Sequence')
_df_spike_count['ITD']*=1e6
_df_spike_count['Object'] = _df_spike_count['Neuron'].where(_df_spike_count['Neuron']>=10000,'MSO_L')
_df_spike_count['Object'] = _df_spike_count['Object'].mask(_df_spike_count['Neuron']>=10000,'MSO_R')
## quantile for selecting neurons for curve-plotting
_quantile_thres = [0.2, 0.8]
_df_neuron_left = _df_spike_count[_df_spike_count['Object']=='MSO_L'].pivot_table(index = 'Neuron',values='SpikeCount')
_df_neuron_left = _df_neuron_left[_df_neuron_left['SpikeCount']!=0]
_lower_quantile = _df_neuron_left.SpikeCount.quantile(_quantile_thres[0])
_upper_quantile = _df_neuron_left.SpikeCount.quantile(_quantile_thres[1])
_neuron_list_left = list(_df_neuron_left[(_df_neuron_left.SpikeCount>_lower_quantile)&
(_df_neuron_left.SpikeCount<_upper_quantile)].index)
_df_neuron_right = _df_spike_count[_df_spike_count['Object']=='MSO_R'].pivot_table(index = 'Neuron',values='SpikeCount')
_df_neuron_right = _df_neuron_right[_df_neuron_right['SpikeCount']!=0]
_lower_quantile = _df_neuron_right.SpikeCount.quantile(_quantile_thres[0])
_upper_quantile = _df_neuron_right.SpikeCount.quantile(_quantile_thres[1])
_neuron_list_right = list(_df_neuron_right[(_df_neuron_right.SpikeCount>_lower_quantile)&
(_df_neuron_right.SpikeCount<_upper_quantile)].index)
_neuron_list = _neuron_list_left + _neuron_list_right
_df_sorted = _df_spike_count[_df_spike_count['Neuron'].isin(_neuron_list)]
## get ITD-FR tuning curve
_df_ITD_curve = _df_sorted.pivot_table(index = ['Object','ITD'],values='SpikeCount')
_df_ITD_curve = pd.DataFrame(_df_ITD_curve.to_records())
_df_ITD_curve['FiringRate'] = _df_ITD_curve['SpikeCount']/_sti_duration
for _object in _df_ITD_curve.Object.unique():
_df_sig = _df_ITD_curve[_df_ITD_curve['Object']==_object]
_ITD = _df_sig['ITD'].to_numpy()
_FR = _df_sig['FiringRate'].to_numpy()
# compute peak parameters
_peak_ITD, _peak_FR,_half_width = get_peak_parameters_of_ITD_curve(_ITD,_FR)
_df_peak = _df_info.copy()
_df_peak['Object'] = _object
_df_peak['PeakITD'] = _peak_ITD
_df_peak['PeakHeight'] = _peak_FR
_df_peak['PeakWidth'] = _half_width
_df_peak['ITD_v'] = _ITD
_df_peak['FR_v'] = _FR
_df_decoded.append(pd.DataFrame(_df_peak).T)
_df_decoded = | pd.concat(_df_decoded) | pandas.concat |
from datetime import datetime
import pandas as pd
import requests
BASE_URL = "https://api.coingecko.com/api/v3/"
def get_coin_market_chart(token_name):
url = BASE_URL+"coins/{}/market_chart?vs_currency=usd&days=max&interval=daily".format(token_name)
resp = requests.get(url)
if resp.status_code == 200:
result = resp.json()
else:
print('Request Error: {}: invalid token name'.format(resp.status_code))
result = {}
return result
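# Usage sketch (illustrative; 'bitcoin' is just an example CoinGecko token id):
#   chart = get_coin_market_chart('bitcoin')
#   n_days = len(chart.get('market_caps', []))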
def get_coin_market_cap(token_name):
market_chart = get_coin_market_chart(token_name)
market_caps = [{
'Date':datetime.utcfromtimestamp(int(item[0]/1000)),
'mcap':item[1]
} for item in market_chart['market_caps']]
df = | pd.DataFrame(market_caps) | pandas.DataFrame |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Aug 29 16:04:09 2020
@author: gabygerlach
"""
import numpy as np
import pandas as pd
#import sklearn
import matplotlib.pyplot as plt
import seaborn as sns
### CODE from YouTube videos
diamonds = pd.read_csv('data/diamonds.csv.gz')
y=diamonds.pop('price')
d1 =diamonds.select_dtypes(include='number')
d2 = diamonds.select_dtypes(exclude = 'number')
from sklearn.preprocessing import scale
bl = pd.DataFrame(scale(d1))
bl.columns = d1.columns
d1=bl
d2 = | pd.get_dummies(d2) | pandas.get_dummies |
""" MarketBeat View """
__docformat__ = "numpy"
import argparse
import numpy as np
import pandas as pd
from typing import List
from gamestonk_terminal.helper_funcs import (
check_positive,
parse_known_args_and_warn,
)
from gamestonk_terminal.discovery import marketbeat_model
def ratings_view(other_args: List[str]):
"""Prints top ratings updates
Parameters
----------
other_args : List[str]
argparse other args - ["-t", "100"]
"""
parser = argparse.ArgumentParser(
add_help=False,
prog="ratings",
description="""Top ratings updates. [Source: MarketBeat]""",
)
parser.add_argument(
"-t",
"--threshold",
action="store",
dest="n_threshold",
type=check_positive,
default=100,
help="Minimum threshold in percentage change between current and target price to show ratings",
)
if other_args:
if "-" not in other_args[0]:
other_args.insert(0, "-t")
ns_parser = parse_known_args_and_warn(parser, other_args)
if not ns_parser:
return
try:
ratings = marketbeat_model.get_ratings()
df_ratings = | pd.DataFrame(ratings) | pandas.DataFrame |
# -*- coding: utf-8 -*-
"""
Created on Wed Jun 23 11:32:40 2021
@author: bianca
"""
# +++ IMPLIED GROWTH RATES +++ #
import pandas as pd
import os
import numpy as np
from datetime import datetime
## select Merge File US Equity and WRDS
df_assig3 = pd.read_csv("./data/external/assignment_3_sp500_constituents_with_daily_mdata.csv")
df_assig3['date']
df_assig3.head(29)
df_assig3.iloc[:]
# S&P 500 Index
SP500_avg = df_assig3.groupby(['date']).agg({
'prc': 'mean'
})
SP500_avg
SP500_index = pd.DataFrame(SP500_avg, columns= ['prc'])
SP500_index
SP500_index.to_csv("./data/external/assignment_3_SP500_index.csv")
# summarize data
df2 = df_assig3.groupby(['date', 'comnam', 'permno', 'hsiccd', 'ticker', 'gvkey']).agg({
'prc': 'mean'
})
df2
df2.to_csv("./data/external/assignment_3_sp500_summary.csv")
df_CAR_UE = pd.read_csv("./data/external/assign3_summary CAR UE.csv")
df_CAR_UE['hsiccd']
type(['month'])
df_CAR_UE['month'].astype(int)
## create 'month' and 'year' column
pd.Timestamp(df_CAR_UE['date'])
## define 'quarters'
def quartal(month):
if month <= 3: return(1)
if month <= 6: return(2)
if month <= 9: return(3)
return (4)
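## e.g. quartal(2) -> 1, quartal(7) -> 3, quartal(11) -> 4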
## create 'quarter' column
df_CAR_UE['quarter'] = [quartal(m) for m in df_CAR_UE['month']]
print(df_CAR_UE['quarter'])
df_CAR_UE.to_csv("./data/external/assignment_3_sp500_output.csv")
## sum industry
df_ind = df_CAR_UE.groupby(['comnam', 'gvkey', 'hsiccd']).agg({'prc': 'mean'})
df_ind.to_csv("./data/external/assignment_3_sp500_ind.csv")
df_ind2 = df_CAR_UE.groupby(['comnam', 'gvkey']).agg({'hsiccd': 'count'})
df_ind2
df_ind.to_csv("./data/external/assignment_3_sp500_ind2.csv")
df_fund = | pd.read_csv("./data/external/assignment_3_cmp_fundamentals.csv") | pandas.read_csv |
import pandas as pd
import numpy as np
import os
import yfinance as yf
# tags (part of statement to keep)
tags = ['AssetsCurrent', 'CashAndCashEquivalentsAtCarryingValue', 'LiabilitiesCurrent', 'Liabilities',
'IncomeTaxesPaid', 'IncomeTaxesPaidNet', 'DepreciationDepletionAndAmortization',
'OperatingIncomeLoss', 'Assets', 'StockholdersEquity', 'WeightedAverageNumberOfSharesOutstandingBasic',
'NetCashProvidedByUsedInOperatingActivities', 'OtherLiabilitiesNoncurrent',
'RevenueFromContractWithCustomerExcludingAssessedTax', 'CostOfGoodsAndServicesSold', 'CostOfRevenue',
'EarningsPerShareBasic', 'Revenues', 'ResearchAndDevelopmentExpense', 'SellingGeneralAndAdministrativeExpense',
'PaymentsToAcquirePropertyPlantAndEquipment']
# the quarters the final dataframe should contain
quarters = ['2017Q4', '2018Q1', '2018Q2', '2018Q3', '2018Q4', '2019Q1', '2019Q2', '2019Q3', '2019Q4',
'2020Q1', '2020Q2', '2020Q3', '2020Q4', '2021Q1', '2021Q2', '2021Q3', '2021Q4']
# year of last annual statement
year = 2020
def create_quarterly_data(quarters, tags):
"""
:param quarters: quarters for which financial statement should be considered
:param tags: parts of financial statement which should be considered
:return: returns quarterly data for all tags and quarters
"""
# final DataFrame
financial_statement = pd.DataFrame()
# get ticker data
ticker = pd.read_json('./data/ticker.txt').T
# transform ticker
ticker = ticker.drop(['title'], axis=1)
ticker.columns = ['cik', 'ticker']
ticker['cik'] = ticker['cik'].astype(str)
# some cik's have more than one ticker
ticker = ticker.drop_duplicates(subset='cik')
# iterate though all the folders in data
for folder in os.listdir('./data'):
if folder.startswith("20"):
print(folder)
# import data
sub = pd.read_csv(f"./data/{folder}/sub.txt", sep="\t", dtype={"cik": str})
num = pd.read_csv(f"./data/{folder}/num.txt", sep="\t")
# transform sub data
# filter for needed columns
cols = ['adsh', 'cik', 'name', 'sic', 'form', 'filed', 'period', 'accepted', 'fy', 'fp']
sub = sub[cols]
# change to datetype
sub["accepted"] = pd.to_datetime(sub["accepted"])
sub["period"] = pd.to_datetime(sub["period"], format="%Y%m%d")
sub["filed"] = pd.to_datetime(sub["filed"], format="%Y%m%d")
# filter for quarterly and annual financial data
sub = sub[sub['form'].isin(['10-K', '10-Q'])]
# delete duplicates --> company handed in same file in same period --> only keep newest
sub = sub.loc[sub.sort_values(by=["filed", "accepted"], ascending=False).groupby(["cik", "period"]).cumcount() == 0]
# drop not needed columns
sub = sub.drop(['filed', 'period', 'accepted', 'fy', 'fp'], axis=1)
# merge ticker and sub data
sub = sub.merge(ticker)
# transform num data
# change to datetype
num["ddate"] = pd.to_datetime(num["ddate"], format="%Y%m%d")
# filter for needed columns
cols_num = ['adsh', 'tag', 'ddate', 'qtrs', 'value']
num = num[cols_num]
# only select current date and quarter
num = num.loc[
num.sort_values(by=["ddate", "qtrs"], ascending=(False, True)).groupby(["adsh", "tag"]).cumcount() == 0]
# create quarter and year column
num['quarter'] = num['ddate'].dt.quarter
num['year'] = num['ddate'].dt.year
# merge num and sub data
num = num.merge(sub)
# append to financial statement
financial_statement = financial_statement.append(num)
# filter for needed tags
financial_statement = financial_statement[financial_statement.loc[:, 'tag'].isin(tags)]
financial_statement = financial_statement.sort_values(by='ddate')
# create Q4 data
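# e.g. if a 10-K reports full-year revenue of 100 and Q1-Q3 sum to 70, Q4 is stored as 30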
for idx, row in financial_statement.iterrows():
# when form is 10-K --> annual report --> change to quarterly
if row['form'] == '10-K':
# some companies only deliver full year numbers (qtrs = 4)
if row['qtrs'] == 4:
# filter for company and tag, select index of last 3 quarters
idx_list = financial_statement[
(financial_statement.loc[:, 'ticker'] == row['ticker']) &
(financial_statement.loc[:, 'tag'] == row['tag'])].index.values.tolist()
idx_position = idx_list.index(idx)
idx_list = idx_list[idx_position - 3:idx_position]
# subtract sum of all quarters from full year number
financial_statement.at[idx, 'value'] = financial_statement.at[idx, 'value'] - \
financial_statement.loc[idx_list, 'value'].sum()
# reset index
financial_statement = financial_statement.reset_index()
# only keep last 16 quarters
financial_statement['year-quarter'] = financial_statement['year'].astype(str) + 'Q' + financial_statement['quarter'].astype(str)
financial_statement = financial_statement.loc[financial_statement['year-quarter'].isin(quarters)]
financial_statement = financial_statement.drop(['index', 'adsh', 'ddate', 'qtrs', 'form'], axis=1)
# save as gzip file
financial_statement.to_parquet('./data/financial_statements.parquet.gzip', compression='gzip')
return financial_statement
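# Usage sketch (uses the module-level `quarters` and `tags` lists defined above):
#   df_quarterly = create_quarterly_data(quarters, tags)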
def create_annual_data(tags):
"""
:param tags: parts of financial statement which should be considered
:return: returns annual data for all tags
"""
# final DataFrame
financial_statement = | pd.DataFrame() | pandas.DataFrame |
# -*- coding: utf-8 -*-
"""
Created on Thu Apr 16 20:31:43 2020
@author: balajiramesh
"""
icd_cols=['PRINC_DIAG_CODE', 'OTH_DIAG_CODE_1', 'OTH_DIAG_CODE_2',
'OTH_DIAG_CODE_3', 'OTH_DIAG_CODE_4', 'OTH_DIAG_CODE_5',
'OTH_DIAG_CODE_6', 'OTH_DIAG_CODE_7', 'OTH_DIAG_CODE_8',
'OTH_DIAG_CODE_9', 'E_CODE_1', 'E_CODE_2', 'E_CODE_3', 'E_CODE_4',
'E_CODE_5']
icd_codes=sp.loc[:,icd_cols]
icd_codes=icd_codes.values.flatten()
icd_codes = icd_codes[~pd.isnull(icd_codes)]
icd_codes=pd.Series(icd_codes)
unique_codes=pd.Series(icd_codes.unique())
#check for icd 10 format
unique_codes[~unique_codes.str.match("([A-TV-Z][0-9][A-Z0-9](\.?[A-Z0-9]{0,4})?)")]
unique_codes[unique_codes.str.match("[^A-Z\d.]")]
#%% group by date and census tract
def groupAndCat(df):
grouped_tracts=df.groupby(['STMT_PERIOD_FROM_GROUPED', 'PAT_ADDR_CENSUS_TRACT']).size().reset_index()
grouped_tracts.columns = [*grouped_tracts.columns[:-1], 'Counts']
#remove zero counts groups
grouped_tracts=grouped_tracts.loc[grouped_tracts['Counts']>0,]
#%% merge aux files to see the counts
import pandas as pd
import glob
import os
files=glob.glob(r'Z:\Balaji\Analysis_out_IPOP\23122020\SVI_Cat_T4\*_aux.csv')
x=[]
for f in files:
df=pd.read_csv(f)
df["outcome"]=os.path.basename(f).replace("_aux.csv","")
x.append(df)
concat_df=pd.concat(x)
concat_df['folder']='SVI_Cat_T4'
concat_df.to_clipboard(index=False)
#%%
result_df=pd.DataFrame({'outcome':concat_df.outcome.unique()})
flood_cats=['NO','FLood_1']
for cat in flood_cats:
for period in concat_df.Time.unique():
cols=concat_df.loc[concat_df.Time==period,['outcome',cat]].rename(columns={cat:cat+'_'+period})
result_df=result_df.merge(cols,on='outcome',how='left')
result_df.to_clipboard(index=False)
#%%
#filtering zip codes
#using Zipcode
sp.loc[:,"ZIP5"]=sp.PAT_ZIP.str.slice(stop=5)
sp=sp.loc[~sp.ZIP5.isin(['0'*i for i in range(1,6)]),:]
one_var="ZIP5"
#%% read and merge sys
files=glob.glob('Z:\\SyS data\\*')
x=[pd.read_csv(f,encoding = "ISO-8859-1") for f in files]
result_df=pd.concat(x)
result_df.to_csv("Z:\\Balaji\\SyS data\\merged.csv",index=None)
#%%find unique tracts and counts in OP data
rm_df=df.loc[:,['Outcome','floodr','Time','year','month','weekday', 'PAT_AGE_YEARS','SEX_CODE','RACE','ETHNICITY','PAT_ADDR_CENSUS_TRACT']]
rm_df=rm_df.dropna()
rm_df.PAT_ADDR_CENSUS_TRACT.unique()
(rm_df.PAT_ADDR_CENSUS_TRACT//1000000).unique()
#%% checking how GLM and GEE Poisson models work with an offset
###############################
import random
df=pd.DataFrame(np.random.randint(0,2,size=(1000, 2)),columns=['x','y'])
df.y[df.x==1]=random.choices([0,1], [.2,.8],k= df.y[df.x==1].shape[0])
ct=pd.crosstab(df.y,df.x).values
#(390/(104+390))/(263/(263+243))
print(pd.crosstab(df.x,df.y))
orr= (ct[1,1]/(ct[1,1]+ct[0,1]))/(ct[1,0]/(ct[1,0]+ct[0,0]))
choices=[1000,4000]
offset=np.log(random.choices(choices, [.5,.5],k= 1000))
#%%
print('overall rr')
print(orr)
sub_df=df[offset==np.log(choices[0])]
ct=pd.crosstab(sub_df.y,sub_df.x).values
print('subdf 1 rr')
rr= (ct[1,1]/(ct[1,1]+ct[0,1]))/(ct[1,0]/(ct[1,0]+ct[0,0]))
print(rr)
sub_df=df[offset==np.log(choices[1])]
ct=pd.crosstab(sub_df.y,sub_df.x).values
print('subdf 2 rr')
rr= (ct[1,1]/(ct[1,1]+ct[0,1]))/(ct[1,0]/(ct[1,0]+ct[0,0]))
print(rr)
model = smf.gee(formula='y~x',groups=df.index, data=df,offset=None,family=sm.families.Poisson(link=sm.families.links.log()))
results=model.fit()
results_as_html = results.summary().tables[1].as_html()
reg_table=pd.read_html(results_as_html, header=0, index_col=0)[0].reset_index()
reg_table.loc[:,'coef']=np.exp(reg_table.coef)
reg_table.loc[:,['[0.025', '0.975]']]=np.exp(reg_table.loc[:,['[0.025', '0.975]']])
print('gee---------------')
print(reg_table)
model = smf.glm(formula='y~x',data=df,offset=None,family=sm.families.Poisson(link=sm.families.links.log()))
results=model.fit()
results_as_html = results.summary().tables[1].as_html()
reg_table=pd.read_html(results_as_html, header=0, index_col=0)[0].reset_index()
reg_table.loc[:,'coef']=np.exp(reg_table.coef)
reg_table.loc[:,['[0.025', '0.975]']]=np.exp(reg_table.loc[:,['[0.025', '0.975]']])
print('glm---------------')
print(reg_table)
print('with offset glm-------------')
model = smf.glm(formula='y~x',data=df,offset=offset,family=sm.families.Poisson(link=sm.families.links.log()))
results=model.fit()
results_as_html = results.summary().tables[1].as_html()
reg_table=pd.read_html(results_as_html, header=0, index_col=0)[0].reset_index()
reg_table.loc[:,'coef']=np.exp(reg_table.coef)
reg_table.loc[:,['[0.025', '0.975]']]=np.exp(reg_table.loc[:,['[0.025', '0.975]']])
print(reg_table)
#model = smf.logit(formula=formula, data=df,missing='drop')
#model = smf.glm(formula=formula, data=df,missing='drop',family=sm.families.Binomial(sm.families.links.logit()))
#%%looping for automatic saving
#floodr_use="DFO_R200" #['DFO_R200','DFO_R100','LIST_R20','DFO_R20','DFOuLIST_R20']
#nullAsZero="True" #null flood ratios are changed to 0
#floodZeroSep="True" # zeros are considered as seperate class
#flood_data_zip=None
#Dis_cats=["DEATH","Dehydration","Bite-Insect","Dialysis","Asthma_like","Respiratory_All","Infectious_and_parasitic"]
Dis_cats=[ 'ALL',
#'Psychiatric',
'Intestinal_infectious_diseases',
'ARI',
'Bite-Insect',
#'DEATH',
# #'Flood_Storms',
#'CO_Exposure',
#'Drowning',
#'Heat_Related_But_Not_dehydration',
# 'Hypothermia',
# #'Dialysis',
# #'Medication_Refill',
# 'Asthma',
'Pregnancy_complic',
'Chest_pain',
'Dehydration',
]
for exposure in ['triCloseProxDur',
'triDistMiles', 'hvyRainDur', 'totRainfall']:
print(exposure)
for Dis_cat in Dis_cats:
try:
print(Dis_cat)
print("-"*50)
run()
except Exception as e: print(e)
#%%
SVI_COLS=['SVI_Cat','SVI_Cat_T1', 'SVI_Cat_T2', 'SVI_Cat_T3', 'SVI_Cat_T4']
import os
for SVI_COL in SVI_COLS:
for FIL_COL in [1,2,3,4]:
#os.mkdir(SVI_COL)
#os.chdir(SVI_COL)
for Dis_cat in Dis_cats:
try:
print(Dis_cat)
print("-"*50)
run()
except Exception as e: print(e)
#os.chdir('..')
#%% pivot table for counts and merging the pivot tables
outcomes=['Asthma','Bite_Insect','CardiovascularDiseases','Dehydration','Diarrhea','Pregnancy_complic','Heat_Related_But_Not_dehydration']
sex=pd.pivot_table(data=df,index=['period','flood_binary'],values=outcomes,aggfunc='sum',columns=['Sex']).T.rename(columns=str).reset_index().rename(columns={'Sex':'cats'})
Ethnicity=pd.pivot_table(data=df,index=['period','flood_binary'],values=outcomes,aggfunc='sum',columns=['Ethnicity']).T.rename(columns=str).reset_index().rename(columns={'Ethnicity':'cats'})
Race=pd.pivot_table(data=df,index=['period','flood_binary'],values=outcomes,aggfunc='sum',columns=['Race']).T.rename(columns=str).reset_index().rename(columns={'Race':'cats'})
start=-1
df['AgeGrp']=pd.cut(df.Age,[start,5,17,50,64,200],labels=['0_5','6_17','18_50','51_64','gt64'])
age1=pd.pivot_table(data=df,index=['period','flood_binary'],values=['Dehydration'],aggfunc='sum',columns=['AgeGrp']).T.rename(columns=str).reset_index().rename(columns={'AgeGrp':'cats'})
df['AgeGrp']= | pd.cut(df.Age,[start,5,17,50,200],labels=['0_5','6_17','18_50','gt50']) | pandas.cut |
import csv
import pandas as pd
from sklearn.datasets import load_boston
from sklearn.model_selection import train_test_split
import numpy as np
import statsmodels.api as sm
df = pd.read_csv("./[Track1_데이터3] samp_cst_feat.csv")
X = df[1:]
df = df.drop(1,0)
data=[]
f = open('./[Track1_데이터3] samp_cst_feat.csv','r')
rdr = csv.reader(f)
for i in rdr:
data.append(i)
a=data[0]
del a[0]
df2 = pd.read_csv("./[Track1_데이터2] samp_train.csv")
featureColumns = a
df2 = df2.drop(1,0)
y = df2['MRC_ID_DI']
X_train, X_test, Y_train, Y_test = train_test_split(X, y, test_size=0.3)
#y.astype(int)
#X.astype(int)
y= list(y)
X= list(X)
def processSubset(x,y, feature_set):
model = sm.OLS(y,x) #modeling
regr = model.fit() # fit the model
AIC = regr.aic # the model's AIC
return {"model" : regr , "AIC" : AIC}
print(processSubset(x=X_train,y=Y_train,feature_set= featureColumns))
# exhaustively fit every combination of k predictors and return the best (lowest-AIC) model
import time
import itertools
def getBest(x,y,k):
tic = time.time() # start time
results = [] # storage for fitted models
for combo in itertools.combinations(x.columns.difference(['const']),k):
combo=(list(combo)+['const'])
# each combination of k predictors, plus the constant term
results.append(processSubset(x,y,feature_set=combo)) # store each fitted model
models=pd.DataFrame(results) # convert to a DataFrame
# select and keep the model with the lowest AIC
bestModel = models.loc[models['AIC'].argmin()] #index
toc = time.time() # end time
print("Processed",models.shape[0],"models on",k,"predictors in",(toc-tic),
"seconds.")
return bestModel
#print(getBest(x=X_train,y=Y_train,k=2))
# repeat for k = 1..3 predictors, recording the fit time and best model for each
models = pd.DataFrame(columns=["AIC","model"])
tic = time.time()
for i in range(1,4):
models.loc[i] = getBest(X_train,Y_train,i)
#toc = time.time()
#print("Total elapsed time : ",(toc-tic),"seconds")
#print(models)
# forward selection (adds one predictor per step)
def forward(x,y,predictors):
remainingPredictors = [p for p in x.columns.difference(['const'])
if p not in predictors]
tic=time.time()
results=[]
for p in remainingPredictors:
results.append(processSubset(x=x,y=y,feature_set=predictors+[p]+['const']))
# convert to a DataFrame
models = pd.DataFrame(results)
# select the model with the lowest AIC
bestModel = models.loc[models['AIC'].argmin()] #index
toc = time.time()
print("Processed ", models.shape[0],"models on", len(predictors)+1,
"predictors in",(toc-tic))
print("Selected predictors:",bestModel['model'].model.exog_names,
'AIC : ',bestModel[0])
return bestModel
# forward-selection model
def forward_model(x,y):
fModels = | pd.DataFrame(columns=["AIC","model"]) | pandas.DataFrame |
import pandas as pd
import matplotlib.pyplot as plt
tamano_muestra = 120 #N
bandera_paso = False
iter = 0
lsupAnterior = -5
linfAnterior = -5
licentAnterior = -5
datos = pd.read_csv('data.csv', header=None)
articulos_defectuosos = datos
outLanterior = | pd.Series() | pandas.Series |
from qutip import *
from ..mf import *
import pandas as pd
from scipy.interpolate import interp1d
from copy import deepcopy
import matplotlib.pyplot as plt
def ham_gen_jc(params, alpha=0):
sz = tensor(sigmaz(), qeye(params.c_levels))
sm = tensor(sigmam(), qeye(params.c_levels))
a = tensor(qeye(2), destroy(params.c_levels)) + alpha
ham = (params.fc-params.fd)*a.dag()*a
ham += params.eps*(a+a.dag())
ham += 0.5*(params.f01-params.fd)*sz
ham += params.g*(a*sm.dag() + a.dag()*sm)
ham *= 2*np.pi
return ham
def c_ops_gen_jc(params, alpha=0):
c_ops = []
sm = tensor(sigmam(), qeye(params.c_levels))
a = tensor(qeye(2), destroy(params.c_levels)) + alpha
if params.gamma > 0.0:
c_ops.append(np.sqrt(2*np.pi*params.gamma*(1+params.n_t))*sm)
if params.n_t > 0:
c_ops.append(np.sqrt(2*np.pi*params.gamma*params.n_t)*sm.dag())
if params.gamma_phi > 0.0:
c_ops.append(np.sqrt(2*np.pi*params.gamma_phi)*sm.dag()*sm)
if params.kappa > 0.0:
c_ops.append(np.sqrt(2*np.pi*params.kappa*(1+params.n_c))*a)
if params.n_c > 0:
c_ops.append(np.sqrt(2*np.pi*params.kappa*params.n_c)*a.dag())
return c_ops
def iterative_alpha_calc(params, n_cycles=10, initial_alpha=0):
alpha = initial_alpha
try:
for idx in range(n_cycles):
ham = ham_gen_jc(params, alpha=alpha)
c_ops = c_ops_gen_jc(params, alpha=alpha)
rho = steadystate(ham, c_ops)
a = tensor(qeye(2), destroy(params.c_levels)) + alpha
a_exp = expect(a, rho)
alpha = a_exp
except:
alpha = None
return alpha
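# Usage sketch: given a parameters object exposing the attributes referenced above
# (fd, fc, f01, eps, g, gamma, gamma_phi, kappa, n_t, n_c, c_levels),
#   alpha_ss = iterative_alpha_calc(params, n_cycles=20)
# returns the self-consistent displacement used to re-centre the cavity mode.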
class Spectrum:
def __init__(self, parameters):
print('hello')
self.parameters = deepcopy(parameters)
self.mf_amplitude = None
self.me_amplitude = None
self.transmission_exp = None
self.hilbert_params = None
def iterative_calculate(self, fd_array, initial_alpha=0, n_cycles=10, prune=True):
if self.parameters.fc < self.parameters.f01:
change = 'hard'
else:
change = 'soft'
params = deepcopy(self.parameters)
fd_array = np.sort(fd_array)
a_array = np.zeros(fd_array.shape[0], dtype=complex)
alpha = initial_alpha
for fd_idx, fd in tqdm(enumerate(fd_array)):
params.fd = fd
alpha = iterative_alpha_calc(params, initial_alpha=alpha, n_cycles=n_cycles)
a_array[fd_idx] = alpha
if change == 'hard':
alpha_bright_iterative = pd.Series(a_array, index=fd_array, name='alpha_bright')
else:
alpha_dim_iterative = pd.Series(a_array, index=fd_array, name='alpha_dim')
fd_array = np.flip(fd_array)
a_array = np.zeros(fd_array.shape[0], dtype=complex)
alpha = initial_alpha
for fd_idx, fd in tqdm(enumerate(fd_array)):
params.fd = fd
alpha = iterative_alpha_calc(params, initial_alpha=alpha, n_cycles=n_cycles)
a_array[fd_idx] = alpha
if change == 'hard':
alpha_dim_iterative = pd.Series(a_array, index=fd_array, name='alpha_dim')
else:
alpha_bright_iterative = pd.Series(a_array, index=fd_array, name='alpha_bright')
if prune:
alpha_dim_iterative = alpha_dim_iterative.dropna()
alpha_bright_iterative = alpha_bright_iterative.dropna()
alpha_dim_iterative.sort_index(inplace=True)
alpha_bright_iterative.sort_index(inplace=True)
if change == 'hard':
# alpha_dim_diff = np.diff(alpha_dim_iterative)/np.diff(alpha_dim_iterative.index)
# first_dim_idx = np.argmax(np.abs(alpha_dim_diff)) + 1
first_dim_idx = np.argmax(alpha_dim_iterative.real)
alpha_dim_iterative = alpha_dim_iterative.iloc[first_dim_idx:]
# alpha_bright_diff = np.diff(alpha_bright_iterative) / np.diff(alpha_bright_iterative.index)
# last_bright_idx = np.argmax(np.abs(alpha_bright_diff))
last_bright_idx = np.argmin(alpha_bright_iterative.imag)
alpha_bright_iterative = alpha_bright_iterative.iloc[:last_bright_idx + 1]
else:
first_bright_idx = np.argmin(alpha_bright_iterative.imag)
alpha_bright_iterative = alpha_bright_iterative.iloc[first_bright_idx:]
last_dim_idx = np.argmin(alpha_dim_iterative.real)
alpha_dim_iterative = alpha_dim_iterative.iloc[:last_dim_idx+1]
self.iterative_amplitude = pd.concat([alpha_dim_iterative, alpha_bright_iterative], axis=1)
def gen_raw_hilbert_params(self, fd_array, c_levels):
self.hilbert_params = pd.DataFrame(np.zeros([fd_array.shape[0], 1]), index=fd_array, columns=['alpha_0'])
self.hilbert_params['c_levels'] = c_levels
def gen_iterative_hilbert_params(self, fd_limits, kind='linear', fill_value='extrapolate', fraction=0.5,
level_scaling=1.0, max_shift=False, max_levels=True, relative='dim', relative_crossover=None, c_levels_bistable=None):
if self.parameters.fc < self.parameters.f01:
change = 'hard'
else:
change = 'soft'
alpha_dim = self.iterative_amplitude['alpha_dim'].dropna()
# alpha_dim.sort_index(inplace=True)
# alpha_dim_diff = np.diff(alpha_dim)/np.diff(alpha_dim.index)
# first_dim_idx = np.argmax(np.abs(alpha_dim_diff)) + 1
# alpha_dim = alpha_dim.iloc[first_dim_idx:]
alpha_bright = self.iterative_amplitude['alpha_bright'].dropna()
# alpha_bright.sort_index(inplace=True)
# alpha_bright_diff = np.diff(alpha_bright) / np.diff(alpha_bright.index)
# last_bright_idx = np.argmax(np.abs(alpha_bright_diff))
# alpha_bright = alpha_bright.iloc[:last_bright_idx]
new_iterative_alphas = pd.concat([alpha_dim, alpha_bright], axis=1)
self.iterative_amplitude = new_iterative_alphas
alpha_dim_real_func = interp1d(alpha_dim.index, alpha_dim.real, kind=kind, fill_value=fill_value)
alpha_dim_imag_func = interp1d(alpha_dim.index, alpha_dim.imag, kind=kind, fill_value=fill_value)
def alpha_dim_func_single(fd):
alpha_dim = alpha_dim_real_func(fd) + 1j * alpha_dim_imag_func(fd)
return alpha_dim
alpha_dim_func_vec = np.vectorize(alpha_dim_func_single)
def alpha_dim_func(fd_array):
alpha_dim_array = alpha_dim_func_vec(fd_array)
alpha_dim_series = pd.Series(alpha_dim_array, index=fd_array, name='alpha_dim_func')
return alpha_dim_series
alpha_bright_real_func = interp1d(alpha_bright.index, alpha_bright.real, kind=kind,
fill_value=fill_value)
alpha_bright_imag_func = interp1d(alpha_bright.index, alpha_bright.imag, kind=kind,
fill_value=fill_value)
def alpha_bright_func_single(fd):
alpha_bright = alpha_bright_real_func(fd) + 1j * alpha_bright_imag_func(fd)
return alpha_bright
alpha_bright_func_vec = np.vectorize(alpha_bright_func_single)
def alpha_bright_func(fd_array):
alpha_bright_array = alpha_bright_func_vec(fd_array)
alpha_bright_series = | pd.Series(alpha_bright_array, index=fd_array, name='alpha_bright') | pandas.Series |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri May 31 09:27:49 2019
@author: jenny
"""
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import random
# Spliting data to trainning and testing set
from sklearn.model_selection import train_test_split, GridSearchCV
# Fitting Multiple Linear Regression to the trainning set
from sklearn.linear_model import LinearRegression, Lasso
from sklearn.cluster import KMeans
from sklearn.metrics import mean_squared_error, r2_score, classification_report
from sklearn import tree
# Matplotlib setting
def DT_evaluation(clf,x_axis,n_folds,X_train,X_test,y_train,y_test):
scores = clf.cv_results_['mean_test_score']
scores_std = clf.cv_results_['std_test_score']
std_error = scores_std / np.sqrt(n_folds)
plt.figure()
plt.plot(x_axis, scores + std_error, 'b--o', markersize=3)
plt.plot(x_axis, scores - std_error, 'b--o', markersize=3)
plt.plot(x_axis, scores,color='black', marker='o',
markerfacecolor='blue', markersize=5)
plt.fill_between(x_axis, scores + std_error, scores - std_error, alpha=0.2)
plt.xlabel('Maximum tree depth')
plt.ylabel('Cross validation score +/- std error')
plt.title('Cross validation results')
pred_train = clf.predict(X_train)
pred_test = clf.predict(X_test)
print('Classification report for training data: \n', classification_report(y_train, pred_train))
print('Classification report for test data: \n', classification_report(y_test, pred_test))
print('The best choice of depth: ' + str(clf.best_params_['max_depth']))
# source: https://scikit-learn.org/stable/auto_examples/exercises/plot_cv_diabetes.html#sphx-glr-auto-examples-exercises-plot-cv-diabetes-py
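# Usage sketch (illustrative; assumes X_train/X_test/y_train/y_test are already defined):
#   depths = list(range(1, 11))
#   clf = GridSearchCV(tree.DecisionTreeClassifier(),
#                      param_grid={'max_depth': depths}, cv=5)
#   clf.fit(X_train, y_train)
#   DT_evaluation(clf, depths, 5, X_train, X_test, y_train, y_test)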
if __name__ == "__main__":
# DATA PREPROCESSING
# Import dataset
rating_dataset = | pd.read_csv('./data/ratings.csv') | pandas.read_csv |
import numpy as np
import pandas as pd
import pytest
from evalml import AutoMLSearch
from evalml.objectives import CostBenefitMatrix
def test_cbm_init():
with pytest.raises(
ValueError, match="Parameters to CostBenefitMatrix must all be numeric values."
):
CostBenefitMatrix(
true_positive=None, true_negative=-1, false_positive=-7, false_negative=-2
)
with pytest.raises(
ValueError, match="Parameters to CostBenefitMatrix must all be numeric values."
):
CostBenefitMatrix(
true_positive=1, true_negative=-1, false_positive=None, false_negative=-2
)
with pytest.raises(
ValueError, match="Parameters to CostBenefitMatrix must all be numeric values."
):
CostBenefitMatrix(
true_positive=1, true_negative=None, false_positive=-7, false_negative=-2
)
with pytest.raises(
ValueError, match="Parameters to CostBenefitMatrix must all be numeric values."
):
CostBenefitMatrix(
true_positive=3, true_negative=-1, false_positive=-7, false_negative=None
)
@pytest.mark.parametrize("optimize_thresholds", [True, False])
def test_cbm_objective_automl(optimize_thresholds, X_y_binary):
X, y = X_y_binary
cbm = CostBenefitMatrix(
true_positive=10, true_negative=-1, false_positive=-7, false_negative=-2
)
automl = AutoMLSearch(
X_train=X,
y_train=y,
problem_type="binary",
objective=cbm,
max_iterations=2,
optimize_thresholds=optimize_thresholds,
)
automl.search()
pipeline = automl.best_pipeline
pipeline.fit(X, y)
predictions = pipeline.predict(X, cbm)
assert not np.isnan(predictions).values.any()
assert not np.isnan(pipeline.predict_proba(X)).values.any()
assert not np.isnan(pipeline.score(X, y, [cbm])["Cost Benefit Matrix"])
@pytest.mark.parametrize("data_type", ["ww", "pd"])
def test_cbm_objective_function(data_type, make_data_type):
y_true = pd.Series([0, 0, 0, 1, 1, 1, 1, 1, 1, 1])
y_predicted = pd.Series([0, 0, 1, 0, 0, 0, 0, 1, 1, 1])
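# the labels above give 3 TP, 2 TN, 1 FP and 4 FN, which is what the expected value below encodes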
y_true = make_data_type(data_type, y_true)
y_predicted = make_data_type(data_type, y_predicted)
cbm = CostBenefitMatrix(
true_positive=10, true_negative=-1, false_positive=-7, false_negative=-2
)
assert np.isclose(
cbm.objective_function(y_true, y_predicted),
((3 * 10) + (-1 * 2) + (1 * -7) + (4 * -2)) / 10,
)
def test_cbm_objective_function_floats():
y_true = pd.Series([0, 0, 0, 1, 1, 1, 1, 1, 1, 1])
y_predicted = pd.Series([0, 0, 1, 0, 0, 0, 0, 1, 1, 1])
cbm = CostBenefitMatrix(
true_positive=5.1, true_negative=-1.2, false_positive=-6.7, false_negative=-0.1
)
assert np.isclose(
cbm.objective_function(y_true, y_predicted),
((3 * 5.1) + (-1.2 * 2) + (1 * -6.7) + (4 * -0.1)) / 10,
)
def test_cbm_input_contains_nan(X_y_binary):
y_predicted = pd.Series([np.nan, 0, 0])
y_true = pd.Series([1, 2, 1])
cbm = CostBenefitMatrix(
true_positive=10, true_negative=-1, false_positive=-7, false_negative=-2
)
with pytest.raises(ValueError, match="y_predicted contains NaN or infinity"):
cbm.score(y_true, y_predicted)
y_true = pd.Series([np.nan, 0, 0])
y_predicted = pd.Series([1, 2, 0])
with pytest.raises(ValueError, match="y_true contains NaN or infinity"):
cbm.score(y_true, y_predicted)
def test_cbm_input_contains_inf(capsys):
cbm = CostBenefitMatrix(
true_positive=10, true_negative=-1, false_positive=-7, false_negative=-2
)
y_predicted = np.array([np.inf, 0, 0])
y_true = np.array([1, 0, 0])
with pytest.raises(ValueError, match="y_predicted contains NaN or infinity"):
cbm.score(y_true, y_predicted)
y_true = pd.Series([np.inf, 0, 0])
y_predicted = pd.Series([1, 0, 0])
with pytest.raises(ValueError, match="y_true contains NaN or infinity"):
cbm.score(y_true, y_predicted)
def test_cbm_different_input_lengths():
cbm = CostBenefitMatrix(
true_positive=10, true_negative=-1, false_positive=-7, false_negative=-2
)
y_predicted = pd.Series([0, 0])
y_true = pd.Series([1])
with pytest.raises(ValueError, match="Inputs have mismatched dimensions"):
cbm.score(y_true, y_predicted)
y_true = | pd.Series([0, 0]) | pandas.Series |
import pandas as pd
import numpy as np
import re
from datetime import timedelta as timedelta_t
from typing import Union, List, Set, Dict
from dataclasses import dataclass
from qlearn.core.utils import infer_series_frequency, _check_frame_columns
@dataclass
class DataType:
# data type: multi, ticks, ohlc
type: str
symbols: List[str]
freq: str
subtypes: Set[str]
def frequency(self):
return | pd.Timedelta(self.freq) | pandas.Timedelta |
"""
This is a Flask application for ML.
"""
import pickle
from flask import Flask, jsonify, request
import pandas as pd
# load model
MODEL = pickle.load(open('iotmodel.pkl', 'rb'))
# app
APP = Flask(__name__)
# routes
@APP.route('/', methods=['POST'])
def predict():
"""
To predict on the input data
"""
# get data
data = request.get_json(force=True)
# convert data into dataframe
data.update((x, [y]) for x, y in data.items())
data_df = | pd.DataFrame.from_dict(data) | pandas.DataFrame.from_dict |
#Python 2.7.9 (default, Apr 5 2015, 22:21:35)
# full env in environment.yml
import sys, os
'''
This is a full aggregation of the Pulsar Hunters project, including user weighting.
Note it's quite a simple project - basically one Yes/No question - and there is gold-standard data, so the weighting is relatively straightforward and the aggregation is just determining a single fraction for each subject.
For an example of an aggregation of a much more complex question tree, check out scripts for Galaxy Zoo. The user weighting in that project is also completely different.
Hopefully this is well-enough commented below to be useful for others.
--BDS
'''
# file with raw classifications (csv) needed
# put this way up here so if there are no inputs we exit quickly before even trying to load everything else
try:
classfile_in = sys.argv[1]
except:
#classfile_in = 'pulsar-hunters-classifications_first500k.csv'
# just a shout-out to whoever changed Panoptes so that the export filenames
# are human-readable instead of their previous format. Thank you
#classfile_in = 'data/2e3d12a2-56ca-4d1f-930a-9ecc7fd39885.csv'
print("\nUsage: %s classifications_infile [weight_class aggregations_outfile]" % sys.argv[0])
print(" classifications_infile is a Zooniverse (Panoptes) classifications data export CSV.")
print(" weight_class is 1 if you want to calculate and apply user weightings, 0 otherwise.")
print(" aggregations_outfile is the name of the file you want written. If you don't specify,")
print(" the filename is %s by default." % outfile_default)
sys.exit(0)
import numpy as np # using 1.10.1
import pandas as pd # using 0.13.1
#import datetime
#import dateutil.parser
import json
############ Define files and settings below ##############
# default outfile
outfile_default = 'pulsar_aggregations.csv'
rankfile_stem = 'subjects_ranked_by_weighted_class_asof_'
# file with tags left in Talk, for value-added columns below
talk_export_file = "helperfiles/project-764-tags_2016-01-15.json"
# file with master list between Zooniverse metadata image filename (no source coords) and
# original filename with source coords and additional info
# also I get to have a variable that uses "filename" twice where each means a different thing
# a filename for a file full of filenames #alliterationbiyotch
filename_master_list_filename = "helperfiles/HTRU-N_sets_keys.csv"
# this is a list of possible matches to known pulsars that was done after the fact so they
# are flagged as "cand" in the database instead of "known" etc.
poss_match_file = 'helperfiles/PossibleMatches.csv'
# later we will select on tags by the project team and possibly weight them differently
# note I've included the moderators and myself (though I didn't tag anything).
# Also note it's possible to do this in a more general fashion using a file with project users and roles
# However, hard-coding seemed the thing to do given our time constraints (and the fact that I don't think
# you can currently export the user role file from the project builder)
project_team = 'bretonr jocelynbb spindizzy Simon_Rookyard <NAME>_ilie jamesy23 <NAME> walkcr <NAME> benjamin_shaw bhaswati djchampion jwbmartin bstappers ElisabethB Capella05 vrooje'.split()
# define the active workflow - we will ignore all classifications not on this workflow
# we could make this an input but let's not get too fancy for a specific case.
# for beta test
#active_workflow_id = 1099
#active_workflow_major = 6
# for live project
active_workflow_id = 1224
active_workflow_major = 4
# do we want sum(weighted vote count) = sum(raw vote count)?
normalise_weights = True
# do we want to write an extra file with just classification counts and usernames
# (and a random color column, for treemaps)?
counts_out = True
counts_out_file = 'class_counts_colors.csv'
############ Set the other inputs now ###############
try:
apply_weight = int(sys.argv[2])
except:
apply_weight = 0
try:
outfile = sys.argv[3]
except:
outfile = outfile_default
#################################################################################
#################################################################################
#################################################################################
# This is the function that actually does the aggregating
def aggregate_class(grp):
# translate the group to a dataframe because FML if I don't (some indexing etc is different)
thegrp = pd.DataFrame(grp)
# figure out what we're looping over below
answers = thegrp.pulsar_classification.unique()
# aggregating is a matter of grouping by different answers and summing the counts/weights
byans = thegrp.groupby('pulsar_classification')
ans_ct_tot = byans['count'].aggregate('sum')
ans_wt_tot = byans['weight'].aggregate('sum')
# we want fractions eventually, so we need denominators
count_tot = np.sum(ans_ct_tot) # we could also do len(thegrp)
weight_tot = np.sum(ans_wt_tot)
# okay, now we should have a series of counts for each answer, one for weighted counts, and
# the total votes and weighted votes for this subject.
# now loop through the possible answers and create the raw and weighted vote fractions
# and save the counts as well.
# this is a list for now and we'll make it into a series and order the columns later
class_agg = {}
class_agg['count_unweighted'] = count_tot
class_agg['count_weighted'] = weight_tot
class_agg['subject_type'] = thegrp.subject_type.unique()[0]
class_agg['filename'] = thegrp.filename.unique()[0]
for a in answers:
# don't be that jerk who labels things with "p0" or otherwise useless internal indices.
# Use the text of the response next to this answer choice in the project builder (but strip spaces)
raw_frac_label = ('p_'+a).replace(' ', '_')
wt_frac_label = ('p_'+a+'_weight').replace(' ', '_')
class_agg[raw_frac_label] = ans_ct_tot[a]/float(count_tot)
class_agg[wt_frac_label] = ans_wt_tot[a]/float(weight_tot)
# oops, this is hard-coded so that there's Yes and No as answers - sorry to those trying to generalise
col_order = ["filename", "p_Yes", "p_No", "p_Yes_weight", "p_No_weight",
"count_unweighted", "count_weighted", "subject_type"]
return pd.Series(class_agg)[col_order]
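# Worked example (illustrative): a subject with 8 "Yes" votes (summed weight 9.6) and 2 "No"
# votes (summed weight 1.4) ends up with p_Yes = 0.8, p_No = 0.2, p_Yes_weight = 9.6/11.0 ~= 0.87,
# count_unweighted = 10 and count_weighted = 11.0.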
#################################################################################
#################################################################################
#################################################################################
# The new weighting assignment function allows the user to choose between different weighting schemes
# though note the one in this function is not preferred for reasons explained below.
def assign_weight_old(seed):
# keep the two seed cases separate because we might want to use a different base for each
if seed < 0.:
return max([0.05, pow(1.0025, seed)])
elif seed > 0:
return min([3.0, pow(1.0025, seed)])
else:
return 1.0
# assigns a weight based on a seed parameter
# The weight is assigned using the seed as an exponent and the number below as the base.
# The number is just slightly offset from 1 so that it takes many classifications for
# a user's potential weight to cap out at the max weight (3) or bottom out at the min (0.05).
# Currently there are 641 "known" pulsars in the DB so the base of 1.0025 is largely based on that.
# Update: there are now about 5,000 simulated pulsars in the subject set as well, and they have a
# much higher retirement limit, so that more people will have classified them and we have more info.
# Note I'd rather this did a proper analysis with a confusion matrix etc but under a time crunch
# we went with something simpler.
def assign_weight(q, which_weight):
# the floor weight for the case of which_weight == 2
# i.e. someone who has seed = 0 will have this
# seed = 0 could either be equal numbers right & wrong, OR that we don't have any information
c0 = 0.5
seed = q[1].seed
n_gs = q[1].n_gs
# Two possible weighting schemes:
# which_weight == 1: w = 1.0025^(seed), bounded between 0.05 and 3.0
    # which_weight == 2: w = c0*(1 + log10 n_gs)^(seed/n_gs), bounded between 0.05 and 3.0
#
# Weighting Scheme 1:
# this is an okay weighting scheme, but it doesn't account for the fact that someone might be prolific
# but not a very good classifier, and those classifiers shouldn't have a high weight.
# Example: Bob does 10000 gold-standard classifications and gets 5100 right, 4900 wrong.
# In this weighting scheme, Bob's weighting seed is +100, which means a weight of 1.0025^100 = 1.3,
# despite the fact that Bob's classifications are consistent with random within 1%.
# The weighting below this one would take the weight based on 100/10000, which is much better.
if which_weight == 1:
# keep the two seed cases separate because we might want to use a different base for each
if seed < 0.:
return max([0.05, pow(1.0025, seed)])
elif seed > 0:
return min([3.0, pow(1.0025, seed)])
else:
return 1.0
elif which_weight == 2:
if n_gs < 1: # don't divide by or take the log of 0
# also if they didn't do any gold-standard classifications assume they have the default weight
return c0
else:
# note the max of 3 is unlikely to be reached, but someone could hit the floor.
return min([3.0, max([0.05, c0*pow((1.0 + np.log10(n_gs)), (float(seed)/float(n_gs)))])])
else:
# unweighted - so maybe don't even enter this function if which_weight is not 1 or 2...
return 1.0
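# Illustrative arithmetic (not executed) comparing the two schemes, using c0 = 0.5:
#   "Bob" above (seed = +100 from n_gs = 10000 gold-standard classifications):
#       scheme 1: 1.0025^100 ~= 1.28      scheme 2: 0.5*(1+log10(10000))^(100/10000) = 0.5*5^0.01 ~= 0.51
#   a smaller but more accurate record (say seed = +85 from n_gs = 100):
#       scheme 1: 1.0025^85  ~= 1.24      scheme 2: 0.5*(1+log10(100))^(85/100)      = 0.5*3^0.85 ~= 1.27
# so scheme 2 rewards accuracy per gold-standard classification rather than sheer volume.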
#################################################################################
#################################################################################
#################################################################################
# Get the Gini coefficient - https://en.wikipedia.org/wiki/Gini_coefficient
# Typical values of the Gini for healthy Zooniverse projects (Cox et al. 2015) are
# in the range of 0.7-0.9.
def gini(list_of_values):
sorted_list = sorted(list_of_values)
height, area = 0, 0
for value in sorted_list:
height += value
area += height - value / 2.
fair_area = height * len(list_of_values) / 2
return (fair_area - area) / fair_area
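# Quick sanity check (illustrative, not executed):
#   gini([1]*10)           == 0.0    # everyone classifies equally
#   gini([1, 1, 1, 1, 96]) == 0.76   # a handful of users do most of the work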
#################################################################################
#################################################################################
#################################################################################
# assign a color randomly if logged in, gray otherwise
def randcolor(user_label):
if user_label.startswith('not-logged-in-'):
# keep it confined to grays, i.e. R=G=B and not too bright, not too dark
g = random.randint(25,150)
return '#%02X%02X%02X' % (g,g,g)
#return '#555555'
else:
# the lambda makes this generate a new int every time it's called, so that
# in general R != G != B below.
r = lambda: random.randint(0,255)
return '#%02X%02X%02X' % (r(),r(),r())
#################################################################################
#################################################################################
#################################################################################
# These are functions that extract information from the various JSONs that are
# included in the classification exports. To Do: optimise these so that one .apply()
# call will extract them for everything without so many &^%@$ing loops.
def get_subject_type(q):
try:
return q[1].subject_json[q[1].subject_id]['#Class']
except:
return "cand"
def get_filename(q):
try:
return q[1].subject_json[q[1].subject_id]['CandidateFile']
except:
try:
return q[1].subject_json[q[1].subject_id]['CandidateFileVertical']
except:
try:
return q[1].subject_json[q[1].subject_id]['CandidateFileHorizontal']
except:
return "filenotfound.png"
# get number of gold-standard classifications completed by a user (used if weighting)
def get_n_gs(thegrp):
return sum(pd.DataFrame(thegrp).seed != 0)
# Something went weird with IP addresses, so use more info to determine unique users
# Note the user_name still has the IP address in it if the user is not logged in;
# it's just that for this specific project it's not that informative.
def get_alternate_sessioninfo(row):
# if they're logged in, save yourself all this trouble
if not row[1]['user_name'].startswith('not-logged-in'):
return row[1]['user_name']
else:
metadata = row[1]['meta_json']
# IP + session, if it exists
# (IP, agent, viewport_width, viewport_height) if session doesn't exist
try:
# start with "not-logged-in" so stuff later doesn't break
return str(row[1]['user_name']) +"_"+ str(metadata['session'])
except:
try:
viewport = str(metadata['viewport'])
except:
viewport = "NoViewport"
try:
user_agent = str(metadata['user_agent'])
except:
user_agent = "NoUserAgent"
try:
user_ip = str(row[1]['user_name'])
except:
user_ip = "NoUserIP"
thesession = user_ip + user_agent + viewport
return thesession
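# Sketch of the "one .apply()" optimisation mentioned above (illustrative only; untested against this
# export format, assumes the subject_id and subject_json columns built further down, and omits the
# CandidateFileVertical/Horizontal fallbacks for brevity):
#   def extract_subject_fields(row):
#       subj = row['subject_json'].get(row['subject_id'], {})
#       return pd.Series({'subject_type': subj.get('#Class', 'cand'),
#                         'filename': subj.get('CandidateFile', 'filenotfound.png')})
#   extracted = classifications.apply(extract_subject_fields, axis=1)
#   classifications['subject_type'] = extracted['subject_type']
#   classifications['filename'] = extracted['filename']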
#################################################################################
#################################################################################
#################################################################################
# Print out the input parameters just as a sanity check
print("Computing aggregations using:")
print(" infile: %s" % classfile_in)
print(" weighted? %d" % apply_weight)
print(" Will print to %s after processing." % outfile)
#################################################################################
#################################################################################
#################################################################################
#
#
#
#
# Begin the main work
#
#
#
#
print("Reading classifications from %s ..." % classfile_in)
classifications = pd.read_csv(classfile_in) # this step can take a few minutes for a big file
# Talk tags are not usually huge files so this doesn't usually take that long
print("Parsing Talk tag file for team tags %s ..." % talk_export_file)
talkjson = json.loads(open(talk_export_file).read())
talktags_all = pd.DataFrame(talkjson)
# we only care about the Subject comments here, not discussions on the boards
# also we only care about tags by the research team & moderators
talktags = talktags_all[(talktags_all.taggable_type == "Subject") & (talktags_all.user_login.isin(project_team))].copy()
# make a username-tag pair column
# subject id is a string in the classifications array so force it to be one here or the match won't work
talktags['subject_id'] = [str(int(q)) for q in talktags.taggable_id]
talktags["user_tag"] = talktags.user_login+": #"+talktags.name+";"
# when we're talking about Subject tags, taggable_id is subject_id
talk_bysubj = talktags.groupby('subject_id')
# this now contains all the project-team-written tags on each subject, 1 row per subject
subj_tags = pd.DataFrame(talk_bysubj.user_tag.unique())
# if we need this as an explicit column
#subj_tags['subject_id'] = subj_tags.index
# likewise, reading these matched files doesn't take long even though we have a for loop.
print("Reading master list of matched filenames %s..." % filename_master_list_filename)
matched_filenames = pd.read_csv(filename_master_list_filename)
print("Reading from list of possible matches to known pulsars %s..." % poss_match_file)
# ['Zooniverse name', 'HTRU-N name', 'Possible source']
possible_knowns = pd.read_csv(poss_match_file)
possible_knowns['is_poss_known'] = [True for q in possible_knowns['Possible source']]
# This section takes quite a while and it's because we have so many for loops, which I think is
# in part because reading out of a dict from a column in a DataFrame needs loops when done this way
# and in part because we were in a rush.
# I think it's possible we could pass this to a function and reshape things there, then return
# a set of new columns - but I didn't have time to figure that out under the deadlines we had.
print("Making new columns and getting user labels...")
# first, extract the started_at and finished_at from the metadata column
classifications['meta_json'] = [json.loads(q) for q in classifications.metadata]
classifications['started_at_str'] = [q['started_at'] for q in classifications.meta_json]
classifications['finished_at_str'] = [q['finished_at'] for q in classifications.meta_json]
# we need to set up a new user id column that's login name if the classification is while logged in,
# session if not (right now "user_name" is login name or hashed IP and, well, read on...)
# in this particular run of this particular project, session is a better tracer of uniqueness than IP
# for anonymous users, because of a bug with some back-end stuff that someone else is fixing
# but we also want to keep the user name if it exists, so let's use this function
#classifications['user_label'] = [get_alternate_sessioninfo(q) for q in classifications.iterrows()]
classifications['user_label'] = [get_alternate_sessioninfo(q) for q in classifications['user_name meta_json'.split()].iterrows()]
classifications['created_day'] = [q[:10] for q in classifications.created_at]
# Get subject info into a format we can actually use
classifications['subject_json'] = [json.loads(q) for q in classifications.subject_data]
'''
ALERT: I think they may have changed the format of the subject_dict such that later projects will have a different structure to this particular json.
That will mean you'll have to adapt this part. Sorry - but hopefully it'll use the format that I note below I wish it had, or something similarly simple.
'''
# extract the subject ID because that's needed later
# Note the subject ID becomes the *index* of the dict, which is actually pretty strange versus
# everything else in the export, and I'd really rather it be included here as "subject_id":"1234567" etc.
#
# You can isolate the keys as a new column but then it's a DictKey type, but stringifying it adds
# all these other characters that you then have to take out. Thankfully all our subject IDs are numbers
# this is a little weird and there must be a better way but... it works
classifications['subject_id'] = [str(q.keys()).replace("dict_keys(['", "").replace("'])", '') for q in classifications.subject_json]
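# (Equivalent and slightly less fragile, assuming each subject_json dict holds exactly one subject:
#  classifications['subject_id'] = [list(q.keys())[0] for q in classifications.subject_json] )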
# extract retired status, though not sure we're actually going to use it.
# also, what a mess - you have to extract the subject ID first and then use it to call the subject_json. UGH
# update: we didn't use it and each of these lines takes ages, so commenting it out
#classifications['retired'] = [q[1].subject_json[q[1].subject_id]['retired'] for q in classifications.iterrows()]
# Get annotation info into a format we can actually use
# these annotations are just a single yes or no question, yay
classifications['annotation_json'] = [json.loads(q) for q in classifications.annotations]
classifications['pulsar_classification'] = [q[0]['value'] for q in classifications.annotation_json]
# create a weight parameter but set it to 1.0 for all classifications (unweighted) - may change later
classifications['weight'] = [1.0 for q in classifications.workflow_version]
# also create a count parameter, because at the time of writing this .aggregate('count') was sometimes off by 1
classifications['count'] = [1 for q in classifications.workflow_version]
#######################################################
# discard classifications not in the active workflow #
#######################################################
print("Picking classifications from the active workflow (id %d, version %d.*)" % (active_workflow_id, active_workflow_major))
# use any workflow consistent with this major version, e.g. 6.12 and 6.23 are both 6 so they're both ok
# also check it's the correct workflow id
the_active_workflow = [int(q) == active_workflow_major for q in classifications.workflow_version]
this_workflow = classifications.workflow_id == active_workflow_id
in_workflow = this_workflow & the_active_workflow
# note I haven't saved the full DF anywhere because of memory reasons, so if you're debugging:
# classifications_all = classifications.copy()
classifications = classifications[in_workflow]
print("Extracting filenames and subject types...")
# extract whether this is a known pulsar or a candidate that needs classifying - that info is in the
# "#Class" column in the subject metadata (where # means it can't be seen by classifiers).
# the options are "cand" for "candidate", "known" for known pulsar, "disc" for a pulsar that has been
# discovered by this team but is not yet published
# do this after you choose a workflow because #Class doesn't exist for the early subjects so it will break
# also don't send the entirety of classifications into the function, to save memory
#classifications['subject_type'] = [get_subject_type(q) for q in classifications.iterrows()]
#classifications['filename'] = [get_filename(q) for q in classifications.iterrows()]
classifications['subject_type'] = [get_subject_type(q) for q in classifications['subject_id subject_json'.split()].iterrows()]
classifications['filename'] = [get_filename(q) for q in classifications['subject_id subject_json'.split()].iterrows()]
# Let me just pause a second to rant again about the fact that subject ID is the index of the subject_json.
# Because of that, because the top-level access to that was-json-now-a-dict requires the subject id rather than
# just being label:value pairs, I have to do an iterrows() and send part of the entire classifications DF into
# a loop so that I can simultaneously access each subject ID *and* the dict, rather than just accessing the
# info from the dict directly, which would be much faster.
# this might be useful for a sanity check later
# first_class_day = min(classifications.created_day).replace(' ', '')
# last_class_day = max(classifications.created_day).replace(' ', '')
# for some reason this is reporting last-classification dates that are days after the actual last
# classification. Not sure? Might be because this is a front-end reporting, so if someone has set
# their computer's time wrong we could get the wrong time here.
# could fix that by using created_at but ... I forgot.
last_class_time = max(classifications.finished_at_str)[:16].replace(' ', '_').replace('T', '_').replace(':', 'h')+"m"
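# e.g. (illustrative date) a finished_at_str of "2016-01-06T14:25:33.610Z" becomes "2016-01-06_14h25m" here.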
## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## #
#######################################################
# Apply weighting function (or don't) #
#######################################################
## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## #
classifications['seed'] = [0 for q in classifications.weight]
classifications['is_gs'] = [0 for q in classifications.weight]
if apply_weight > 0:
print(" Computing user weights...")
# for now this is assuming all subjects marked as "known" or "disc" are pulsars
# and also "fake" are simulated pulsars
is_known = (classifications.subject_type == 'known') | (classifications.subject_type == 'disc') | (classifications.subject_type == 'fake')
#is_candidate = np.invert(is_known)
    # if it's a gold-standard classification, mark it
classifications.loc[is_known, 'is_gs'] = 1
ok_incr = 1.0 # upweight if correct
oops_incr = -2.0 # downweight more if incorrect
# find the correct classifications of known pulsars
ok_class = (is_known) & (classifications.pulsar_classification == 'Yes')
# find the incorrect classifications of known pulsars
oops_class = (is_known) & (classifications.pulsar_classification == 'No')
# set the individual seeds
classifications.loc[ok_class, 'seed'] = ok_incr
classifications.loc[oops_class, 'seed'] = oops_incr
# then group classifications by user name, which will weight logged in as well as not-logged-in (the latter by session)
by_user = classifications.groupby('user_label')
# get the user's summed seed, which goes into the exponent for the weight
user_exp = by_user.seed.aggregate('sum')
# then set up the DF that will contain the weights etc, and fill it
user_weights = pd.DataFrame(user_exp)
user_weights.columns = ['seed']
user_weights['user_label'] = user_weights.index
user_weights['nclass_user'] = by_user['count'].aggregate('sum')
user_weights['n_gs'] = by_user['is_gs'].aggregate('sum')
user_weights['weight'] = [assign_weight(q, apply_weight) for q in user_weights.iterrows()]
#user_weights['weight'] = [assign_weight_old(q) for q in user_exp]
# if you want sum(unweighted classification count) == sum(weighted classification count), do this
if normalise_weights:
user_weights.weight *= float(len(classifications))/float(sum(user_weights.weight * user_weights.nclass_user))
# weights are assigned, now need to match them up to the main classifications table
# making sure that this weight keeps the name 'weight' and the other gets renamed (suffixes flag)
    # if apply_weight == 0 then we won't enter this branch and the old "weights" will stay
# as they are, i.e. == 1 uniformly.
classifications_old = classifications.copy()
classifications = pd.merge(classifications_old, user_weights, how='left',
on='user_label',
sort=False, suffixes=('_2', ''), copy=True)
else:
# just make a collated classification count array so we can print it to the screen
by_user = classifications.groupby('user_label')
user_exp = by_user.seed.aggregate('sum')
user_weights = pd.DataFrame(user_exp)
user_weights.columns = ['seed']
#user_weights['user_label'] = user_weights.index
user_weights['nclass_user'] = by_user['count'].aggregate('sum')
user_weights['n_gs'] = by_user['is_gs'].aggregate('sum')
# UNWEIGHTED
user_weights['weight'] = [1 for q in user_exp]
# grab basic stats
n_subj_tot = len(classifications.subject_data.unique())
by_subject = classifications.groupby('subject_id')
subj_class = by_subject.created_at.aggregate('count')
all_users = classifications.user_label.unique()
n_user_tot = len(all_users)
n_user_unreg = sum([q.startswith('not-logged-in-') for q in all_users])
# obviously if we didn't weight then we don't need to get stats on weights
if apply_weight > 0:
user_weight_mean = np.mean(user_weights.weight)
user_weight_median = np.median(user_weights.weight)
user_weight_25pct = np.percentile(user_weights.weight, 25)
user_weight_75pct = np.percentile(user_weights.weight, 75)
user_weight_min = min(user_weights.weight)
user_weight_max = max(user_weights.weight)
nclass_mean = np.mean(user_weights.nclass_user)
nclass_median = np.median(user_weights.nclass_user)
nclass_tot = len(classifications)
user_weights.sort_values(['nclass_user'], ascending=False, inplace=True)
# If you want to print out a file of classification counts per user, with colors for making a treemap
# honestly I'm not sure why you wouldn't want to print this, as it's very little extra effort
if counts_out == True:
print("Printing classification counts to %s..." % counts_out_file)
    user_weights['color'] = [randcolor(q) for q in user_weights.index]
    user_weights.to_csv(counts_out_file)
## ## ## ## ## ## ## ## ## ## ## ## ## ## #
#######################################################
# Print out basic project info #
#######################################################
## ## ## ## ## ## ## ## ## ## ## ## ## ## #
print("%d classifications from %d users, %d registered and %d unregistered.\n" % (nclass_tot, n_user_tot, n_user_tot - n_user_unreg, n_user_unreg))
print("Mean n_class per user %.1f, median %.1f." % (nclass_mean, nclass_median))
if apply_weight > 0:
print("Mean user weight %.3f, median %.3f, with the middle 50 percent of users between %.3f and %.3f." % (user_weight_mean, user_weight_median, user_weight_25pct, user_weight_75pct))
print("The min user weight is %.3f and the max user weight is %.3f.\n" % (user_weight_min, user_weight_max))
cols_print = 'nclass_user weight'.split()
else:
    cols_print = ['nclass_user']
# don't make this leaderboard public unless you want to gamify your users in ways we already know
# have unintended and sometimes negative consequences. This is just for your information.
print("Classification leaderboard:")
print(user_weights[cols_print].head(20))
print("Gini coefficient for project: %.3f" % gini(user_weight['nclass_user']))
## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## #
#######################################################
# Aggregate classifications, unweighted and weighted #
#######################################################
## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## #
print("\nAggregating classifications...\n")
class_agg = by_subject['weight count pulsar_classification subject_type filename'.split()].apply(aggregate_class)
# really ought to replace all the NaNs with 0.0
#######################################################
# Write to files #
#######################################################
#
# add value-added columns
#
# let people look up the subject on Talk directly from the aggregated file
class_agg['link'] = ['https://www.zooniverse.org/projects/zooniverse/pulsar-hunters/talk/subjects/'+str(q) for q in class_agg.index]
# after we do the merges below the new indices might not be linked to the subject id, so save it explicitly
class_agg['subject_id'] = [str(q) for q in class_agg.index]
# match up all the ancillary file data. Maybe there's a faster way to do this than with a chain but meh,
# it's actually not *that* slow compared to the clusterf*ck of for loops in the column assignment part above
class_agg_old = class_agg.copy()
class_agg_interm = pd.merge(class_agg_old, subj_tags, how='left', left_index=True, right_index=True, sort=False, copy=True)
class_agg_interm2 = pd.merge(class_agg_interm, matched_filenames, how='left', left_on='filename', right_on='Pulsar Hunters File', sort=False, copy=True)
class_agg = pd.merge(class_agg_interm2, possible_knowns, how='left', left_on='filename', right_on='Zooniverse name', sort=False, copy=True)
# fill in the is_poss_known column with False where it is currently NaN
# currently it's either True or NaN -- with pd.isnull NaN becomes True and True becomes False, so invert that.
class_agg['is_poss_known'] = np.invert(pd.isnull(class_agg['is_poss_known']))
# make the list ranked by p_Yes_weight
class_agg.sort_values(['subject_type','p_Yes_weight'], ascending=False, inplace=True)
print("Writing aggregated output to file %s...\n" % outfile)
pd.DataFrame(class_agg).to_csv(outfile)
# Now make files ranked by p_Yes, one with all subjects classified and one with only candidates
# /Users/vrooje/anaconda/bin/ipython:1: FutureWarning: sort(columns=....) is deprecated, use sort_values(by=.....)
# #!/bin/bash /Users/vrooje/anaconda/bin/python.app
#class_agg.sort('p_Yes_weight', ascending=False, inplace=True)
class_agg.sort_values(['p_Yes_weight'], ascending=False, inplace=True)
# I'd rather note the last classification date than the date we happen to produce the file
# rightnow = datetime.datetime.now().strftime('%Y-%M-%D_%H:%M')
# rankfile_all = rankfile_stem + rightnow + ".csv"
rankfile_all = 'all_'+rankfile_stem + last_class_time + ".csv"
# there go those hard-coded columns again
rank_cols = ['subject_id', 'filename', 'p_Yes_weight', 'count_weighted', 'p_Yes', 'count_unweighted', 'subject_type', 'link', 'user_tag', 'HTRU-N File']
print("Writing full ranked list to file %s...\n" % rankfile_all)
# write just the weighted yes percentage, the weighted count, the subject type, and the link to the subject page
# the subject ID is the index so it will be written anyway
pd.DataFrame(class_agg[rank_cols]).to_csv(rankfile_all)
rankfile = 'cand_allsubj_'+rankfile_stem + last_class_time + ".csv"
print("Writing candidate-only ranked list to file %s...\n" % rankfile)
# also only include entries where there were at least 5 weighted votes tallied
# and only "cand" subject_type objects
classified_candidate = (class_agg.count_weighted > 5) & (class_agg.subject_type == 'cand')
pd.DataFrame(class_agg[rank_cols][classified_candidate]).to_csv(rankfile)
rankfile_unk = 'cand_'+rankfile_stem + last_class_time + ".csv"
print("Writing candidate-only, unknown-only ranked list to file %s...\n" % rankfile_unk)
# also only include entries where there were at least 5 weighted votes tallied
# and only "cand" subject_type objects
classified_unknown_candidate = (classified_candidate) & (np.invert(class_agg.is_poss_known))
pd.DataFrame(class_agg[rank_cols][classified_unknown_candidate]).to_csv(rankfile_unk)
from pyops.utils import is_elapsed_time, parse_time, getMonth
import pandas as pd
from datetime import datetime
import os
class EVF:
def __init__(self, fname):
# Variable initialization
self.WTF = list()
self.meta = dict()
self.header = list()
self.ref_date = None
self.init_values = list()
self.include_files = list()
self.propagation_delay = None
# Loading the given file
self.load(fname)
def load(self, fname):
        # Storing the name of the file for editing purposes
self.fname = fname
        # Auxiliary dictionary to speed up the data conversion into pandas
aux_dict = dict(raw_time=[], time=[], event=[], experiment=[], item=[],
count=[], comment=[])
# Importing the file
out_ouf_metadata = False
with open(fname) as f:
for line in f:
if '\n' in line[0]:
pass
# Filtering lines with comments
elif '#' in line[0]:
if not out_ouf_metadata:
self.header.append(line)
self._read_metada(line)
else:
self.WTF.append(line)
# Storing events
elif is_elapsed_time(line.split()[0]):
aux_dict = self._read_events(line, aux_dict)
# Useful data from the header
else:
                    # We can say we are out of the metadata here because
                    # start_time and end_time are mandatory in the files
out_ouf_metadata = True
self._read_header_line(line.split())
# Closing the file
f.close()
# Creating the pandas dataframe
        self.events = pd.DataFrame(aux_dict)
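# Illustrative usage sketch (the file name is a placeholder, not part of the original class):
#   evf = EVF("MY_EVENTS.evf")
#   print(evf.events[['time', 'event', 'count']].head())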
from collections import OrderedDict
import numpy as np
from numpy import nan, array
import pandas as pd
import pytest
from .conftest import (
assert_series_equal, assert_frame_equal, fail_on_pvlib_version)
from numpy.testing import assert_allclose
import unittest.mock as mock
from pvlib import inverter, pvsystem
from pvlib import atmosphere
from pvlib import iam as _iam
from pvlib import irradiance
from pvlib.location import Location
from pvlib import temperature
from pvlib._deprecation import pvlibDeprecationWarning
@pytest.mark.parametrize('iam_model,model_params', [
('ashrae', {'b': 0.05}),
('physical', {'K': 4, 'L': 0.002, 'n': 1.526}),
('martin_ruiz', {'a_r': 0.16}),
])
def test_PVSystem_get_iam(mocker, iam_model, model_params):
m = mocker.spy(_iam, iam_model)
system = pvsystem.PVSystem(module_parameters=model_params)
thetas = 1
iam = system.get_iam(thetas, iam_model=iam_model)
m.assert_called_with(thetas, **model_params)
assert iam < 1.
def test_PVSystem_multi_array_get_iam():
model_params = {'b': 0.05}
system = pvsystem.PVSystem(
arrays=[pvsystem.Array(module_parameters=model_params),
pvsystem.Array(module_parameters=model_params)]
)
iam = system.get_iam((1, 5), iam_model='ashrae')
assert len(iam) == 2
assert iam[0] != iam[1]
with pytest.raises(ValueError,
match="Length mismatch for per-array parameter"):
system.get_iam((1,), iam_model='ashrae')
def test_PVSystem_get_iam_sapm(sapm_module_params, mocker):
system = pvsystem.PVSystem(module_parameters=sapm_module_params)
mocker.spy(_iam, 'sapm')
aoi = 0
out = system.get_iam(aoi, 'sapm')
_iam.sapm.assert_called_once_with(aoi, sapm_module_params)
assert_allclose(out, 1.0, atol=0.01)
def test_PVSystem_get_iam_interp(sapm_module_params, mocker):
system = pvsystem.PVSystem(module_parameters=sapm_module_params)
with pytest.raises(ValueError):
system.get_iam(45, iam_model='interp')
def test__normalize_sam_product_names():
BAD_NAMES = [' -.()[]:+/",', 'Module[1]']
NORM_NAMES = ['____________', 'Module_1_']
norm_names = pvsystem._normalize_sam_product_names(BAD_NAMES)
assert list(norm_names) == NORM_NAMES
BAD_NAMES = ['Module[1]', 'Module(1)']
NORM_NAMES = ['Module_1_', 'Module_1_']
with pytest.warns(UserWarning):
norm_names = pvsystem._normalize_sam_product_names(BAD_NAMES)
assert list(norm_names) == NORM_NAMES
BAD_NAMES = ['Module[1]', 'Module[1]']
NORM_NAMES = ['Module_1_', 'Module_1_']
with pytest.warns(UserWarning):
norm_names = pvsystem._normalize_sam_product_names(BAD_NAMES)
assert list(norm_names) == NORM_NAMES
def test_PVSystem_get_iam_invalid(sapm_module_params, mocker):
system = pvsystem.PVSystem(module_parameters=sapm_module_params)
with pytest.raises(ValueError):
system.get_iam(45, iam_model='not_a_model')
def test_retrieve_sam_raise_no_parameters():
"""
Raise an exception if no parameters are provided to `retrieve_sam()`.
"""
with pytest.raises(ValueError) as error:
pvsystem.retrieve_sam()
assert 'A name or path must be provided!' == str(error.value)
def test_retrieve_sam_cecmod():
"""
Test the expected data is retrieved from the CEC module database. In
particular, check for a known module in the database and check for the
expected keys for that module.
"""
data = pvsystem.retrieve_sam('cecmod')
keys = [
'BIPV',
'Date',
'T_NOCT',
'A_c',
'N_s',
'I_sc_ref',
'V_oc_ref',
'I_mp_ref',
'V_mp_ref',
'alpha_sc',
'beta_oc',
'a_ref',
'I_L_ref',
'I_o_ref',
'R_s',
'R_sh_ref',
'Adjust',
'gamma_r',
'Version',
'STC',
'PTC',
'Technology',
'Bifacial',
'Length',
'Width',
]
module = 'Itek_Energy_LLC_iT_300_HE'
assert module in data
assert set(data[module].keys()) == set(keys)
def test_retrieve_sam_cecinverter():
"""
Test the expected data is retrieved from the CEC inverter database. In
particular, check for a known inverter in the database and check for the
expected keys for that inverter.
"""
data = pvsystem.retrieve_sam('cecinverter')
keys = [
'Vac',
'Paco',
'Pdco',
'Vdco',
'Pso',
'C0',
'C1',
'C2',
'C3',
'Pnt',
'Vdcmax',
'Idcmax',
'Mppt_low',
'Mppt_high',
'CEC_Date',
'CEC_Type',
]
inverter = 'Yaskawa_Solectria_Solar__PVI_5300_208__208V_'
assert inverter in data
assert set(data[inverter].keys()) == set(keys)
def test_sapm(sapm_module_params):
times = pd.date_range(start='2015-01-01', periods=5, freq='12H')
effective_irradiance = pd.Series([-1000, 500, 1100, np.nan, 1000],
index=times)
temp_cell = pd.Series([10, 25, 50, 25, np.nan], index=times)
out = pvsystem.sapm(effective_irradiance, temp_cell, sapm_module_params)
expected = pd.DataFrame(np.array(
[[ -5.0608322 , -4.65037767, nan, nan,
nan, -4.91119927, -4.15367716],
[ 2.545575 , 2.28773882, 56.86182059, 47.21121608,
108.00693168, 2.48357383, 1.71782772],
[ 5.65584763, 5.01709903, 54.1943277 , 42.51861718,
213.32011294, 5.52987899, 3.48660728],
[ nan, nan, nan, nan,
nan, nan, nan],
[ nan, nan, nan, nan,
nan, nan, nan]]),
columns=['i_sc', 'i_mp', 'v_oc', 'v_mp', 'p_mp', 'i_x', 'i_xx'],
index=times)
assert_frame_equal(out, expected, check_less_precise=4)
out = pvsystem.sapm(1000, 25, sapm_module_params)
expected = OrderedDict()
expected['i_sc'] = 5.09115
expected['i_mp'] = 4.5462909092579995
expected['v_oc'] = 59.260800000000003
expected['v_mp'] = 48.315600000000003
expected['p_mp'] = 219.65677305534581
expected['i_x'] = 4.9759899999999995
expected['i_xx'] = 3.1880204359100004
for k, v in expected.items():
assert_allclose(out[k], v, atol=1e-4)
# just make sure it works with Series input
pvsystem.sapm(effective_irradiance, temp_cell,
pd.Series(sapm_module_params))
def test_PVSystem_sapm(sapm_module_params, mocker):
mocker.spy(pvsystem, 'sapm')
system = pvsystem.PVSystem(module_parameters=sapm_module_params)
effective_irradiance = 500
temp_cell = 25
out = system.sapm(effective_irradiance, temp_cell)
pvsystem.sapm.assert_called_once_with(effective_irradiance, temp_cell,
sapm_module_params)
assert_allclose(out['p_mp'], 100, atol=100)
def test_PVSystem_multi_array_sapm(sapm_module_params):
system = pvsystem.PVSystem(
arrays=[pvsystem.Array(module_parameters=sapm_module_params),
pvsystem.Array(module_parameters=sapm_module_params)]
)
effective_irradiance = (100, 500)
temp_cell = (15, 25)
sapm_one, sapm_two = system.sapm(effective_irradiance, temp_cell)
assert sapm_one['p_mp'] != sapm_two['p_mp']
sapm_one_flip, sapm_two_flip = system.sapm(
(effective_irradiance[1], effective_irradiance[0]),
(temp_cell[1], temp_cell[0])
)
assert sapm_one_flip['p_mp'] == sapm_two['p_mp']
assert sapm_two_flip['p_mp'] == sapm_one['p_mp']
with pytest.raises(ValueError,
match="Length mismatch for per-array parameter"):
system.sapm(effective_irradiance, 10)
with pytest.raises(ValueError,
match="Length mismatch for per-array parameter"):
system.sapm(500, temp_cell)
@pytest.mark.parametrize('airmass,expected', [
(1.5, 1.00028714375),
(np.array([[10, np.nan]]), np.array([[0.999535, 0]])),
(pd.Series([5]), pd.Series([1.0387675]))
])
def test_sapm_spectral_loss(sapm_module_params, airmass, expected):
out = pvsystem.sapm_spectral_loss(airmass, sapm_module_params)
if isinstance(airmass, pd.Series):
assert_series_equal(out, expected, check_less_precise=4)
else:
assert_allclose(out, expected, atol=1e-4)
def test_PVSystem_sapm_spectral_loss(sapm_module_params, mocker):
mocker.spy(pvsystem, 'sapm_spectral_loss')
system = pvsystem.PVSystem(module_parameters=sapm_module_params)
airmass = 2
out = system.sapm_spectral_loss(airmass)
pvsystem.sapm_spectral_loss.assert_called_once_with(airmass,
sapm_module_params)
assert_allclose(out, 1, atol=0.5)
def test_PVSystem_multi_array_sapm_spectral_loss(sapm_module_params):
system = pvsystem.PVSystem(
arrays=[pvsystem.Array(module_parameters=sapm_module_params),
pvsystem.Array(module_parameters=sapm_module_params)]
)
loss_one, loss_two = system.sapm_spectral_loss(2)
assert loss_one == loss_two
# this test could be improved to cover all cell types.
# could remove the need for specifying spectral coefficients if we don't
# care about the return value at all
@pytest.mark.parametrize('module_parameters,module_type,coefficients', [
({'Technology': 'mc-Si'}, 'multisi', None),
({'Material': 'Multi-c-Si'}, 'multisi', None),
({'first_solar_spectral_coefficients': (
0.84, -0.03, -0.008, 0.14, 0.04, -0.002)},
None,
(0.84, -0.03, -0.008, 0.14, 0.04, -0.002))
])
def test_PVSystem_first_solar_spectral_loss(module_parameters, module_type,
coefficients, mocker):
mocker.spy(atmosphere, 'first_solar_spectral_correction')
system = pvsystem.PVSystem(module_parameters=module_parameters)
pw = 3
airmass_absolute = 3
out = system.first_solar_spectral_loss(pw, airmass_absolute)
atmosphere.first_solar_spectral_correction.assert_called_once_with(
pw, airmass_absolute, module_type, coefficients)
assert_allclose(out, 1, atol=0.5)
def test_PVSystem_multi_array_first_solar_spectral_loss():
system = pvsystem.PVSystem(
arrays=[
pvsystem.Array(
module_parameters={'Technology': 'mc-Si'},
module_type='multisi'
),
pvsystem.Array(
module_parameters={'Technology': 'mc-Si'},
module_type='multisi'
)
]
)
loss_one, loss_two = system.first_solar_spectral_loss(1, 3)
assert loss_one == loss_two
@pytest.mark.parametrize('test_input,expected', [
([1000, 100, 5, 45], 1140.0510967821877),
([np.array([np.nan, 1000, 1000]),
np.array([100, np.nan, 100]),
np.array([1.1, 1.1, 1.1]),
np.array([10, 10, 10])],
np.array([np.nan, np.nan, 1081.1574])),
([pd.Series([1000]), pd.Series([100]), pd.Series([1.1]),
pd.Series([10])],
pd.Series([1081.1574]))
])
def test_sapm_effective_irradiance(sapm_module_params, test_input, expected):
test_input.append(sapm_module_params)
out = pvsystem.sapm_effective_irradiance(*test_input)
if isinstance(test_input, pd.Series):
assert_series_equal(out, expected, check_less_precise=4)
else:
assert_allclose(out, expected, atol=1e-1)
def test_PVSystem_sapm_effective_irradiance(sapm_module_params, mocker):
system = pvsystem.PVSystem(module_parameters=sapm_module_params)
mocker.spy(pvsystem, 'sapm_effective_irradiance')
poa_direct = 900
poa_diffuse = 100
airmass_absolute = 1.5
aoi = 0
p = (sapm_module_params['A4'], sapm_module_params['A3'],
sapm_module_params['A2'], sapm_module_params['A1'],
sapm_module_params['A0'])
f1 = np.polyval(p, airmass_absolute)
expected = f1 * (poa_direct + sapm_module_params['FD'] * poa_diffuse)
out = system.sapm_effective_irradiance(
poa_direct, poa_diffuse, airmass_absolute, aoi)
pvsystem.sapm_effective_irradiance.assert_called_once_with(
poa_direct, poa_diffuse, airmass_absolute, aoi, sapm_module_params)
assert_allclose(out, expected, atol=0.1)
def test_PVSystem_multi_array_sapm_effective_irradiance(sapm_module_params):
system = pvsystem.PVSystem(
arrays=[pvsystem.Array(module_parameters=sapm_module_params),
pvsystem.Array(module_parameters=sapm_module_params)]
)
poa_direct = (500, 900)
poa_diffuse = (50, 100)
aoi = (0, 10)
airmass_absolute = 1.5
irrad_one, irrad_two = system.sapm_effective_irradiance(
poa_direct, poa_diffuse, airmass_absolute, aoi
)
assert irrad_one != irrad_two
@pytest.fixture
def two_array_system(pvsyst_module_params, cec_module_params):
"""Two-array PVSystem.
Both arrays are identical.
"""
temperature_model = temperature.TEMPERATURE_MODEL_PARAMETERS['sapm'][
'open_rack_glass_glass'
]
# Need u_v to be non-zero so wind-speed changes cell temperature
# under the pvsyst model.
temperature_model['u_v'] = 1.0
# parameter for fuentes temperature model
temperature_model['noct_installed'] = 45
# parameters for noct_sam temperature model
temperature_model['noct'] = 45.
temperature_model['module_efficiency'] = 0.2
module_params = {**pvsyst_module_params, **cec_module_params}
return pvsystem.PVSystem(
arrays=[
pvsystem.Array(
temperature_model_parameters=temperature_model,
module_parameters=module_params
),
pvsystem.Array(
temperature_model_parameters=temperature_model,
module_parameters=module_params
)
]
)
@pytest.mark.parametrize("poa_direct, poa_diffuse, aoi",
[(20, (10, 10), (20, 20)),
((20, 20), (10,), (20, 20)),
((20, 20), (10, 10), 20)])
def test_PVSystem_sapm_effective_irradiance_value_error(
poa_direct, poa_diffuse, aoi, two_array_system):
with pytest.raises(ValueError,
match="Length mismatch for per-array parameter"):
two_array_system.sapm_effective_irradiance(
poa_direct, poa_diffuse, 10, aoi
)
def test_PVSystem_sapm_celltemp(mocker):
a, b, deltaT = (-3.47, -0.0594, 3) # open_rack_glass_glass
temp_model_params = {'a': a, 'b': b, 'deltaT': deltaT}
system = pvsystem.PVSystem(temperature_model_parameters=temp_model_params)
mocker.spy(temperature, 'sapm_cell')
temps = 25
irrads = 1000
winds = 1
out = system.sapm_celltemp(irrads, temps, winds)
temperature.sapm_cell.assert_called_once_with(irrads, temps, winds, a, b,
deltaT)
assert_allclose(out, 57, atol=1)
def test_PVSystem_sapm_celltemp_kwargs(mocker):
temp_model_params = temperature.TEMPERATURE_MODEL_PARAMETERS['sapm'][
'open_rack_glass_glass']
system = pvsystem.PVSystem(temperature_model_parameters=temp_model_params)
mocker.spy(temperature, 'sapm_cell')
temps = 25
irrads = 1000
winds = 1
out = system.sapm_celltemp(irrads, temps, winds)
temperature.sapm_cell.assert_called_once_with(irrads, temps, winds,
temp_model_params['a'],
temp_model_params['b'],
temp_model_params['deltaT'])
assert_allclose(out, 57, atol=1)
def test_PVSystem_multi_array_sapm_celltemp_different_arrays():
temp_model_one = temperature.TEMPERATURE_MODEL_PARAMETERS['sapm'][
'open_rack_glass_glass']
temp_model_two = temperature.TEMPERATURE_MODEL_PARAMETERS['sapm'][
'close_mount_glass_glass']
system = pvsystem.PVSystem(
arrays=[pvsystem.Array(temperature_model_parameters=temp_model_one),
pvsystem.Array(temperature_model_parameters=temp_model_two)]
)
temp_one, temp_two = system.sapm_celltemp(
(1000, 1000), 25, 1
)
assert temp_one != temp_two
def test_PVSystem_pvsyst_celltemp(mocker):
parameter_set = 'insulated'
temp_model_params = temperature.TEMPERATURE_MODEL_PARAMETERS['pvsyst'][
parameter_set]
alpha_absorption = 0.85
module_efficiency = 0.17
module_parameters = {'alpha_absorption': alpha_absorption,
'module_efficiency': module_efficiency}
system = pvsystem.PVSystem(module_parameters=module_parameters,
temperature_model_parameters=temp_model_params)
mocker.spy(temperature, 'pvsyst_cell')
irrad = 800
temp = 45
wind = 0.5
out = system.pvsyst_celltemp(irrad, temp, wind_speed=wind)
temperature.pvsyst_cell.assert_called_once_with(
irrad, temp, wind_speed=wind, u_c=temp_model_params['u_c'],
u_v=temp_model_params['u_v'], module_efficiency=module_efficiency,
alpha_absorption=alpha_absorption)
assert (out < 90) and (out > 70)
def test_PVSystem_faiman_celltemp(mocker):
u0, u1 = 25.0, 6.84 # default values
temp_model_params = {'u0': u0, 'u1': u1}
system = pvsystem.PVSystem(temperature_model_parameters=temp_model_params)
mocker.spy(temperature, 'faiman')
temps = 25
irrads = 1000
winds = 1
out = system.faiman_celltemp(irrads, temps, winds)
temperature.faiman.assert_called_once_with(irrads, temps, winds, u0, u1)
assert_allclose(out, 56.4, atol=1)
def test_PVSystem_noct_celltemp(mocker):
poa_global, temp_air, wind_speed, noct, module_efficiency = (
1000., 25., 1., 45., 0.2)
expected = 55.230790492
temp_model_params = {'noct': noct, 'module_efficiency': module_efficiency}
system = pvsystem.PVSystem(temperature_model_parameters=temp_model_params)
mocker.spy(temperature, 'noct_sam')
out = system.noct_sam_celltemp(poa_global, temp_air, wind_speed)
temperature.noct_sam.assert_called_once_with(
poa_global, temp_air, wind_speed, effective_irradiance=None, noct=noct,
module_efficiency=module_efficiency)
assert_allclose(out, expected)
    # different types
out = system.noct_sam_celltemp(np.array(poa_global), np.array(temp_air),
np.array(wind_speed))
assert_allclose(out, expected)
dr = pd.date_range(start='2020-01-01 12:00:00', end='2020-01-01 13:00:00',
freq='1H')
out = system.noct_sam_celltemp(pd.Series(index=dr, data=poa_global),
pd.Series(index=dr, data=temp_air),
pd.Series(index=dr, data=wind_speed))
assert_series_equal(out, pd.Series(index=dr, data=expected))
# now use optional arguments
temp_model_params.update({'transmittance_absorptance': 0.8,
'array_height': 2,
'mount_standoff': 2.0})
expected = 60.477703576
system = pvsystem.PVSystem(temperature_model_parameters=temp_model_params)
out = system.noct_sam_celltemp(poa_global, temp_air, wind_speed,
effective_irradiance=1100.)
assert_allclose(out, expected)
def test_PVSystem_noct_celltemp_error():
poa_global, temp_air, wind_speed, module_efficiency = (1000., 25., 1., 0.2)
temp_model_params = {'module_efficiency': module_efficiency}
system = pvsystem.PVSystem(temperature_model_parameters=temp_model_params)
with pytest.raises(KeyError):
system.noct_sam_celltemp(poa_global, temp_air, wind_speed)
@pytest.mark.parametrize("celltemp",
[pvsystem.PVSystem.faiman_celltemp,
pvsystem.PVSystem.pvsyst_celltemp,
pvsystem.PVSystem.sapm_celltemp,
pvsystem.PVSystem.fuentes_celltemp,
pvsystem.PVSystem.noct_sam_celltemp])
def test_PVSystem_multi_array_celltemp_functions(celltemp, two_array_system):
times = pd.date_range(start='2020-08-25 11:00', freq='H', periods=3)
irrad_one = pd.Series(1000, index=times)
irrad_two = pd.Series(500, index=times)
temp_air = pd.Series(25, index=times)
wind_speed = pd.Series(1, index=times)
temp_one, temp_two = celltemp(
two_array_system, (irrad_one, irrad_two), temp_air, wind_speed)
assert (temp_one != temp_two).all()
@pytest.mark.parametrize("celltemp",
[pvsystem.PVSystem.faiman_celltemp,
pvsystem.PVSystem.pvsyst_celltemp,
pvsystem.PVSystem.sapm_celltemp,
pvsystem.PVSystem.fuentes_celltemp,
pvsystem.PVSystem.noct_sam_celltemp])
def test_PVSystem_multi_array_celltemp_multi_temp(celltemp, two_array_system):
times = pd.date_range(start='2020-08-25 11:00', freq='H', periods=3)
irrad = pd.Series(1000, index=times)
temp_air_one = pd.Series(25, index=times)
temp_air_two = pd.Series(5, index=times)
wind_speed = pd.Series(1, index=times)
temp_one, temp_two = celltemp(
two_array_system,
(irrad, irrad),
(temp_air_one, temp_air_two),
wind_speed
)
assert (temp_one != temp_two).all()
    temp_one_switch, temp_two_switch = celltemp(
two_array_system,
(irrad, irrad),
(temp_air_two, temp_air_one),
wind_speed
)
assert_series_equal(temp_one, temp_two_switch)
    assert_series_equal(temp_two, temp_one_switch)
@pytest.mark.parametrize("celltemp",
[pvsystem.PVSystem.faiman_celltemp,
pvsystem.PVSystem.pvsyst_celltemp,
pvsystem.PVSystem.sapm_celltemp,
pvsystem.PVSystem.fuentes_celltemp,
pvsystem.PVSystem.noct_sam_celltemp])
def test_PVSystem_multi_array_celltemp_multi_wind(celltemp, two_array_system):
times = pd.date_range(start='2020-08-25 11:00', freq='H', periods=3)
irrad = pd.Series(1000, index=times)
temp_air = pd.Series(25, index=times)
wind_speed_one = pd.Series(1, index=times)
wind_speed_two = pd.Series(5, index=times)
temp_one, temp_two = celltemp(
two_array_system,
(irrad, irrad),
temp_air,
(wind_speed_one, wind_speed_two)
)
assert (temp_one != temp_two).all()
    temp_one_switch, temp_two_switch = celltemp(
two_array_system,
(irrad, irrad),
temp_air,
(wind_speed_two, wind_speed_one)
)
assert_series_equal(temp_one, temp_two_switch)
    assert_series_equal(temp_two, temp_one_switch)
@pytest.mark.parametrize("celltemp",
[pvsystem.PVSystem.faiman_celltemp,
pvsystem.PVSystem.pvsyst_celltemp,
pvsystem.PVSystem.sapm_celltemp,
pvsystem.PVSystem.fuentes_celltemp,
pvsystem.PVSystem.noct_sam_celltemp])
def test_PVSystem_multi_array_celltemp_temp_too_short(
celltemp, two_array_system):
with pytest.raises(ValueError,
match="Length mismatch for per-array parameter"):
celltemp(two_array_system, (1000, 1000), (1,), 1)
@pytest.mark.parametrize("celltemp",
[pvsystem.PVSystem.faiman_celltemp,
pvsystem.PVSystem.pvsyst_celltemp,
pvsystem.PVSystem.sapm_celltemp,
pvsystem.PVSystem.fuentes_celltemp,
pvsystem.PVSystem.noct_sam_celltemp])
def test_PVSystem_multi_array_celltemp_temp_too_long(
celltemp, two_array_system):
with pytest.raises(ValueError,
match="Length mismatch for per-array parameter"):
celltemp(two_array_system, (1000, 1000), (1, 1, 1), 1)
@pytest.mark.parametrize("celltemp",
[pvsystem.PVSystem.faiman_celltemp,
pvsystem.PVSystem.pvsyst_celltemp,
pvsystem.PVSystem.sapm_celltemp,
pvsystem.PVSystem.fuentes_celltemp,
pvsystem.PVSystem.noct_sam_celltemp])
def test_PVSystem_multi_array_celltemp_wind_too_short(
celltemp, two_array_system):
with pytest.raises(ValueError,
match="Length mismatch for per-array parameter"):
celltemp(two_array_system, (1000, 1000), 25, (1,))
@pytest.mark.parametrize("celltemp",
[pvsystem.PVSystem.faiman_celltemp,
pvsystem.PVSystem.pvsyst_celltemp,
pvsystem.PVSystem.sapm_celltemp,
pvsystem.PVSystem.fuentes_celltemp,
pvsystem.PVSystem.noct_sam_celltemp])
def test_PVSystem_multi_array_celltemp_wind_too_long(
celltemp, two_array_system):
with pytest.raises(ValueError,
match="Length mismatch for per-array parameter"):
celltemp(two_array_system, (1000, 1000), 25, (1, 1, 1))
@pytest.mark.parametrize("celltemp",
[pvsystem.PVSystem.faiman_celltemp,
pvsystem.PVSystem.pvsyst_celltemp,
pvsystem.PVSystem.sapm_celltemp,
pvsystem.PVSystem.fuentes_celltemp,
pvsystem.PVSystem.noct_sam_celltemp])
def test_PVSystem_multi_array_celltemp_poa_length_mismatch(
celltemp, two_array_system):
with pytest.raises(ValueError,
match="Length mismatch for per-array parameter"):
celltemp(two_array_system, 1000, 25, 1)
def test_PVSystem_fuentes_celltemp(mocker):
noct_installed = 45
temp_model_params = {'noct_installed': noct_installed}
system = pvsystem.PVSystem(temperature_model_parameters=temp_model_params)
spy = mocker.spy(temperature, 'fuentes')
index = pd.date_range('2019-01-01 11:00', freq='h', periods=3)
temps = pd.Series(25, index)
irrads = pd.Series(1000, index)
winds = pd.Series(1, index)
out = system.fuentes_celltemp(irrads, temps, winds)
assert_series_equal(spy.call_args[0][0], irrads)
assert_series_equal(spy.call_args[0][1], temps)
assert_series_equal(spy.call_args[0][2], winds)
assert spy.call_args[1]['noct_installed'] == noct_installed
assert_series_equal(out, pd.Series([52.85, 55.85, 55.85], index,
name='tmod'))
def test_PVSystem_fuentes_celltemp_override(mocker):
# test that the surface_tilt value in the cell temp calculation can be
# overridden but defaults to the surface_tilt attribute of the PVSystem
spy = mocker.spy(temperature, 'fuentes')
noct_installed = 45
index = pd.date_range('2019-01-01 11:00', freq='h', periods=3)
temps = pd.Series(25, index)
irrads = pd.Series(1000, index)
winds = pd.Series(1, index)
# uses default value
temp_model_params = {'noct_installed': noct_installed}
system = pvsystem.PVSystem(temperature_model_parameters=temp_model_params,
surface_tilt=20)
system.fuentes_celltemp(irrads, temps, winds)
assert spy.call_args[1]['surface_tilt'] == 20
# can be overridden
temp_model_params = {'noct_installed': noct_installed, 'surface_tilt': 30}
system = pvsystem.PVSystem(temperature_model_parameters=temp_model_params,
surface_tilt=20)
system.fuentes_celltemp(irrads, temps, winds)
assert spy.call_args[1]['surface_tilt'] == 30
def test_Array__infer_temperature_model_params():
array = pvsystem.Array(module_parameters={},
racking_model='open_rack',
module_type='glass_polymer')
expected = temperature.TEMPERATURE_MODEL_PARAMETERS[
'sapm']['open_rack_glass_polymer']
assert expected == array._infer_temperature_model_params()
array = pvsystem.Array(module_parameters={},
racking_model='freestanding',
module_type='glass_polymer')
expected = temperature.TEMPERATURE_MODEL_PARAMETERS[
'pvsyst']['freestanding']
assert expected == array._infer_temperature_model_params()
array = pvsystem.Array(module_parameters={},
racking_model='insulated',
module_type=None)
expected = temperature.TEMPERATURE_MODEL_PARAMETERS[
'pvsyst']['insulated']
assert expected == array._infer_temperature_model_params()
def test_Array__infer_cell_type():
array = pvsystem.Array(module_parameters={})
assert array._infer_cell_type() is None
def test_calcparams_desoto(cec_module_params):
times = pd.date_range(start='2015-01-01', periods=3, freq='12H')
effective_irradiance = pd.Series([0.0, 800.0, 800.0], index=times)
temp_cell = pd.Series([25, 25, 50], index=times)
IL, I0, Rs, Rsh, nNsVth = pvsystem.calcparams_desoto(
effective_irradiance,
temp_cell,
alpha_sc=cec_module_params['alpha_sc'],
a_ref=cec_module_params['a_ref'],
I_L_ref=cec_module_params['I_L_ref'],
I_o_ref=cec_module_params['I_o_ref'],
R_sh_ref=cec_module_params['R_sh_ref'],
R_s=cec_module_params['R_s'],
EgRef=1.121,
dEgdT=-0.0002677)
assert_series_equal(IL, pd.Series([0.0, 6.036, 6.096], index=times),
check_less_precise=3)
assert_series_equal(I0, pd.Series([0.0, 1.94e-9, 7.419e-8], index=times),
check_less_precise=3)
assert_allclose(Rs, 0.094)
assert_series_equal(Rsh, pd.Series([np.inf, 19.65, 19.65], index=times),
check_less_precise=3)
assert_series_equal(nNsVth, pd.Series([0.473, 0.473, 0.5127], index=times),
check_less_precise=3)
def test_calcparams_cec(cec_module_params):
times = pd.date_range(start='2015-01-01', periods=3, freq='12H')
effective_irradiance = pd.Series([0.0, 800.0, 800.0], index=times)
temp_cell = pd.Series([25, 25, 50], index=times)
IL, I0, Rs, Rsh, nNsVth = pvsystem.calcparams_cec(
effective_irradiance,
temp_cell,
alpha_sc=cec_module_params['alpha_sc'],
a_ref=cec_module_params['a_ref'],
I_L_ref=cec_module_params['I_L_ref'],
I_o_ref=cec_module_params['I_o_ref'],
R_sh_ref=cec_module_params['R_sh_ref'],
R_s=cec_module_params['R_s'],
Adjust=cec_module_params['Adjust'],
EgRef=1.121,
dEgdT=-0.0002677)
assert_series_equal(IL, pd.Series([0.0, 6.036, 6.0896], index=times),
check_less_precise=3)
assert_series_equal(I0, pd.Series([0.0, 1.94e-9, 7.419e-8], index=times),
check_less_precise=3)
assert_allclose(Rs, 0.094)
assert_series_equal(Rsh, pd.Series([np.inf, 19.65, 19.65], index=times),
check_less_precise=3)
assert_series_equal(nNsVth, pd.Series([0.473, 0.473, 0.5127], index=times),
check_less_precise=3)
def test_calcparams_pvsyst(pvsyst_module_params):
times = pd.date_range(start='2015-01-01', periods=2, freq='12H')
effective_irradiance = pd.Series([0.0, 800.0], index=times)
    temp_cell = pd.Series([25, 50], index=times)
#!/usr/bin/env python
# coding: utf-8
# In[ ]:
# In[1]:
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
get_ipython().run_line_magic('matplotlib', 'inline')
from glob import glob
from collections import OrderedDict
# In[34]:
###plot the distribution of PPA
def PPAHist(chrs):
x = fgwas.loc[(fgwas['chr']==chrs), "PPA"]
print(max(x))
n, bins, patches = plt.hist(x, 50, density=True, facecolor='g', alpha=0.75)
plt.hist(x, bins=bins)
plt.xlabel('PPA')
plt.ylabel('count')
plt.title('Histogram of PPA of fine-mapping region #' + str(chrs))
plt.grid(True)
plt.show()
# In[35]:
# PPAHist(12)
# In[81]:
#get the max PPA by each seg
def rankPPA(fgwas, region):
print("best fine-mappinig result")
    indices = fgwas.groupby('chunk')['PPA'].idxmax()
print(fgwas.loc[indices])
print("best GWAS result")
    indices = region.groupby('SEGNUMBER')['pval'].idxmin()
print(region.loc[indices])
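# The groupby + idxmax/idxmin pattern above selects the whole row holding the extreme value per group,
# e.g. (illustrative toy data):
#   toy = pd.DataFrame({'chunk': [1, 1, 2], 'PPA': [0.2, 0.9, 0.4]})
#   toy.loc[toy.groupby('chunk')['PPA'].idxmax()]   # -> the rows with PPA 0.9 (chunk 1) and 0.4 (chunk 2)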
# In[70]:
# In[76]:
fgwas_output = '/data/analysis/UKBB/result/DeepBind.bfs.gz'
fgwas_nop='/data/analysis/UKBB/processed/test1.bfs.gz'
regionp='/data/analysis/UKBB/processed/I10.gwas.imputed_v3.both_sexes.finemapping.full.tsv.gz'
fgwas=pd.read_csv(fgwas_output,sep=' ')
fgwas_no = pd.read_csv(fgwas_nop, sep=' ')
# -*- coding: utf-8 -*-
"""
Created on Sun Oct 12 13:08:12 2014
@author: Ken
"""
import marisa_trie
import re
import pandas as pd
import numpy as np
import sys
if sys.version_info[0] == 3:
basestring = str
unicode = str
from multiprocessing import Pool, cpu_count
"""
testTrie = marisa_trie.Trie([u'derpx', u'derpy', u'derpz'])
testFRDict = {u'derpx': u'derp', u'derpy': u'derp', u'derpz': u'derp'}
trieInput_df = pd.DataFrame(data=testFRDict, index=["Values"]).T
trieInput_df["Keys"] = trieInput_df.index
trieInput_df = trieInput_df.ix[:, ["Keys", "Values"]]
"""
class BulkFindReplacer:
def __init__(self, trieInput, version="v4"):
if isinstance(trieInput, basestring):
trieInput = pd.read_csv(trieInput)
self.frTrie = marisa_trie.Trie(list(trieInput.iloc[:, 0].apply(unicode)))
self.frDict = dict(zip(trieInput.iloc[:, 0].apply(unicode), trieInput.iloc[:, 1].apply(unicode)))
self.startRegex = re.compile(r'[^\w]')
self.endRegex = re.compile(r'[^\w]')
self.BulkFindReplace_str = self.BulkFindReplace_orig_str
if version == "v3":
self.BulkFindReplace_str = self.BulkFindReplace_v3_str
elif version == "v4":
self.BulkFindReplace_str = self.BulkFindReplace_v4_str
def BulkFindReplace_orig_str(self, inString, startRegex=r'[^\w]', endRegex=r'[^\w]'):
i = 0
outString = inString
strLen = len(outString)
while (i < strLen):
            if i == 0 or re.search(startRegex, outString[i - 1]):
remainingStr = outString[i:]
pref_list = self.frTrie.prefixes(remainingStr)
if len(pref_list) > 0:
# iterate backwards through list
for pref in pref_list[::-1]:
# make sure char after prefix is an endRegex char
                        if (len(remainingStr) == len(pref) or re.search(endRegex, remainingStr[len(pref)])):
# if there is a valid prefix, replace 1st instance
mapStr = self.frDict[pref]
if mapStr != pref:
outString = outString[:i] + remainingStr.replace(pref, mapStr, 1)
strLen = len(outString)
i += len(mapStr) - 1
break
i += 1
return outString
def BulkFindReplace_v3_str(self, inString, startRegex=r'[^\w]', endRegex=r'[^\w]'):
i = 0
outString = inString
strLen = len(outString)
while (i < strLen):
            if i == 0 or self.startRegex.search(outString[i - 1]):
remainingStr = outString[i:]
pref_list = self.frTrie.prefixes(remainingStr)
if len(pref_list) > 0:
# iterate backwards through list
for pref in pref_list[::-1]:
# make sure char after prefix is an endRegex char
                        if (len(remainingStr) == len(pref) or self.endRegex.search(remainingStr[len(pref)])):
# if there is a valid prefix, replace 1st instance
mapStr = self.frDict[pref]
if mapStr != pref:
outString = outString[:i] + remainingStr.replace(pref, mapStr, 1)
strLen = len(outString)
i += len(mapStr) - 1
break
i += 1
return outString
def BulkFindReplace_v4_str(self, inString, startRegex=r'[^\w]', endRegex=r'[^\w]'):
i = 0
outString = inString
outString_list = []
# while (i < strLen):
iSkipTo = -1
lastCut = 0
for i in [0] + [x.end() for x in self.startRegex.finditer(inString)]:
if i >= iSkipTo:
remainingStr = inString[i:]
pref_list = self.frTrie.prefixes(remainingStr)
if len(pref_list) > 0:
# iterate backwards through list
for pref in pref_list[::-1]:
# make sure char after prefix is an endRegex char
                        if (len(remainingStr) == len(pref) or self.endRegex.search(remainingStr[len(pref)])):
# if there is a valid prefix, replace 1st instance
mapStr = self.frDict[pref]
if mapStr != pref:
addStr = inString[lastCut:i] + mapStr
outString_list.append(addStr)
lastCut = i + len(pref)
# outString = outString[:i] + remainingStr.replace(pref, mapStr, 1)
# strLen = len(outString)
iSkipTo = i + len(pref)
break
if len(outString_list) > 0:
if lastCut < len(inString):
outString_list.append(inString[lastCut:len(inString)])
outString = "".join(outString_list)
else:
outString = inString
return outString
def BulkFindReplaceToCompletion_str(self, inString, startRegex=r'[^\w]', endRegex=r'[^\w]', maxCycles=10):
cycle = 0
previousStr = inString
inString = self.BulkFindReplace_str(inString, startRegex, endRegex)
cycle = 1
if inString == previousStr or cycle >= maxCycles:
return inString
# Save secondToLastStr to help prevent endless cycles
secondToLastStr = previousStr
previousStr = inString
inString = self.BulkFindReplace_str(inString, startRegex, endRegex)
cycle = 2
while (inString != previousStr and inString != secondToLastStr and cycle < maxCycles):
secondToLastStr = previousStr
previousStr = inString
inString = self.BulkFindReplace_str(inString, startRegex, endRegex)
cycle += 1
# if cycle is 10:
# return "\nsecondToLastStr: " + secondToLastStr + ";\npreviousStr: " + previousStr + ";\ncurrentStr: " + inString + ";\n"
return inString
def BulkFindReplace(self, strSeries, startRegex=r'[^\w]', endRegex=r'[^\w]', maxCycles=10):
isBaseString = isinstance(strSeries, basestring)
strSeries = pd.Series(strSeries).copy()
strSeries = strSeries.apply(unicode)
        strSeries = strSeries.apply(self.BulkFindReplaceToCompletion_str, args=(startRegex, endRegex, maxCycles))
if isBaseString:
return strSeries.iloc[0]
return strSeries
def BulkFindReplaceMPHelper(self, args):
strSeries, startRegex, endRegex, maxCycles = args
        strSeries = strSeries.apply(self.BulkFindReplaceToCompletion_str, args=(startRegex, endRegex, maxCycles))
return strSeries
def BulkFindReplaceMultiProc(self, strSeries, startRegex=r'[^\w]', endRegex=r'[^\w]', maxCycles=10, workers=-1):
isBaseString = isinstance(strSeries, basestring)
strSeries = pd.Series(strSeries).copy()
strSeries = strSeries.fillna("")
strSeries = strSeries.apply(unicode)
if workers == -1:
if cpu_count() % 2 == 0:
workers = int(cpu_count()/2)
else:
workers = cpu_count()
if workers > 1:
pool = Pool(processes=workers)
strSeries_list = pool.map(self.BulkFindReplaceMPHelper, [(d, startRegex, endRegex, maxCycles) for d in np.array_split(strSeries, workers)])
pool.close()
            strSeries = pd.concat(strSeries_list)
        else:
            # single-process fallback (mirrors BulkFindReplace)
            strSeries = strSeries.apply(self.BulkFindReplaceToCompletion_str, args=(startRegex, endRegex, maxCycles))
        if isBaseString:
            return strSeries.iloc[0]
        return strSeries
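# Illustrative usage sketch (hypothetical mapping and strings): build a replacer from a two-column
# find/replace table and apply it to a Series of strings. Names below are examples only.
# frMap = pd.DataFrame({"Keys": ["colour", "favourite"], "Values": ["color", "favorite"]})
# replacer = BulkFindReplacer(frMap, version="v4")
# cleaned = replacer.BulkFindReplace(pd.Series(["My favourite colour is blue."]))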
# Python 3.5
# Script written by <NAME> (<EMAIL>), <NAME> (<EMAIL>), and <NAME> (<EMAIL>)
# VERSION 0.1 - JUNE 2020
#--------TURN OFF MAGMASAT WARNING--------#
import warnings
warnings.filterwarnings("ignore", message="rubicon.objc.ctypes_patch has only been tested ")
warnings.filterwarnings("ignore", message="The handle")
#-----------------IMPORTS-----------------#
import pandas as pd
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib.font_manager as font_manager
from cycler import cycler
from abc import ABC, abstractmethod
from scipy.optimize import root_scalar
from scipy.optimize import root
from scipy.optimize import minimize
import sys
import sympy
from copy import copy
# import anvil_server
#--------------MELTS preamble---------------#
from thermoengine import equilibrate
# instantiate thermoengine equilibrate MELTS instance
melts = equilibrate.MELTSmodel('1.2.0')
# Suppress phases not required in the melts simulation
phases = melts.get_phase_names()
for phase in phases:
melts.set_phase_inclusion_status({phase: False})
melts.set_phase_inclusion_status({'Fluid': True, 'Liquid': True})
#----------DEFINE SOME CONSTANTS-------------#
oxides = ['SiO2', 'TiO2', 'Al2O3', 'Fe2O3', 'Cr2O3', 'FeO', 'MnO', 'MgO', 'NiO', 'CoO', 'CaO', 'Na2O', 'K2O', 'P2O5',
'H2O', 'CO2']
anhydrous_oxides = ['SiO2', 'TiO2', 'Al2O3', 'Fe2O3', 'Cr2O3', 'FeO', 'MnO', 'MgO', 'NiO', 'CoO', 'CaO', 'Na2O', 'K2O', 'P2O5']
volatiles = ['H2O', 'CO2']
oxideMass = {'SiO2': 28.085+32, 'MgO': 24.305+16, 'FeO': 55.845+16, 'CaO': 40.078+16, 'Al2O3': 2*26.982+16*3, 'Na2O': 22.99*2+16,
'K2O': 39.098*2+16, 'MnO': 54.938+16, 'TiO2': 47.867+32, 'P2O5': 2*30.974+5*16, 'Cr2O3': 51.996*2+3*16,
'NiO': 58.693+16, 'CoO': 28.01+16, 'Fe2O3': 55.845*2+16*3,
'H2O': 18.02, 'CO2': 44.01}
CationNum = {'SiO2': 1, 'MgO': 1, 'FeO': 1, 'CaO': 1, 'Al2O3': 2, 'Na2O': 2,
'K2O': 2, 'MnO': 1, 'TiO2': 1, 'P2O5': 2, 'Cr2O3': 2,
'NiO': 1, 'CoO': 1, 'Fe2O3': 2, 'H2O': 2, 'CO2': 1}
OxygenNum = {'SiO2': 2, 'MgO': 1, 'FeO': 1, 'CaO': 1, 'Al2O3': 3, 'Na2O': 1,
'K2O': 1, 'MnO': 1, 'TiO2': 2, 'P2O5': 5, 'Cr2O3': 3,
'NiO': 1, 'CoO': 1, 'Fe2O3': 3, 'H2O': 1, 'CO2': 2}
CationCharge = {'SiO2': 4, 'MgO': 2, 'FeO': 2, 'CaO': 2, 'Al2O3': 3, 'Na2O': 1,
'K2O': 1, 'MnO': 2, 'TiO2': 4, 'P2O5': 5, 'Cr2O3': 3,
'NiO': 2, 'CoO': 2, 'Fe2O3': 3, 'H2O': 1, 'CO2': 4}
CationMass = {'SiO2': 28.085, 'MgO': 24.305, 'FeO': 55.845, 'CaO': 40.078, 'Al2O3': 26.982, 'Na2O': 22.990,
'K2O': 39.098, 'MnO': 54.938, 'TiO2': 47.867, 'P2O5': 30.974, 'Cr2O3': 51.996,
'NiO': 58.693, 'CoO': 28.01, 'Fe2O3': 55.845, 'H2O': 2, 'CO2': 12.01}
oxides_to_cations = {'SiO2': 'Si', 'MgO': 'Mg', 'FeO': 'Fe', 'CaO': 'Ca', 'Al2O3': 'Al', 'Na2O': 'Na',
'K2O': 'K', 'MnO': 'Mn', 'TiO2': 'Ti', 'P2O5': 'P', 'Cr2O3': 'Cr',
'NiO': 'Ni', 'CoO': 'Co', 'Fe2O3': 'Fe3', 'H2O': 'H', 'CO2': 'C'}
cations_to_oxides = {'Si': 'SiO2', 'Mg': 'MgO', 'Fe': 'FeO', 'Ca': 'CaO', 'Al': 'Al2O3', 'Na': 'Na2O',
'K': 'K2O', 'Mn': 'MnO', 'Ti': 'TiO2', 'P': 'P2O5', 'Cr': 'Cr2O3',
'Ni': 'NiO', 'Co': 'CoO', 'Fe3': 'Fe2O3', 'H': 'H2O', 'C': 'CO2'}
#----------DEFINE SOME EXCEPTIONS--------------#
class Error(Exception):
"""Base class for exceptions in this module."""
pass
class InputError(Error):
"""Exception raised for errors in the input.
Attributes:
expression -- input expression in which the error occurred
message -- explanation of the error
"""
def __init__(self, message):
self.message = message
class SaturationError(Error):
"""Exception raised for errors thrown when a sample does not reach saturation.
Attributes:
expression -- input expression in which the error occurred
message -- explanation of the error
"""
def __init__(self, message):
self.message = message
#----------DEFINE CUSTOM PLOTTING FORMATTING------------#
style = "seaborn-colorblind"
plt.style.use(style)
plt.rcParams["mathtext.default"] = "regular"
plt.rcParams["mathtext.fontset"] = "dejavusans"
mpl.rcParams['patch.linewidth'] = 1
mpl.rcParams['axes.linewidth'] = 1
plt.rcParams['axes.titlesize'] = 20
plt.rcParams['axes.labelsize'] = 18
plt.rcParams['xtick.labelsize'] = 14
plt.rcParams['ytick.labelsize'] = 14
plt.rcParams['legend.fontsize'] = 14
#Define color cycler based on plot style set here
the_rc = plt.style.library[style] #get style formatting set by plt.style.use()
color_list = the_rc['axes.prop_cycle'].by_key()['color'] #list of colors by hex code
color_cyler = the_rc['axes.prop_cycle'] #get the cycler
def printTable(myDict):
""" Pretty print a dictionary (as pandas DataFrame)
Parameters
----------
myDict: dict
A dictionary
Returns
-------
pandas DataFrame
The input dictionary converted to a pandas DataFrame
"""
try:
oxidesum = sum(myDict[oxide] for oxide in oxides)
myDict.update({"Sum oxides": oxidesum})
except:
pass
table = pd.DataFrame([v for v in myDict.values()], columns = ['value'],
index = [k for k in myDict.keys()])
return table
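# Illustrative usage sketch (hypothetical wt% values): printTable returns a one-column DataFrame
# indexed by oxide name; a "Sum oxides" row is appended only when every oxide key is present.
# comp = {'SiO2': 77.3, 'Al2O3': 12.6, 'Na2O': 3.9, 'K2O': 4.6, 'H2O': 5.0, 'CO2': 0.05}
# printTable(comp)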
#----------DEFINE SOME UNIVERSAL INFORMATIVE METHODS--------------#
def get_model_names():
"""
Returns all available model names as a list of strings.
"""
model_names = []
for key, value in default_models.items():
model_names.append(key)
return model_names
#----------DEFINE SOME BASIC DATA TRANSFORMATION METHODS-----------#
def mol_to_wtpercent(sample):
"""
    Takes in a pandas DataFrame containing multi-sample input or a dictionary containing single-sample input
    and returns the same type of object with oxide values converted from mole percent to wt percent.
    Parameters
    ----------
    sample: pandas DataFrame object or dictionary
        Variable name referring to the pandas DataFrame object that contains user-imported data, or a dictionary
        for single-sample input.
"""
data = sample
if isinstance(sample, pd.DataFrame):
for key, value in oxideMass.items():
data.loc[:, key] *= value
data["MPOSum"] = sum([data[oxide] for oxide in oxides])
for oxide in oxides:
data.loc[:, oxide] /= data['MPOSum']
data.loc[:, oxide] *= 100
del data['MPOSum']
elif isinstance(sample, dict):
for oxide in oxides:
if oxide in data.keys():
pass
else:
data[oxide] = 0.0
data = {oxide: data[oxide] for oxide in oxides}
for key, value in oxideMass.items():
data.update({key: (data[key] * value)})
MPOSum = sum(data.values())
for key, value in data.items():
data.update({key: 100 * value / MPOSum})
return data
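# Illustrative sketch of the single-sample (dict) path, with hypothetical mole-percent values;
# oxides missing from the input are treated as 0.0 and the result sums to 100 wt%.
# mol_comp = {'SiO2': 80.0, 'Al2O3': 8.0, 'Na2O': 4.0, 'K2O': 3.0, 'H2O': 5.0}
# wt_comp = mol_to_wtpercent(mol_comp)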
def wtpercentOxides_to_molCations(oxides):
"""Takes in a pandas Series containing major element oxides in wt%, and converts it
to molar proportions of cations (normalised to 1).
Parameters
----------
oxides dict or pandas Series
Major element oxides in wt%.
Returns
-------
dict or pandas Series
Molar proportions of cations, normalised to 1.
"""
molCations = {}
_oxides = oxides.copy()
if type(oxides) == dict:
oxideslist = list(_oxides.keys())
elif type(oxides) == pd.core.series.Series:
oxideslist = list(_oxides.index)
else:
raise InputError("The composition input must be a pandas Series or dictionary.")
for ox in oxideslist:
cation = oxides_to_cations[ox]
molCations[cation] = CationNum[ox]*_oxides[ox]/oxideMass[ox]
if type(oxides) == pd.core.series.Series:
molCations = pd.Series(molCations)
molCations = molCations/molCations.sum()
else:
total = np.sum(list(molCations.values()))
for ox in oxideslist:
cation = oxides_to_cations[ox]
molCations[cation] = molCations[cation]/total
return molCations
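# Illustrative sketch (hypothetical basaltic wt% values): returns cation mole fractions normalised to 1.
# bulk = pd.Series({'SiO2': 49.0, 'TiO2': 1.7, 'Al2O3': 17.3, 'FeO': 10.2, 'MgO': 5.8, 'CaO': 10.9,
#                   'Na2O': 3.5, 'K2O': 2.0, 'P2O5': 0.5, 'MnO': 0.2, 'H2O': 2.0, 'CO2': 0.1})
# cations = wtpercentOxides_to_molCations(bulk)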
def wtpercentOxides_to_molOxides(oxides):
""" Takes in a pandas Series or dict containing major element oxides in wt%, and converts it
to molar proportions (normalised to 1).
Parameters
----------
oxides dict or pandas Series
Major element oxides in wt%
Returns
-------
dict or pandas Series
Molar proportions of major element oxides, normalised to 1.
"""
molOxides = {}
_oxides = oxides.copy()
if type(oxides) == dict or type(oxides) == pd.core.series.Series:
if type(oxides) == dict:
oxideslist = list(oxides.keys())
elif type(oxides) == pd.core.series.Series:
oxideslist = list(oxides.index)
for ox in oxideslist:
molOxides[ox] = _oxides[ox]/oxideMass[ox]
if type(oxides) == pd.core.series.Series:
molOxides = pd.Series(molOxides)
molOxides = molOxides/molOxides.sum()
else:
total = np.sum(list(molOxides.values()))
for ox in oxideslist:
molOxides[ox] = molOxides[ox]/total
return molOxides
    elif isinstance(oxides, pd.DataFrame):
        data = oxides
for key, value in oxideMass.items():
data.loc[:, key] /= value
data["MPOSum"] = sum([data[oxide] for oxide in oxides])
for oxide in oxides:
data.loc[:, oxide] /= data['MPOSum']
del data['MPOSum']
return data
else:
raise InputError("The composition input must be a pandas Series or dictionary.")
def wtpercentOxides_to_molSingleO(oxides,exclude_volatiles=False):
""" Takes in a pandas Series containing major element oxides in wt%, and constructs
the chemical formula, on a single oxygen basis.
Parameters
----------
oxides dict or pandas Series
Major element oxides in wt%
Returns
-------
dict or pandas Series
The chemical formula of the composition, on a single oxygen basis. Each element is
a separate entry in the Series.
"""
molCations = {}
_oxides = oxides.copy()
if type(oxides) == dict:
oxideslist = list(oxides.keys())
elif type(oxides) == pd.core.series.Series:
oxideslist = list(oxides.index)
else:
raise InputError("The composition input must be a pandas Series or dictionary.")
total_O = 0.0
for ox in oxideslist:
if exclude_volatiles == False or (ox != 'H2O' and ox != 'CO2'):
cation = oxides_to_cations[ox]
molCations[cation] = CationNum[ox]*oxides[ox]/oxideMass[ox]
total_O += OxygenNum[ox]*oxides[ox]/oxideMass[ox]
if type(oxides) == pd.core.series.Series:
molCations = pd.Series(molCations)
molCations = molCations/total_O
else:
# total = np.sum(list(molCations.values()))
for ox in oxideslist:
if exclude_volatiles == False or (ox != 'H2O' and ox != 'CO2'):
cation = oxides_to_cations[ox]
molCations[cation] = molCations[cation]/total_O
return molCations
def wtpercentOxides_to_formulaWeight(sample,exclude_volatiles=False):
""" Converts major element oxides in wt% to the formula weight (on a 1 oxygen basis).
Parameters
----------
sample dict or pandas Series
Major element oxides in wt%.
exclude_volatiles bool
If True H2O and CO2 will be excluded from the formula weight calculation.
Returns
-------
float
The formula weight of the composition, on a one oxygen basis.
"""
if type(sample) == dict:
_sample = pd.Series(sample.copy())
elif type(sample) != pd.core.series.Series:
raise InputError("The composition input must be a pandas Series or dictionary.")
else:
_sample = sample.copy()
cations = wtpercentOxides_to_molSingleO(_sample,exclude_volatiles=exclude_volatiles)
if type(cations) != dict:
cations = dict(cations)
# if exclude_volatiles == True:
# if 'C' in cations:
# cations.pop('C')
# if 'H' in cations:
# cations.pop('H')
# newsum = 0
# for cation in cations:
# newsum += OxygenNum[cations_to_oxides[cation]]
# for cation in cations:
# cations[cation] = cations[cation]/newsum
FW = 15.999
for cation in list(cations.keys()):
FW += cations[cation]*CationMass[cations_to_oxides[cation]]
return FW
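# Illustrative sketch (hypothetical wt% oxides): formula weight on a one-oxygen basis, with and
# without the volatile components.
# comp = {'SiO2': 49.0, 'Al2O3': 17.3, 'MgO': 9.0, 'CaO': 11.0, 'FeO': 10.0, 'Na2O': 2.7, 'H2O': 1.0}
# fw_hydrous = wtpercentOxides_to_formulaWeight(comp)
# fw_anhydrous = wtpercentOxides_to_formulaWeight(comp, exclude_volatiles=True)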
#----------DATA TRANSFORMATION FOR PANDAS DATAFRAMES---------#
def fluid_molfrac_to_wt(data, H2O_colname='XH2O_fl_VESIcal', CO2_colname='XCO2_fl_VESIcal'):
"""
    Takes in a pandas dataframe object and converts only the fluid composition from mole fraction to wt%, leaving the melt composition
    intact. The user must specify the names of the XH2O_fl and XCO2_fl columns.
Parameters
----------
data: pandas DataFrame
Sample composition(s) containing columns for H2O and CO2 concentrations in the fluid.
H2O_colname: str
        OPTIONAL. The default value is 'XH2O_fl_VESIcal', which is what is returned by ExcelFile() core calculations.
String containing the name of the column corresponding to the H2O concentration in the fluid, in mol fraction.
CO2_colname: str
        OPTIONAL. The default value is 'XCO2_fl_VESIcal', which is what is returned by ExcelFile() core calculations.
String containing the name of the column corresponding to the CO2 concentration in the fluid, in mol fraction.
Returns
-------
pandas DataFrame
Original data passed plus newly calculated values are returned.
"""
convData = data.copy()
MPO_H2O_list = []
MPO_CO2_list = []
for index, row in convData.iterrows():
MPO_H2O_list.append(row[H2O_colname] * oxideMass["H2O"])
MPO_CO2_list.append(row[CO2_colname] * oxideMass["CO2"])
convData["MPO_H2O"] = MPO_H2O_list
convData["MPO_CO2"] = MPO_CO2_list
convData["H2O_fl_wt"] = 100 * convData["MPO_H2O"] / (convData["MPO_H2O"] + convData["MPO_CO2"])
convData["CO2_fl_wt"] = 100 * convData["MPO_CO2"] / (convData["MPO_H2O"] + convData["MPO_CO2"])
del convData["MPO_H2O"]
del convData["MPO_CO2"]
return convData
def fluid_wt_to_molfrac(data, H2O_colname='H2O_fl_wt', CO2_colname='CO2_fl_wt'):
"""
    Takes in a pandas dataframe object and converts only the fluid composition from wt% to mole fraction, leaving the melt composition
    intact. The user must specify the names of the H2O_fl_wt and CO2_fl_wt columns.
Parameters
----------
data: pandas DataFrame
DataFrame containing columns for H2O and CO2 concentrations in the fluid.
H2O_colname: str
OPTIONAL. The default value is 'H2O_fl_wt', which is what is returned by ExcelFile() core calculations.
String containing the name of the column corresponding to the H2O concentration in the fluid, in wt%.
CO2_colname: str
OPTIONAL. The default value is 'CO2_fl_wt', which is what is returned by ExcelFile() core calculations.
String containing the name of the column corresponding to the CO2 concentration in the fluid, in wt%.
Returns
-------
pandas DataFrame
Original data passed plus newly calculated values are returned.
"""
convData = data.copy()
MPO_H2O_list = []
MPO_CO2_list = []
for index, row in convData.iterrows():
MPO_H2O_list.append(row[H2O_colname] / oxideMass["H2O"])
MPO_CO2_list.append(row[CO2_colname] / oxideMass["CO2"])
convData["MPO_H2O"] = MPO_H2O_list
convData["MPO_CO2"] = MPO_CO2_list
convData["XH2O_fl"] = convData["MPO_H2O"] / (convData["MPO_H2O"] + convData["MPO_CO2"])
convData["XCO2_fl"] = convData["MPO_CO2"] / (convData["MPO_H2O"] + convData["MPO_CO2"])
del convData["MPO_H2O"]
del convData["MPO_CO2"]
return convData
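# Illustrative round-trip sketch (hypothetical fluid compositions): convert fluid mole fractions to wt%
# and back, using the default column names produced by the ExcelFile calculations.
# fl = pd.DataFrame({'XH2O_fl_VESIcal': [0.8, 0.5], 'XCO2_fl_VESIcal': [0.2, 0.5]})
# fl_wt = fluid_molfrac_to_wt(fl)          # adds 'H2O_fl_wt' and 'CO2_fl_wt'
# fl_molfrac = fluid_wt_to_molfrac(fl_wt)  # adds 'XH2O_fl' and 'XCO2_fl'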
#----------DEFINE SOME NORMALIZATION METHODS-----------#
def normalize(sample):
"""Normalizes an input composition to 100%. This is the 'standard' normalization routine.
Parameters
----------
sample: pandas Series, dictionary, pandas DataFrame, or ExcelFile object
A single composition can be passed as a dictionary. Multiple compositions can be passed either as
a pandas DataFrame or an ExcelFile object. Compositional information as oxides must be present.
Returns
-------
Sample passed as > Returned as
pandas Series > pandas Series
dictionary > dictionary
pandas DataFrame > pandas DataFrame
ExcelFile object > pandas DataFrame
Normalized major element oxides.
"""
def single_normalize(sample):
single_sample = sample
return {k: 100.0 * v / sum(single_sample.values()) for k, v in single_sample.items()}
def multi_normalize(sample):
multi_sample = sample.copy()
multi_sample["Sum"] = sum([multi_sample[oxide] for oxide in oxides])
for column in multi_sample:
if column in oxides:
multi_sample[column] = 100.0*multi_sample[column]/multi_sample["Sum"]
del multi_sample["Sum"]
return multi_sample
if isinstance(sample, dict):
_sample = sample.copy()
return single_normalize(_sample)
elif isinstance(sample, pd.core.series.Series):
_sample = pd.Series(sample.copy())
sample_dict = sample.to_dict()
return pd.Series(single_normalize(sample_dict))
elif isinstance(sample, ExcelFile):
_sample = sample
data = _sample.data
return multi_normalize(data)
    elif isinstance(sample, pd.DataFrame):
        return multi_normalize(sample)
    else:
        raise InputError("The composition input must be a pandas Series or dictionary for single sample \
                            or a pandas DataFrame or ExcelFile object for multi-sample.")
def normalize_FixedVolatiles(sample):
""" Normalizes major element oxides to 100 wt%, including volatiles. The volatile
wt% will remain fixed, whilst the other major element oxides are reduced proportionally
so that the total is 100 wt%.
Parameters
----------
sample: pandas Series, dictionary, pandas DataFrame, or ExcelFile object
Major element oxides in wt%
Returns
-------
Sample passed as > Returned as
pandas Series > pandas Series
dictionary > dictionary
pandas DataFrame > pandas DataFrame
ExcelFile object > pandas DataFrame
Normalized major element oxides.
"""
def single_FixedVolatiles(sample):
normalized = pd.Series({},dtype=float)
volatiles = 0
if 'CO2' in list(_sample.index):
volatiles += _sample['CO2']
if 'H2O' in list(_sample.index):
volatiles += _sample['H2O']
for ox in list(_sample.index):
if ox != 'H2O' and ox != 'CO2':
normalized[ox] = _sample[ox]
normalized = normalized/np.sum(normalized)*(100-volatiles)
if 'CO2' in list(_sample.index):
normalized['CO2'] = _sample['CO2']
if 'H2O' in list(_sample.index):
normalized['H2O'] = _sample['H2O']
return normalized
def multi_FixedVolatiles(sample):
multi_sample = sample.copy()
multi_sample["Sum_anhy"] = sum([multi_sample[oxide] for oxide in anhydrous_oxides])
multi_sample["Sum_vols"] = sum([multi_sample[vol] for vol in volatiles])
for column in multi_sample:
if column in anhydrous_oxides:
multi_sample[column] = 100.0*multi_sample[column]/multi_sample["Sum_anhy"]
multi_sample[column] = multi_sample[column] / (100.0/(100.0-multi_sample["Sum_vols"]))
del multi_sample["Sum_anhy"]
del multi_sample["Sum_vols"]
return multi_sample
if isinstance(sample, dict):
_sample = pd.Series(sample.copy())
return single_FixedVolatiles(_sample).to_dict()
elif isinstance(sample, pd.core.series.Series):
_sample = pd.Series(sample.copy())
return single_FixedVolatiles(_sample)
elif isinstance(sample, ExcelFile):
_sample = sample
data = _sample.data
return multi_FixedVolatiles(data)
elif isinstance(sample, pd.DataFrame):
return multi_FixedVolatiles(sample)
else:
raise InputError("The composition input must be a pandas Series or dictionary for single sample \
or a pandas DataFrame or ExcelFile object for multi-sample.")
def normalize_AdditionalVolatiles(sample):
"""Normalises major element oxide wt% to 100%, assuming it is volatile-free. If
H2O or CO2 are passed to the function, their un-normalized values will be retained
in addition to the normalized non-volatile oxides, summing to >100%.
Parameters
----------
sample: pandas Series, dictionary, pandas DataFrame, or ExcelFile object
Major element oxides in wt%
Returns
-------
Sample passed as > Returned as
pandas Series > pandas Series
dictionary > dictionary
pandas DataFrame > pandas DataFrame
ExcelFile object > pandas DataFrame
Normalized major element oxides.
"""
def single_AdditionalVolatiles(sample):
        normalized = pd.Series({},dtype=float)
for ox in list(_sample.index):
if ox != 'H2O' and ox != 'CO2':
normalized[ox] = _sample[ox]
normalized = normalized/np.sum(normalized)*100
if 'H2O' in _sample.index:
normalized['H2O'] = _sample['H2O']
if 'CO2' in _sample.index:
normalized['CO2'] = _sample['CO2']
return normalized
def multi_AdditionalVolatiles(sample):
multi_sample = sample.copy()
multi_sample["Sum"] = sum([multi_sample[oxide] for oxide in anhydrous_oxides])
for column in multi_sample:
if column in anhydrous_oxides:
multi_sample[column] = 100.0*multi_sample[column]/multi_sample["Sum"]
del multi_sample["Sum"]
return multi_sample
if isinstance(sample, dict):
_sample = pd.Series(sample.copy())
return single_AdditionalVolatiles(_sample).to_dict()
elif isinstance(sample, pd.core.series.Series):
_sample = pd.Series(sample.copy())
        return single_AdditionalVolatiles(_sample)
elif isinstance(sample, ExcelFile):
_sample = sample
data = _sample.data
return multi_AdditionalVolatiles(data)
elif isinstance(sample, pd.DataFrame):
return multi_AdditionalVolatiles(sample)
else:
raise InputError("The composition input must be a pandas Series or dictionary for single sample \
or a pandas DataFrame or ExcelFile object for multi-sample.")
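# Illustrative sketch (hypothetical wt% values summing to less than 100): the three routines treat
# volatiles differently.
# raw = {'SiO2': 47.0, 'Al2O3': 16.0, 'MgO': 8.0, 'CaO': 11.0, 'FeO': 9.5, 'Na2O': 2.5, 'H2O': 4.0, 'CO2': 0.1}
# normalize(raw)                       # all oxides, volatiles included, rescaled to total 100
# normalize_FixedVolatiles(raw)        # H2O and CO2 kept as given; anhydrous oxides fill the remainder
# normalize_AdditionalVolatiles(raw)   # anhydrous oxides normalised to 100; H2O and CO2 appended unchanged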
#------------DEFINE MAJOR CLASSES-------------------#
class ExcelFile(object):
"""An excel file with sample names and oxide compositions
Attributes
----------
filename: str
Path to the excel file, e.g., "my_file.xlsx"
sheet_name: str
OPTIONAL. Default value is 0 which gets the first sheet in the excel spreadsheet file. This implements the pandas.
read_excel() sheet_name parameter. But functionality to read in more than one sheet at a time (e.g., pandas.read_excel(sheet_name=None))
        is not yet implemented in VESIcal. From the pandas 1.0.4 documentation:
Available cases:
- Defaults to 0: 1st sheet as a DataFrame
- 1: 2nd sheet as a DataFrame
- "Sheet1": Load sheet with name “Sheet1”
input_type: str or int
OPTIONAL. Default is 'wtpercent'. String defining whether the oxide composition is given in wt percent
("wtpercent", which is the default), mole percent ("molpercent"), or mole fraction ("molfrac").
label: str
OPTIONAL. Default is 'Label'. Name of the column within the passed Excel file referring to sample names.
"""
def __init__(self, filename, sheet_name=0, input_type='wtpercent', label='Label', **kwargs):
"""Return an ExcelFile object whoes parameters are defined here."""
if isinstance(sheet_name, str) or isinstance(sheet_name, int):
pass
else:
raise InputError("If sheet_name is passed, it must be of type str or int. Currently, VESIcal cannot import more than one sheet at a time.")
self.input_type = input_type
data = pd.read_excel(filename, sheet_name=sheet_name)
data = data.fillna(0)
try:
data = data.set_index(label)
except:
raise InputError(
"Imported file must contain a column of sample names. If this column is not titled 'Label' (the default value), you must pass the column name to arg label. For example: ExcelFile('myfile.xslx', label='SampleNames')") #TODO test
if 'model' in kwargs:
warnings.warn("You don't need to pass a model here, so it will be ignored. You can specify a model when performing calculations on your dataset (e.g., calculate_dissolved_volatiles())",RuntimeWarning)
total_iron_columns = ["FeOt", "FeOT", "FeOtot", "FeOtotal", "FeOstar", "FeO*"]
for name in total_iron_columns:
if name in data.columns:
if 'FeO' in data.columns:
warnings.warn("Both " + str(name) + " and FeO columns were passed. " + str(name) + " column will be ignored.",RuntimeWarning)
else:
warnings.warn("Total iron column " + str(name) + " detected. This column will be treated as FeO. If Fe2O3 data are not given, Fe2O3 will be 0.0.",RuntimeWarning)
data['FeO'] = data[name]
for oxide in oxides:
if oxide in data.columns:
pass
else:
data[oxide] = 0.0
# TODO test all input types produce correct values
if input_type == "wtpercent":
pass
if input_type == "molpercent":
data = mol_to_wtpercent(data)
if input_type == "molfrac":
data = mol_to_wtpercent(data)
self.data = data
def preprocess_sample(self,sample):
"""
Adds 0.0 values to any oxide data not passed.
Parameters
----------
sample: pandas DataFrame
self.data composition of samples in wt% oxides
Returns
-------
pandas DataFrame
"""
for oxide in oxides:
if oxide in self.data.columns:
pass
else:
self.data[oxide] = 0.0
return sample
def get_sample_oxide_comp(self, sample, norm='none'):
"""
Returns oxide composition of a single sample from a user-imported excel file as a dictionary
Parameters
----------
sample: string
Name of the desired sample
        norm: string
            OPTIONAL. Default value is 'none'. This specifies the style of normalization applied to the sample.
'standard' normalizes the entire input composition (including any volatiles) to 100%.
'fixedvolatiles' normalizes oxides to 100%, including volatiles. The volatile
wt% will remain fixed, whilst the other major element oxides are reduced proportionally
so that the total is 100 wt%.
'additionalvolatiles' normalizes oxides to 100%, assuming it is volatile-free. If
H2O or CO2 are passed to the function, their un-normalized values will be retained
in addition to the normalized non-volatile oxides, summing to >100%.
'none' returns the value-for-value un-normalized composition.
Returns
-------
dictionary
Composition of the sample as oxides
"""
if norm == 'none' or norm == 'standard' or norm == 'fixedvolatiles' or norm == 'additionalvolatiles':
pass
else:
raise InputError('norm must be either none, standard, fixedvolatiles, or additionalvolatiles.')
data = self.data
my_sample = pd.DataFrame(data.loc[sample])
sample_dict = (my_sample.to_dict()[sample])
sample_oxides = {}
for item, value in sample_dict.items():
if item in oxides:
sample_oxides.update({item: value})
if norm == 'standard':
return normalize(sample_oxides)
if norm == 'fixedvolatiles':
return normalize_FixedVolatiles(sample_oxides)
if norm == 'additionalvolatiles':
return normalize_AdditionalVolatiles(sample_oxides)
if norm == 'none':
return sample_oxides
def get_XH2O_fluid(self, sample, temperature, pressure, H2O, CO2):
"""An internally used function to calculate fluid composition.
Parameters
----------
sample: dictionary
Sample composition in wt% oxides
temperature: float
Temperature in degrees C.
pressure: float
Pressure in bars
H2O: float
wt% H2O in the system
CO2: float
wt% CO2 in the system
Returns
-------
float
Mole fraction of H2O in the H2O-CO2 fluid
"""
pressureMPa = pressure / 10.0
bulk_comp = {oxide: sample[oxide] for oxide in oxides}
bulk_comp["H2O"] = H2O
bulk_comp["CO2"] = CO2
feasible = melts.set_bulk_composition(bulk_comp)
output = melts.equilibrate_tp(temperature, pressureMPa, initialize=True)
(status, temperature, pressureMPa, xmlout) = output[0]
fluid_comp = melts.get_composition_of_phase(xmlout, phase_name='Fluid', mode='component')
#NOTE mode='component' returns endmember component keys with values in mol fraction.
if "Water" in fluid_comp:
H2O_fl = fluid_comp["Water"]
else:
H2O_fl = 0.0
# if H2O_fl == 0:
# raise SaturationError("Composition not fluid saturated.")
return H2O_fl
def save_excelfile(self, filename, calculations, sheet_name=None): #TODO how to handle if user just wants to normalize data?
"""
Saves data calculated by the user in batch processing mode (using the ExcelFile class methods) to an organized
excel file, with the original user data plus any calculated data.
Parameters
----------
filename: string
Name of the file. Extension (.xlsx) should be passed along with the name itself, all in quotes (e.g., 'myfile.xlsx').
calculations: list
List of variables containing calculated outputs from any of the core ExcelFile functions: calculate_dissolved_volatiles,
calculate_equilibrium_fluid_comp, and calculate_saturation_pressure.
sheet_name: None or list
OPTIONAL. Default value is None. Allows user to set the name of the sheet or sheets written to the Excel file.
Returns
-------
Excel File
Creates and saves an Excel file with data from each calculation saved to its own sheet.
"""
if isinstance(calculations, list):
if isinstance(sheet_name, list) or sheet_name is None:
pass
else:
raise InputError("calculations and sheet_name must be type list. If you only have one calculation or sheet_name to pass, make sure they are passed in square brackets []")
with pd.ExcelWriter(filename) as writer:
self.data.to_excel(writer, 'Original_User_Data')
if sheet_name is None:
for n, df in enumerate(calculations):
df.to_excel(writer, 'Calc%s' % n)
elif isinstance(sheet_name, list):
if len(sheet_name) == len(calculations):
pass
else:
raise InputError("calculations and sheet_name must have the same length")
for i in range(len(calculations)):
if isinstance(sheet_name[i], str):
calculations[i].to_excel(writer, sheet_name[i])
else:
raise InputError("if sheet_name is passed, it must be list of strings")
else:
raise InputError("sheet_name must be type list")
return print("Saved " + str(filename))
def calculate_dissolved_volatiles(self, temperature, pressure, X_fluid=1, print_status=True, model='MagmaSat', record_errors=False, **kwargs):
"""
Calculates the amount of H2O and CO2 dissolved in a magma at the given P/T conditions and fluid composition. Fluid composition
will be matched to within 0.0001 mole fraction.
Parameters
----------
temperature: float, int, or str
Temperature, in degrees C. Can be passed as float, in which case the
passed value is used as the temperature for all samples. Alternatively, temperature information for each individual
sample may already be present in the ExcelFile object. If so, pass the str value corresponding to the column
title in the ExcelFile object.
        pressure: float, int, or str
Pressure, in bars. Can be passed as float or int, in which case the
passed value is used as the pressure for all samples. Alternatively, pressure information for each individual
sample may already be present in the ExcelFile object. If so, pass the str value corresponding to the column
title in the ExcelFile object.
X_fluid: float, int, or str
OPTIONAL: Default value is 1. The mole fraction of H2O in the H2O-CO2 fluid. X_fluid=1 is a pure H2O fluid. X_fluid=0 is a
pure CO2 fluid. Can be passed as a float or int, in which case the passed value is used as the X_fluid for all samples.
Alternatively, X_fluid information for each individual sample may already be present in the ExcelFile object. If so, pass
the str value corresponding to the column title in the ExcelFile object.
print_status: bool
OPTIONAL: The default value is True, in which case the progress of the calculation will be printed to the terminal.
If set to False, nothing will be printed. MagmaSat calculations tend to be slow, and so a value of True is recommended
for most use cases.
model: string
The default value is 'MagmaSat'. Any other model name can be passed here as a string (in single quotes).
record_errors: bool
OPTIONAL: If True, any errors arising during the calculation will be recorded as a column.
Returns
-------
pandas DataFrame
Original data passed plus newly calculated values are returned.
"""
data = self.preprocess_sample(self.data)
dissolved_data = data.copy()
if isinstance(temperature, str):
file_has_temp = True
temp_name = temperature
elif isinstance(temperature, float) or isinstance(temperature, int):
file_has_temp = False
else:
raise InputError("temp must be type str or float or int")
if isinstance(pressure, str):
file_has_press = True
press_name = pressure
elif isinstance(pressure, float) or isinstance(pressure, int):
file_has_press = False
else:
raise InputError("pressure must be type str or float or int")
if isinstance(X_fluid, str):
file_has_X = True
X_name = X_fluid
elif isinstance(X_fluid, float) or isinstance(X_fluid, int):
file_has_X = False
if X_fluid != 0 and X_fluid !=1:
if X_fluid < 0.001 or X_fluid > 0.999:
raise InputError("X_fluid is calculated to a precision of 0.0001 mole fraction. \
Value for X_fluid must be between 0.0001 and 0.9999.")
else:
raise InputError("X_fluid must be type str or float or int")
H2Ovals = []
CO2vals = []
warnings = []
errors = []
if model in get_models(models='mixed'):
for index, row in dissolved_data.iterrows():
try:
if file_has_temp == True:
temperature = row[temp_name]
if file_has_press == True:
pressure = row[press_name]
if file_has_X == True:
X_fluid = row[X_name]
bulk_comp = {oxide: row[oxide] for oxide in oxides}
calc = calculate_dissolved_volatiles(sample=bulk_comp, pressure=pressure, temperature=temperature,
X_fluid=(X_fluid, 1-X_fluid), model=model,
silence_warnings=True, **kwargs)
H2Ovals.append(calc.result['H2O_liq'])
CO2vals.append(calc.result['CO2_liq'])
warnings.append(calc.calib_check)
errors.append('')
except Exception as inst:
H2Ovals.append(np.nan)
CO2vals.append(np.nan)
warnings.append('Calculation Failed.')
errors.append(sys.exc_info()[0])
dissolved_data["H2O_liq_VESIcal"] = H2Ovals
dissolved_data["CO2_liq_VESIcal"] = CO2vals
if file_has_temp == False:
dissolved_data["Temperature_C_VESIcal"] = temperature
if file_has_press == False:
dissolved_data["Pressure_bars_VESIcal"] = pressure
if file_has_X == False:
dissolved_data["X_fluid_input_VESIcal"] = X_fluid
dissolved_data["Model"] = model
dissolved_data["Warnings"] = warnings
if record_errors == True:
dissolved_data["Errors"] = errors
return dissolved_data
elif model == 'MagmaSat':
XH2Ovals = []
XCO2vals = []
FluidProportionvals = []
for index, row in dissolved_data.iterrows():
if print_status == True:
print("Calculating sample " + str(index))
try:
if file_has_temp == True:
temperature = row[temp_name]
if file_has_press == True:
pressure = row[press_name]
if file_has_X == True:
X_fluid = row[X_name]
bulk_comp = {oxide: row[oxide] for oxide in oxides}
calc = calculate_dissolved_volatiles(sample=bulk_comp, pressure=pressure, temperature=temperature,
X_fluid=X_fluid, model=model, silence_warnings=True,
verbose=True)
H2Ovals.append(calc.result['H2O_liq'])
CO2vals.append(calc.result['CO2_liq'])
XH2Ovals.append(calc.result['XH2O_fl'])
XCO2vals.append(calc.result['XCO2_fl'])
FluidProportionvals.append(calc.result['FluidProportion_wt'])
warnings.append(calc.calib_check)
errors.append('')
except Exception as inst:
H2Ovals.append(np.nan)
CO2vals.append(np.nan)
XH2Ovals.append(np.nan)
XCO2vals.append(np.nan)
FluidProportionvals.append(np.nan)
warnings.append('Calculation Failed.')
errors.append(sys.exc_info()[0])
dissolved_data["H2O_liq_VESIcal"] = H2Ovals
dissolved_data["CO2_liq_VESIcal"] = CO2vals
if file_has_temp == False:
dissolved_data["Temperature_C_VESIcal"] = temperature
if file_has_press == False:
dissolved_data["Pressure_bars_VESIcal"] = pressure
if file_has_X == False:
dissolved_data["X_fluid_input_VESIcal"] = X_fluid
dissolved_data["Model"] = model
dissolved_data["Warnings"] = warnings
if record_errors == True:
dissolved_data["Errors"] = errors
return dissolved_data
else:
XH2Ovals = []
XCO2vals = []
FluidProportionvals = []
for index, row in dissolved_data.iterrows():
if file_has_temp == True:
temperature = row[temp_name]
if file_has_press == True:
pressure = row[press_name]
if file_has_X == True:
X_fluid = row[X_name]
bulk_comp = {oxide: row[oxide] for oxide in oxides}
if 'Water' in model:
try:
calc = calculate_dissolved_volatiles(sample=bulk_comp, pressure=pressure, temperature=temperature,
X_fluid=X_fluid, model=model, silence_warnings=True)
H2Ovals.append(calc.result)
warnings.append(calc.calib_check)
except:
H2Ovals.append(0)
warnings.append('Calculation Failed #001')
if 'Carbon' in model:
try:
calc = calculate_dissolved_volatiles(sample=bulk_comp, pressure=pressure, temperature=temperature,
X_fluid=X_fluid, model=model, silence_warnings=True)
CO2vals.append(calc.result)
warnings.append(calc.calib_check)
except:
CO2vals.append(0)
warnings.append('Calculation Failed #002')
if 'Water' in model:
dissolved_data["H2O_liq_VESIcal"] = H2Ovals
if 'Carbon' in model:
dissolved_data["CO2_liq_VESIcal"] = CO2vals
if file_has_temp == False:
dissolved_data["Temperature_C_VESIcal"] = temperature
if file_has_press == False:
dissolved_data["Pressure_bars_VESIcal"] = pressure
if file_has_X == False:
dissolved_data["X_fluid_input_VESIcal"] = X_fluid
dissolved_data["Model"] = model
dissolved_data["Warnings"] = warnings
return dissolved_data
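    # Illustrative usage sketch (hypothetical file name, conditions, and column names):
    # myfile = ExcelFile('my_data.xlsx')
    # dissolved = myfile.calculate_dissolved_volatiles(temperature=1000.0, pressure=2000.0, X_fluid=0.5)
    # # or read per-sample conditions from columns already present in the spreadsheet:
    # dissolved = myfile.calculate_dissolved_volatiles(temperature='Temp_C', pressure='Press_bars', X_fluid=1)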
def calculate_equilibrium_fluid_comp(self, temperature, pressure, print_status=False, model='MagmaSat', **kwargs):
#TODO make molfrac the default
"""
Returns H2O and CO2 concentrations in wt% or mole fraction in a fluid in equilibrium with the given sample(s) at the given P/T condition.
Parameters
----------
temperature: float, int, or str
Temperature, in degrees C. Can be passed as float, in which case the
passed value is used as the temperature for all samples. Alternatively, temperature information for each individual
sample may already be present in the ExcelFile object. If so, pass the str value corresponding to the column
title in the ExcelFile object.
        pressure: float, int, or str
Pressure, in bars. Can be passed as float or int, in which case the
passed value is used as the pressure for all samples. Alternatively, pressure information for each individual
sample may already be present in the ExcelFile object. If so, pass the str value corresponding to the column
title in the ExcelFile object.
model: string
OPTIONAL: Default is 'MagmaSat'. Any other model name can be passed here.
Returns
-------
pandas DataFrame
Original data passed plus newly calculated values are returned.
"""
data = self.preprocess_sample(self.data)
fluid_data = data.copy()
if isinstance(temperature, str):
file_has_temp = True
temp_name = temperature
elif isinstance(temperature, float) or isinstance(temperature, int):
file_has_temp = False
else:
raise InputError("temp must be type str or float or int")
if isinstance(pressure, str):
file_has_press = True
press_name = pressure
elif isinstance(pressure, float) or isinstance(pressure, int):
file_has_press = False
else:
raise InputError("pressure must be type str or float or int")
H2Ovals = []
CO2vals = []
warnings = []
if model in get_models(models='mixed') or model == "MooreWater":
for index, row in fluid_data.iterrows():
try:
if file_has_temp == True:
temperature = row[temp_name]
if file_has_press == True:
pressure = row[press_name]
bulk_comp = {oxide: row[oxide] for oxide in oxides}
calc = calculate_equilibrium_fluid_comp(sample=bulk_comp, pressure=pressure, temperature=temperature,
model=model, silence_warnings=True, **kwargs)
H2Ovals.append(calc.result['H2O'])
CO2vals.append(calc.result['CO2'])
warnings.append(calc.calib_check)
except:
H2Ovals.append(np.nan)
CO2vals.append(np.nan)
warnings.append("Calculation Failed.")
fluid_data["XH2O_fl_VESIcal"] = H2Ovals
fluid_data["XCO2_fl_VESIcal"] = CO2vals
if file_has_temp == False:
fluid_data["Temperature_C_VESIcal"] = temperature
if file_has_press == False:
fluid_data["Pressure_bars_VESIcal"] = pressure
fluid_data["Model"] = model
fluid_data["Warnings"] = warnings
return fluid_data
elif model == 'MagmaSat':
for index, row in fluid_data.iterrows():
if print_status == True:
print("Calculating sample " + str(index))
try:
if file_has_temp == True:
temperature = row[temp_name]
if file_has_press == True:
pressure = row[press_name]
bulk_comp = {oxide: row[oxide] for oxide in oxides}
calc = calculate_equilibrium_fluid_comp(sample=bulk_comp, pressure=pressure, temperature=temperature, model=model, silence_warnings=True)
H2Ovals.append(calc.result['H2O'])
CO2vals.append(calc.result['CO2'])
warnings.append(calc.calib_check)
except:
H2Ovals.append(np.nan)
CO2vals.append(np.nan)
warnings.append("Calculation Failed.")
fluid_data["XH2O_fl_VESIcal"] = H2Ovals
fluid_data["XCO2_fl_VESIcal"] = CO2vals
if file_has_temp == False:
fluid_data["Temperature_C_VESIcal"] = temperature
if file_has_press == False:
fluid_data["Pressure_bars_VESIcal"] = pressure
fluid_data["Model"] = model
fluid_data["Warnings"] = warnings
return fluid_data
else:
saturated = []
for index, row in fluid_data.iterrows():
try:
if file_has_temp == True:
temperature = row[temp_name]
if file_has_press == True:
pressure = row[press_name]
bulk_comp = {oxide: row[oxide] for oxide in oxides}
calc = calculate_equilibrium_fluid_comp(sample=bulk_comp, pressure=pressure, temperature=temperature, model=model, silence_warnings=True)
saturated.append(calc.result)
warnings.append(calc.calib_check)
except:
saturated.append(np.nan)
warnings.append("Calculation Failed.")
fluid_data["Saturated_VESIcal"] = saturated
if file_has_temp == False:
fluid_data["Temperature_C_VESIcal"] = temperature
if file_has_press == False:
fluid_data["Pressure_bars_VESIcal"] = pressure
fluid_data["Model"] = model
fluid_data["Warnings"] = warnings
return fluid_data
def calculate_saturation_pressure(self, temperature, print_status=True, model='MagmaSat', **kwargs): #TODO fix weird printing
"""
Calculates the saturation pressure of multiple sample compositions in the ExcelFile.
Parameters
----------
temperature: float, int, or str
Temperature at which to calculate saturation pressures, in degrees C. Can be passed as float or int, in which case the
passed value is used as the temperature for all samples. Alternatively, temperature information for each individual
sample may already be present in the passed ExcelFile object. If so, pass the str value corresponding to the column
title in the passed ExcelFile object.
print_status: bool
OPTIONAL: The default value is True, in which case the progress of the calculation will be printed to the terminal.
If set to False, nothing will be printed. MagmaSat calculations tend to be slow, and so a value of True is recommended
            for most use cases.
model: string
OPTIONAL: Default is 'MagmaSat'. Any other model name can be passed here.
Returns
-------
pandas DataFrame object
Values returned are saturation pressure in bars, the mass of fluid present, and the composition of the
fluid present.
"""
data = self.preprocess_sample(self.data)
satp_data = data.copy()
if isinstance(temperature, str):
file_has_temp = True
temp_name = temperature
elif isinstance(temperature, float) or isinstance(temperature, int):
file_has_temp = False
else:
raise InputError("temperature must be type str or float or int")
if model != 'MagmaSat':
satP = []
warnings = []
for index, row in satp_data.iterrows():
try:
if file_has_temp == True:
temperature = row[temp_name]
bulk_comp = {oxide: row[oxide] for oxide in oxides}
calc = calculate_saturation_pressure(sample=bulk_comp, temperature=temperature,
model=model, silence_warnings=True, **kwargs)
satP.append(calc.result)
warnings.append(calc.calib_check)
except:
satP.append(np.nan)
warnings.append("Calculation Failed")
satp_data["SaturationP_bars_VESIcal"] = satP
if file_has_temp == False:
satp_data["Temperature_C_VESIcal"] = temperature
satp_data["Model"] = model
satp_data["Warnings"] = warnings
return satp_data
else:
satP = []
flmass = []
flH2O = []
flCO2 = []
flsystem_wtper = []
warnings = []
for index, row in satp_data.iterrows():
if print_status == True:
print("Calculating sample " + str(index))
try:
if file_has_temp == True:
temperature = row[temp_name]
bulk_comp = {oxide: row[oxide] for oxide in oxides}
calc = calculate_saturation_pressure(sample=bulk_comp, temperature=temperature, model=model, verbose=True, silence_warnings=True)
satP.append(calc.result["SaturationP_bars"])
flmass.append(calc.result["FluidMass_grams"])
flsystem_wtper.append(calc.result["FluidProportion_wt"])
flH2O.append(calc.result["XH2O_fl"])
flCO2.append(calc.result["XCO2_fl"])
warnings.append(calc.calib_check)
except:
satP.append(np.nan)
flmass.append(np.nan)
flsystem_wtper.append(np.nan)
flH2O.append(np.nan)
flCO2.append(np.nan)
warnings.append("Calculation Failed")
satp_data["SaturationP_bars_VESIcal"] = satP
if file_has_temp == False:
satp_data["Temperature_C_VESIcal"] = temperature
satp_data["XH2O_fl_VESIcal"] = flH2O
satp_data["XCO2_fl_VESIcal"] = flCO2
satp_data["FluidMass_grams_VESIcal"] = flmass
satp_data["FluidSystem_wt_VESIcal"] = flsystem_wtper
satp_data["Model"] = model
satp_data["Warnings"] = warnings
if print_status == True:
print("Done!")
return satp_data
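# Illustrative end-to-end sketch (hypothetical file names): load a spreadsheet, run the batch
# calculations defined above, and save the results alongside the original data.
# myfile = ExcelFile('my_data.xlsx', label='Label')
# satPs = myfile.calculate_saturation_pressure(temperature=925.0)
# eqfluid = myfile.calculate_equilibrium_fluid_comp(temperature=925.0, pressure=1000.0)
# myfile.save_excelfile('my_results.xlsx', calculations=[satPs, eqfluid],
#                       sheet_name=['SaturationPressures', 'EquilibriumFluids'])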
class CalibrationRange(object):
""" The CalibrationRange object allows the range of allowable parameters to be specified and
used in checking and reporting of the results.
"""
def __init__(self, parameter_name, value, checkfunction=None, units='', model_name='',
fail_msg='',fail_dict={}, pass_msg='', pass_dict={}, description_msg='', description_dict={}):
self.parameter_name = parameter_name
self.value = value
self.checkfunction = checkfunction
self.units = units
self.model_name = model_name
self.fail_msg = (copy(fail_msg), copy(fail_dict))
self.pass_msg = (copy(pass_msg), copy(pass_dict))
self.description_msg = (copy(description_msg), copy(description_dict))
def check(self,parameters):
"""Method for checking whether parameters satisfy the calibration range."""
if self.parameter_name in parameters:
return self.checkfunction(self.value,parameters[self.parameter_name])
else:
return None
def string(self,parameters,report_nonexistance=True):
"""Returns a string statement of the calibration check"""
if type(parameters) == type(None):
msgdict = self.description_msg[1]
if type(self.value) == float or type(self.value) == int:
msgdict['calib_val'] = self.value
elif type(self.value) == list or type(self.value) == tuple or type(self.value) == np.ndarray:
for i in range(len(self.value)):
msgdict['calib_val'+str(i)] = self.value[i]
if 'param_name' not in msgdict:
msgdict['param_name'] = self.parameter_name
if 'units' not in msgdict:
msgdict['units'] = self.units
if 'model_name' not in msgdict:
msgdict['model_name'] = self.model_name
return self.description_msg[0].format(**msgdict)
else:
check = self.check(parameters)
if check == True:
msgdict = self.pass_msg[1]
msgdict['param_val'] = parameters[self.parameter_name]
if type(self.value) == float or type(self.value) == int:
msgdict['calib_val'] = self.value
elif type(self.value) == list or type(self.value) == tuple or type(self.value) == np.ndarray:
for i in range(len(self.value)):
msgdict['calib_val'+str(i)] = self.value[i]
if 'param_name' not in msgdict:
msgdict['param_name'] = self.parameter_name
if 'units' not in msgdict:
msgdict['units'] = self.units
if 'model_name' not in msgdict:
msgdict['model_name'] = self.model_name
return self.pass_msg[0].format(**msgdict)
elif check == False:
msgdict = self.fail_msg[1]
msgdict['param_val'] = parameters[self.parameter_name]
if type(self.value) == float or type(self.value) == int:
msgdict['calib_val'] = self.value
elif type(self.value) == list or type(self.value) == tuple or type(self.value) == np.ndarray:
for i in range(len(self.value)):
msgdict['calib_val'+str(i)] = self.value[i]
if 'param_name' not in msgdict:
msgdict['param_name'] = self.parameter_name
if 'units' not in msgdict:
msgdict['units'] = self.units
if 'model_name' not in msgdict:
msgdict['model_name'] = self.model_name
return self.fail_msg[0].format(**msgdict)
else:
if report_nonexistance == True:
return "A value for {} was not provided.".format(self.parameter_name)
else:
return ''
# class old_CalibrationRange(object):
# """ The CalibrationRange object allows the range of allowable parameters to be specified and
# used in checking and reporting of the results.
# """
# def __init__(self,parameter_name,value,unit='',modelname='',explanation_string=None,
# parameter_string=None,value_fmt="{:.1f}"):
# self.parameter_name = parameter_name
# self.value = value
# self.value_fmt = value_fmt
# self.model_name = modelname
# self.unit = unit
# self.explanation_string = explanation_string
# if parameter_string is not None:
# self.parameter_string = parameter_string
# else:
# self.parameter_string = parameter_name
#
# @abstractmethod
# def check(self,parameters):
# """Method for checking whether parameters satisfy the calibration range."""
# return True
#
# @abstractmethod
# def string(self,parameters):
# """Returns a string statement of the calibration check"""
# return 'No string return defined. '
class Model(object):
"""The model object implements a volatile solubility model. It is composed
of the methods needed to evaluate :func:`VESIcal.calculate_dissolved_volatiles`,
:func:`VESIcal.calculate_equilibrium_fluid_comp`, and :func:`calculate_saturation_pressure`. The
fugacity and activity models for the volatiles species must be specified,
defaulting to ideal.
"""
def __init__(self):
self.set_volatile_species(None)
self.set_fugacity_model(fugacity_idealgas())
self.set_activity_model(activity_idealsolution())
self.set_calibration_ranges([])
self.set_solubility_dependence(False)
def set_volatile_species(self,volatile_species):
if type(volatile_species) == str:
volatile_species = [volatile_species]
elif type(volatile_species) != list:
raise InputError("volatile_species must be a str or list.")
self.volatile_species = volatile_species
def set_fugacity_model(self,fugacity_model):
self.fugacity_model = fugacity_model
def set_activity_model(self,activity_model):
self.activity_model = activity_model
def set_calibration_ranges(self,calibration_ranges):
self.calibration_ranges = calibration_ranges
def set_solubility_dependence(self,solubility_dependence):
self.solubility_dependence = solubility_dependence
@abstractmethod
def calculate_dissolved_volatiles(self,**kwargs):
pass
@abstractmethod
def calculate_equilibrium_fluid_comp(self,**kwargs):
pass
@abstractmethod
def calculate_saturation_pressure(self,**kwargs):
pass
@abstractmethod
def preprocess_sample(self,**kwargs):
pass
# @abstractmethod
def check_calibration_range(self,parameters,report_nonexistance=True):
""" Checks whether the given parameters are within the ranges defined by the
CalibrationRange objects for the model and its fugacity and activity models. An empty
string will be returned if all parameters are within the calibration range. If a
parameter is not within the calibration range, a description of the problem will be
returned in the string.
Parameters
----------
parameters dict
Dictionary keys are the names of the parameters to be checked, e.g., pressure
temperature, SiO2, etc. Values are the values of each parameter. A complete set
need not be given.
Returns
-------
str
String description of any parameters falling outside of the calibration range.
"""
s = ''
for cr in self.calibration_ranges:
if cr.check(parameters) == False:
s += cr.string(parameters,report_nonexistance)
for cr in self.fugacity_model.calibration_ranges:
if cr.check(parameters) == False:
s += cr.string(parameters,report_nonexistance)
for cr in self.activity_model.calibration_ranges:
if cr.check(parameters) == False:
s += cr.string(parameters,report_nonexistance)
return s
def get_calibration_range(self):
""" Returns a string describing the calibration ranges defined by the CalibrationRange
objects for each model, and its associated fugacity and activity models.
Returns
-------
str
String description of the calibration range objects."""
s = ''
for cr in self.calibration_ranges:
s += cr.string(None)
for cr in self.fugacity_model.calibration_ranges:
s += cr.string(None)
for cr in self.activity_model.calibration_ranges:
s += cr.string(None)
return s
class FugacityModel(object):
""" The fugacity model object is for implementations of fugacity models
for individual volatile species, though it may depend on the mole
fraction of other volatile species. It contains all the methods required
to calculate the fugacity at a given pressure and mole fraction.
"""
def __init__(self):
self.set_calibration_ranges([])
def set_calibration_ranges(self,calibration_ranges):
self.calibration_ranges = calibration_ranges
@abstractmethod
def fugacity(self,pressure,**kwargs):
"""
"""
# @abstractmethod
def check_calibration_range(self,parameters,report_nonexistance=True):
s = ''
for cr in self.calibration_ranges:
if cr.check(parameters) == False:
s += cr.string(parameters,report_nonexistance)
return s
class activity_model(object):
""" The activity model object is for implementing activity models
for volatile species in melts. It contains all the methods required to
evaluate the activity.
"""
def __init__(self):
self.set_calibration_ranges([])
def set_calibration_ranges(self,calibration_ranges):
self.calibration_ranges = calibration_ranges
@abstractmethod
def activity(self,X,**kwargs):
"""
"""
# @abstractmethod
def check_calibration_range(self,parameters,report_nonexistance=True):
s = ''
for cr in self.calibration_ranges:
if cr.check(parameters) == False:
s += cr.string(parameters,report_nonexistance)
return s
class Calculate(object):
""" The Calculate object is a template for implementing user-friendly methods for
running calculations using the volatile solubility models. All Calculate methods
have a common workflow- sample is read in, preprocessed, the calculation is performed,
the calibration range is checked, and the results stored.
"""
def __init__(self,sample,model='MagmaSat',silence_warnings=False,preprocess_sample=False,**kwargs):
if model == 'MagmaSat':
self.model = MagmaSat()
elif type(model) == str:
self.model = default_models[model]
else:
self.model = model
self.sample = sample.copy()
if preprocess_sample == True:
self.sample = self.model.preprocess_sample(self.sample)
self.result = self.calculate(sample=self.sample,**kwargs)
self.calib_check = self.check_calibration_range(sample=self.sample,**kwargs)
if self.calib_check is not None and silence_warnings == False:
if self.calib_check != '':
warnings.warn(self.calib_check,RuntimeWarning)
@abstractmethod
def calculate(self):
""" """
@abstractmethod
def check_calibration_range(self):
""" """
#-------------DEFAULT CALIBRATIONRANGE OBJECTS---------------#
def crf_EqualTo(calibval,paramval):
return calibval == paramval
crmsg_EqualTo_pass = "The {param_name} ({param_val:.1f} {units}) is equal to {calib_val:.1f} {units} as required by the calibration range of the {model_name} model. "
crmsg_EqualTo_fail = "The {param_name} is outside the calibration range of the {model_name} model, as {param_val:.1f} {units} is not equal to {calib_val:.1f} {units}. "
crmsg_EqualTo_description = "The {model_name} model is calibrated for {param_name} equal to {calib_val:.1f} {units}. "
def crf_GreaterThan(calibval,paramval):
return paramval > calibval
crmsg_GreaterThan_pass = "The {param_name} ({param_val:.1f} {units}) is greater than {calib_val:.1f} {units} as required by the calibration range of the {model_name} model. "
crmsg_GreaterThan_fail = "The {param_name} is outside the calibration range of the {model_name} model, as {param_val:.1f} {units} is not greater than {calib_val:.1f} {units}. "
crmsg_GreaterThan_description = "The {model_name} model is calibrated for {param_name} greater than {calib_val:.1f} {units}. "
def crf_LessThan(calibval,paramval):
return paramval < calibval
crmsg_LessThan_pass = "The {param_name} ({param_val:.1f} {units}) is less than {calib_val:.1f} {units} as required by the calibration range of the {model_name} model. "
crmsg_LessThan_fail = "The {param_name} is outside the calibration range of the {model_name} model, as {param_val:.1f} {units} is not less than {calib_val:.1f} {units}. "
crmsg_LessThan_description = "The {model_name} model is calibrated for {param_name} less than {calib_val:.1f} {units}. "
def crf_Between(calibval,paramval):
return paramval > calibval[0] and paramval < calibval[1]
crmsg_Between_pass = "The {param_name} ({param_val:.1f} {units}) is between {calib_val0:.1f} and {calib_val1:.1f} {units} as required by the calibration range of the {model_name} model. "
crmsg_Between_fail = "The {param_name} is outside the calibration range of the {model_name} model, as {param_val:.1f} {units} is not between {calib_val0:.1f} and {calib_val1:.1f} {units}. "
crmsg_Between_description = "The {model_name} model is calibrated for {param_name} between {calib_val0:.1f} and {calib_val1:.1f} {units}. "
def crf_LiuComp(calibval=None,sample={}):
SiTest = sample['SiO2'] >= 75.0 and sample['SiO2'] <= 77.0
NaTest = sample['Na2O'] >= 3.4 and sample['Na2O'] <= 4.7
KTest = sample['K2O'] >= 3.6 and sample['K2O'] <= 5.7
AlTest = sample['Al2O3'] >= 12.1 and sample['Al2O3'] <= 13.5
return all([SiTest, NaTest, KTest, AlTest])
crmsg_LiuComp_pass = "The sample appears to be similar in composition to the rhyolites and haplogranites used to calibrate the Liu et al. model."
crmsg_LiuComp_fail = "As the Liu et al. model incorporates no term for compositional dependence, users must take extreme care when extrapolating this model to compositions which differ significantly from the haplogranites and rhyolites in the calibration dataset. These warnings are simply a guide; we suggest that users carefully compare their major element data to the calibration dataset to check for suitability."
crmsg_LiuComp_description = "The Liu et al. model is suitable for haplogranites and rhyolites."
#-------------FUGACITY MODELS--------------------------------#
class fugacity_idealgas(FugacityModel):
""" An instance of FugacityModel for an ideal gas.
"""
def fugacity(self,pressure,X_fluid=1.0,**kwargs):
""" Returns the fugacity of an ideal gas, i.e., the partial pressure.
Parameters
----------
pressure float
Total pressure of the system, in bars.
X_fluid float
The mole fraction of the species in the vapour phase.
Returns
-------
float
Fugacity (partial pressure) in bars
"""
return pressure*X_fluid
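# Illustrative example: for an ideal gas the fugacity is simply the partial pressure,
# so a 2000 bar system with X_fluid = 0.5 returns 1000 bar.
#
#   fugacity_idealgas().fugacity(pressure=2000.0, X_fluid=0.5)   # -> 1000.0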
class fugacity_KJ81_co2(FugacityModel):
""" Implementation of the Kerrick and Jacobs (1981) EOS for mixed fluids. This class
will return the properties of the CO2 component of the mixed fluid.
"""
def __init__(self):
self.set_calibration_ranges([CalibrationRange('pressure',20000.0,crf_LessThan,'bar','Kerrick and Jacobs (1981) EOS',
fail_msg=crmsg_LessThan_fail, pass_msg=crmsg_LessThan_pass, description_msg=crmsg_LessThan_description),
CalibrationRange('temperature',1050,crf_LessThan,'oC','Kerrick and Jacobs (1981) EOS',
fail_msg=crmsg_LessThan_fail, pass_msg=crmsg_LessThan_pass, description_msg=crmsg_LessThan_description)])
def fugacity(self,pressure,temperature,X_fluid,**kwargs):
""" Calculates the fugacity of CO2 in a mixed CO2-H2O fluid. Above 1050C,
it assumes H2O and CO2 do not interact, as the equations are not defined
beyond this point.
Parameters
----------
pressure float
Total pressure of the system in bars.
temperature float
Temperature in degC
X_fluid float
Mole fraction of CO2 in the fluid.
Returns
-------
float
fugacity of CO2 in bars
"""
if X_fluid == 0:
return 0
elif temperature >= 1050.0:
return pressure*np.exp(self.lnPhi_mix(pressure,temperature,1.0))*X_fluid
else:
return pressure*np.exp(self.lnPhi_mix(pressure,temperature,X_fluid))*X_fluid
def volume(self,P,T,X_fluid):
""" Calculates the volume of the mixed fluid, by solving Eq (28) of Kerrick and
Jacobs (1981) using scipy.root_scalar.
Parameters
----------
P float
Total pressure of the system, in bars.
T float
Temperature in degC
X_fluid float
Mole fraction of CO2 in the fluid
Returns
-------
float
Volume of the mixed fluid.
"""
if X_fluid != 1.0:
# x0 = self.volume(P,T,1.0)*X_fluid + self.volume_h(P,T)*(1-X_fluid)
# print(x0)
if P >= 20000 and T<800-273.15:
x0 = (X_fluid*25+(1-X_fluid)*15)
else:
x0 = (X_fluid*35+(1-X_fluid)*15)
else:
if P >= 20000 and T<800-273.15:
x0 = 25
else:
x0=35
return root_scalar(self.root_volume,x0=x0,x1=x0*0.9,args=(P,T,X_fluid)).root
def root_volume(self,v,P,T,X_fluid):
""" Returns the difference between the lhs and rhs of Eq (28) of Kerrick and Jacobs (1981).
For use with a root finder to obtain the volume of the mixed fluid.
Parameters
----------
v float
Guess for the volume
P float
Total system pressure in bars.
T float
Temperature in degC
X_fluid float
Mole fraction of CO2 in the fluid.
Returns
-------
float
Difference between lhs and rhs of Eq (28) of Kerrick and Jacobs (1981), in bars.
"""
T = T + 273.15
c = {}
h = {}
c['b'] = 58.0
c['c'] = (28.31 + 0.10721*T - 8.81e-6*T**2)*1e6
c['d'] = (9380.0 - 8.53*T + 1.189e-3*T**2)*1e6
c['e'] = (-368654.0 + 715.9*T + 0.1534*T**2)*1e6
h['b'] = 29.0
h['c'] = (290.78 - 0.30276*T + 1.4774e-4*T**2)*1e6#3
h['d'] = (-8374.0 + 19.437*T - 8.148e-3*T**2)*1e6
h['e'] = (76600.0 - 133.9*T + 0.1071*T**2)*1e6
if X_fluid == 1:
bm = c['b']
cm = c['c']
c12= c['c']
dm = c['d']
d12= c['d']
em = c['e']
e12 =c['e']
else:
bm = X_fluid*c['b'] + (1-X_fluid)*h['b']
c12 = (c['c']*h['c'])**0.5
cm = c['c']*X_fluid**2 + h['c']*(1-X_fluid)**2 + 2*X_fluid*(1-X_fluid)*c12
d12 = (c['d']*h['d'])**0.5
dm = c['d']*X_fluid**2 + h['d']*(1-X_fluid)**2 + 2*X_fluid*(1-X_fluid)*d12
e12 = (c['e']*h['e'])**0.5
em = c['e']*X_fluid**2 + h['e']*(1-X_fluid)**2 + 2*X_fluid*(1-X_fluid)*e12
am = cm + dm/v + em/v**2
y = bm/(4*v)
pt1 = (83.14 * T * (1 + y + y**2 - y**3)) / (v*(1-y)**3)
pt2 = - am / (T**0.5 * v * (v+bm))
return -(P - pt1 - pt2)
def volume_h(self,P,T):
""" Calculates the volume of a pure H2O fluid, by solving Eq (14) of
Kerrick and Jacobs (1981).
Parameters
----------
P float
Total pressure in bars.
T float
Temperature in degC.
Returns
-------
float
Volume of a pure H2O fluid.
"""
return root_scalar(self.root_volume_h,x0=15,x1=35,args=(P,T)).root
def root_volume_h(self,v,P,T):
""" Returns the difference between the lhs and rhs of Eq (14) of
Kerrick and Jacobs (1981). For use with a root solver to identify the
volume of a pure H2O fluid.
Parameters
----------
v float
Guess for the volume
P float
Total pressure in bars.
T float
Temperature in degC.
Returns
-------
float
The difference between the lhs and rhs of Eq (14) of Kerrick and Jacobs (1981),
in bars.
"""
T = T + 273.15
h = {}
h['b'] = 29.0
h['c'] = (290.78 - 0.30276*T + 1.4774e-4*T**2)*1e6#3
h['d'] = (-8374.0 + 19.437*T - 8.148e-3*T**2)*1e6
h['e'] = (76600.0 - 133.9*T + 0.1071*T**2)*1e6
h['a'] = h['c'] + h['d']/v + h['e']/v**2
y = h['b']/(4*v)
pt1 = (83.14 * T * (1 + y + y**2 - y**3)) / (v*(1-y)**3)
pt2 = - h['a'] / (T**0.5 * v * (v+h['b']))
return -(P - pt1 - pt2)
def lnPhi_mix(self,P,T,X_fluid):
""" Calculates the natural log of the fugacity coefficient for CO2 in a
mixed CO2-H2O fluid. Uses Eq (27) of Kerrick and Jacobs (1981).
Parameters
----------
P float
Total pressure in bars.
T float
Temperature in degC
X_fluid float
The mole fraction of CO2 in the fluid.
Returns
-------
float
The natural log of the fugacity coefficient for CO2 in a mixed fluid.
"""
T = T + 273.15
v = self.volume(P,T-273.15,X_fluid)
c = {}
h = {}
c['b'] = 58.0
c['c'] = (28.31 + 0.10721*T - 8.81e-6*T**2)*1e6
c['d'] = (9380.0 - 8.53*T + 1.189e-3*T**2)*1e6
c['e'] = (-368654.0 + 715.9*T + 0.1534*T**2)*1e6
h['b'] = 29.0
h['c'] = (290.78 - 0.30276*T + 1.4774e-4*T**2)*1e6#3
h['d'] = (-8374.0 + 19.437*T - 8.148e-3*T**2)*1e6
h['e'] = (76600.0 - 133.9*T + 0.1071*T**2)*1e6
if X_fluid == 1:
bm = c['b']
cm = c['c']
c12= c['c']
dm = c['d']
d12= c['d']
em = c['e']
e12 =c['e']
else:
bm = X_fluid*c['b'] + (1-X_fluid)*h['b']
c12 = (c['c']*h['c'])**0.5
cm = c['c']*X_fluid**2 + h['c']*(1-X_fluid)**2 + 2*X_fluid*(1-X_fluid)*c12
d12 = (c['d']*h['d'])**0.5
dm = c['d']*X_fluid**2 + h['d']*(1-X_fluid)**2 + 2*X_fluid*(1-X_fluid)*d12
e12 = (c['e']*h['e'])**0.5
em = c['e']*X_fluid**2 + h['e']*(1-X_fluid)**2 + 2*X_fluid*(1-X_fluid)*e12
am = cm + dm/v + em/v**2
y = bm/(4*v)
# Z = (1+y+y**2-y**3)/(1-y)**2 - am/(83.14*T**1.5*(v+bm))
Z = v*P/(83.14*T)
lnPhi = 0
lnPhi += (4*y-3*y**2)/(1-y)**2 + (c['b']/bm * (4*y-2*y**2)/(1-y)**3)
lnPhi += - (2*c['c']*X_fluid+2*(1-X_fluid)*c12)/(83.14*T**1.5*bm)*np.log((v+bm)/v)
lnPhi += - cm*c['b']/(83.14*T**1.5*bm*(v+bm))
lnPhi += cm*c['b']/(83.14*T**1.5*bm**2)*np.log((v+bm)/v)
lnPhi += - (2*c['d']*X_fluid+2*d12*(1-X_fluid)+dm)/(83.14*T**1.5*bm*v)
lnPhi += (2*c['d']*X_fluid+2*(1-X_fluid)*d12+dm)/(83.14*T**1.5*bm**2)*np.log((v+bm)/v)
lnPhi += c['b']*dm/(83.14*T**1.5*v*bm*(v+bm)) + 2*c['b']*dm/(83.14*T**1.5*bm**2*(v+bm))
lnPhi += - 2*c['b']*dm/(83.14*T**1.5*bm**3)*np.log((v+bm)/v)
lnPhi += - (2*c['e']*X_fluid + 2*(1-X_fluid)*e12+2*em)/(83.14*T**1.5*2*bm*v**2)
lnPhi += (2*c['e']*X_fluid+2*e12*(1-X_fluid)+2*em)/(83.14*T**1.5*bm**2*v)
lnPhi += - (2*c['e']*X_fluid+2*e12*(1-X_fluid)+2*em)/(83.14*T**1.5*bm**3)*np.log((v+bm)/v)
lnPhi += em*c['b']/(83.14*T**1.5*2*bm*v**2*(v+bm)) - 3*em*c['b']/(83.14*T**1.5*2*bm**2*v*(v+bm))
lnPhi += 3*em*c['b']/(83.14*T**1.5*bm**4)*np.log((v+bm)/v) - 3*em*c['b']/(83.14*T**1.5*bm**3*(v+bm))
lnPhi += - np.log(Z)
return lnPhi
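# Illustrative sketch: fugacity of the CO2 component of a mixed H2O-CO2 fluid with the
# Kerrick and Jacobs (1981) EOS. Pressure is in bars and temperature in degC, as in the
# docstrings above; the numbers are placeholders rather than benchmark values.
#
#   kj81_co2 = fugacity_KJ81_co2()
#   f_co2 = kj81_co2.fugacity(pressure=2000.0, temperature=1000.0, X_fluid=0.5)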
class fugacity_KJ81_h2o(FugacityModel):
"""Implementation of the Kerrick and Jacobs (1981) EOS for mixed fluids. This class
will return the properties of the H2O component of the mixed fluid.
"""
def __init__(self):
self.set_calibration_ranges([CalibrationRange('pressure',20000.0,crf_LessThan,'bar','Kerrick and Jacobs (1981) EOS',
fail_msg=crmsg_LessThan_fail, pass_msg=crmsg_LessThan_pass, description_msg=crmsg_LessThan_description),
CalibrationRange('temperature',1050,crf_LessThan,'oC','Kerrick and Jacobs (1981) EOS',
fail_msg=crmsg_LessThan_fail, pass_msg=crmsg_LessThan_pass, description_msg=crmsg_LessThan_description)])
def fugacity(self,pressure,temperature,X_fluid,**kwargs):
""" Calculates the fugacity of H2O in a mixed CO2-H2O fluid. Above 1050C,
it assumes H2O and CO2 do not interact, as the equations are not defined
beyond this point.
Parameters
----------
pressure float
Total pressure of the system in bars.
temperature float
Temperature in degC
X_fluid float
Mole fraction of H2O in the fluid.
Returns
-------
float
fugacity of H2O in bars
"""
if X_fluid == 0:
return 0
elif temperature >= 1050:
return pressure*np.exp(self.lnPhi_mix(pressure,temperature,1.0))*X_fluid
else:
return pressure*np.exp(self.lnPhi_mix(pressure,temperature,X_fluid))*X_fluid
def volume(self,P,T,X_fluid):
""" Calculates the volume of the mixed fluid, by solving Eq (28) of Kerrick and
Jacobs (1981) using scipy.root_scalar.
Parameters
----------
P float
Total pressure of the system, in bars.
T float
Temperature in degC
X_fluid float
Mole fraction of H2O in the fluid
Returns
-------
float
Volume of the mixed fluid.
"""
if X_fluid != 1.0:
# x0 = self.volume(P,T,1.0)*X_fluid + self.volume_h(P,T)*(1-X_fluid)
# print(x0)
if P >= 20000 and T<800-273.15:
x0 = ((1-X_fluid)*25+X_fluid*15)
else:
x0 = ((1-X_fluid)*35+X_fluid*15)
else:
if P >= 20000 and T<800-273.15:
x0 = 10
else:
x0=15
return root_scalar(self.root_volume,x0=x0,x1=x0*0.9,args=(P,T,X_fluid)).root
def root_volume(self,v,P,T,X_fluid):
""" Returns the difference between the lhs and rhs of Eq (28) of Kerrick and Jacobs (1981).
For use with a root finder to obtain the volume of the mixed fluid.
Parameters
----------
v float
Guess for the volume
P float
Total system pressure in bars.
T float
Temperature in degC
X_fluid float
Mole fraction of H2O in the fluid.
Returns
-------
float
Difference between lhs and rhs of Eq (28) of Kerrick and Jacobs (1981), in bars.
"""
T = T + 273.15
c = {}
h = {}
c['b'] = 58.0
c['c'] = (28.31 + 0.10721*T - 8.81e-6*T**2)*1e6
c['d'] = (9380.0 - 8.53*T + 1.189e-3*T**2)*1e6
c['e'] = (-368654.0 + 715.9*T + 0.1534*T**2)*1e6
h['b'] = 29.0
h['c'] = (290.78 - 0.30276*T + 1.4774e-4*T**2)*1e6#3
h['d'] = (-8374.0 + 19.437*T - 8.148e-3*T**2)*1e6
h['e'] = (76600.0 - 133.9*T + 0.1071*T**2)*1e6
if X_fluid == 1:
bm = h['b']
cm = h['c']
dm = h['d']
em = h['e']
c12= h['c']
d12= h['d']
e12= h['e']
else:
bm = X_fluid*h['b'] + (1-X_fluid)*c['b']
c12 = (c['c']*h['c'])**0.5
cm = h['c']*X_fluid**2 + c['c']*(1-X_fluid)**2 + 2*X_fluid*(1-X_fluid)*c12
d12 = (c['d']*h['d'])**0.5
dm = h['d']*X_fluid**2 + c['d']*(1-X_fluid)**2 + 2*X_fluid*(1-X_fluid)*d12
e12 = (c['e']*h['e'])**0.5
em = h['e']*X_fluid**2 + c['e']*(1-X_fluid)**2 + 2*X_fluid*(1-X_fluid)*e12
am = cm + dm/v + em/v**2
y = bm/(4*v)
pt1 = (83.14 * T * (1 + y + y**2 - y**3)) / (v*(1-y)**3)
pt2 = - am / (T**0.5 * v * (v+bm))
return -(P - pt1 - pt2)
def volume_c(self,P,T):
""" Calculates the volume of a pure CO2 fluid, by solving Eq (14) of
Kerrick and Jacobs (1981).
Parameters
----------
P float
Total pressure in bars.
T float
Temperature in degC.
Returns
-------
float
Volume of a pure CO2 fluid.
"""
return root_scalar(self.root_volume_c,x0=15,x1=35,args=(P,T)).root
def root_volume_c(self,v,P,T):
""" Returns the difference between the lhs and rhs of Eq (14) of
Kerrick and Jacobs (1981). For use with a root solver to identify the
volume of a pure CO2 fluid.
Parameters
----------
v float
Guess for the volume
P float
Total pressure in bars.
T float
Temperature in degC.
Returns
-------
float
The difference between the lhs and rhs of Eq (14) of Kerrick and Jacobs (1981),
in bars.
"""
T = T + 273.15
c = {}
c['b'] = 58.0
c['c'] = (28.31 + 0.10721*T - 8.81e-6*T**2)*1e6
c['d'] = (9380.0 - 8.53*T + 1.189e-3*T**2)*1e6
c['e'] = (-368654.0 + 715.9*T + 0.1534*T**2)*1e6
c['a'] = c['c'] + c['d']/v + c['e']/v**2
y = c['b']/(4*v)
pt1 = (83.14 * T * (1 + y + y**2 - y**3)) / (v*(1-y)**3)
pt2 = - c['a'] / (T**0.5 * v * (v+c['b']))
return -(P - pt1 - pt2)
def lnPhi_mix(self,P,T,X_fluid):
""" Calculates the natural log of the fugacity coefficient for H2O in a
mixed CO2-H2O fluid. Uses Eq (27) of Kerrick and Jacobs (1981).
Parameters
----------
P float
Total pressure in bars.
T float
Temperature in degC
X_fluid float
The mole fraction of H2O in the fluid.
Returns
-------
float
The natural log of the fugacity coefficient for H2O in a mixed fluid.
"""
T = T + 273.15
v = self.volume(P,T-273.15,X_fluid)
c = {}
h = {}
c['b'] = 58.0
c['c'] = (28.31 + 0.10721*T - 8.81e-6*T**2)*1e6
c['d'] = (9380.0 - 8.53*T + 1.189e-3*T**2)*1e6
c['e'] = (-368654.0 + 715.9*T + 0.1534*T**2)*1e6
h['b'] = 29.0
h['c'] = (290.78 - 0.30276*T + 1.4774e-4*T**2)*1e6#3
h['d'] = (-8374.0 + 19.437*T - 8.148e-3*T**2)*1e6
h['e'] = (76600.0 - 133.9*T + 0.1071*T**2)*1e6
if X_fluid == 1:
bm = h['b']
cm = h['c']
dm = h['d']
em = h['e']
c12= h['c']
d12= h['d']
e12= h['e']
else:
bm = X_fluid*h['b'] + (1-X_fluid)*c['b']
c12 = (c['c']*h['c'])**0.5
cm = h['c']*X_fluid**2 + c['c']*(1-X_fluid)**2 + 2*X_fluid*(1-X_fluid)*c12
d12 = (c['d']*h['d'])**0.5
dm = h['d']*X_fluid**2 + c['d']*(1-X_fluid)**2 + 2*X_fluid*(1-X_fluid)*d12
e12 = (c['e']*h['e'])**0.5
em = h['e']*X_fluid**2 + c['e']*(1-X_fluid)**2 + 2*X_fluid*(1-X_fluid)*e12
am = cm + dm/v + em/v**2
y = bm/(4*v)
# Z = (1+y+y**2-y**3)/(1-y)**2 - am/(83.14*T**1.5*(v+bm))
Z = v*P/(83.14*T)
lnPhi = 0
lnPhi += (4*y-3*y**2)/(1-y)**2 + (h['b']/bm * (4*y-2*y**2)/(1-y)**3)
lnPhi += - (2*h['c']*X_fluid+2*(1-X_fluid)*c12)/(83.14*T**1.5*bm)*np.log((v+bm)/v)
lnPhi += - cm*h['b']/(83.14*T**1.5*bm*(v+bm))
lnPhi += cm*h['b']/(83.14*T**1.5*bm**2)*np.log((v+bm)/v)
lnPhi += - (2*h['d']*X_fluid+2*d12*(1-X_fluid)+dm)/(83.14*T**1.5*bm*v)
lnPhi += (2*h['d']*X_fluid+2*(1-X_fluid)*d12+dm)/(83.14*T**1.5*bm**2)*np.log((v+bm)/v)
lnPhi += h['b']*dm/(83.14*T**1.5*v*bm*(v+bm)) + 2*h['b']*dm/(83.14*T**1.5*bm**2*(v+bm))
lnPhi += - 2*h['b']*dm/(83.14*T**1.5*bm**3)*np.log((v+bm)/v)
lnPhi += - (2*h['e']*X_fluid + 2*(1-X_fluid)*e12+2*em)/(83.14*T**1.5*2*bm*v**2)
lnPhi += (2*h['e']*X_fluid+2*e12*(1-X_fluid)+2*em)/(83.14*T**1.5*bm**2*v)
lnPhi += - (2*h['e']*X_fluid+2*e12*(1-X_fluid)+2*em)/(83.14*T**1.5*bm**3)*np.log((v+bm)/v)
lnPhi += em*h['b']/(83.14*T**1.5*2*bm*v**2*(v+bm)) - 3*em*h['b']/(83.14*T**1.5*2*bm**2*v*(v+bm))
lnPhi += 3*em*h['b']/(83.14*T**1.5*bm**4)*np.log((v+bm)/v) - 3*em*h['b']/(83.14*T**1.5*bm**3*(v+bm))
lnPhi += - np.log(Z)
return lnPhi
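# Illustrative sketch: the CO2 and H2O classes describe the two components of the same
# mixed fluid, so their X_fluid arguments are complementary. For a fluid with 30 mol% CO2:
#
#   X_co2 = 0.3
#   f_co2 = fugacity_KJ81_co2().fugacity(pressure=2000.0, temperature=1000.0, X_fluid=X_co2)
#   f_h2o = fugacity_KJ81_h2o().fugacity(pressure=2000.0, temperature=1000.0, X_fluid=1.0 - X_co2)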
class fugacity_ZD09_co2(FugacityModel):
""" Implementation of the Zhang and Duan (2009) fugacity model for pure CO2
fluids."""
def __init__(self):
self.set_calibration_ranges([CalibrationRange('pressure',[1,1e5],crf_Between,'bar','Zhang and Duan (2009) EOS',
fail_msg=crmsg_Between_fail, pass_msg=crmsg_Between_pass, description_msg=crmsg_Between_description),
CalibrationRange('temperature',[200,2300],crf_Between,'oC','Zhang and Duan (2009) EOS',
fail_msg=crmsg_Between_fail, pass_msg=crmsg_Between_pass, description_msg=crmsg_Between_description)])
def fugacity(self,pressure,temperature,X_fluid=1.0,**kwargs):
""" Calculates the fugacity of a pure CO2 fluid, or a mixed fluid assuming
ideal mixing. Implements eqn (14) of Zhang and Duan (2009).
Parameters
----------
pressure float
Pressure in bars
temperature float
Temperature in degC
X_fluid float
Mole fraction of CO2 in the fluid. Default is 1.0.
Returns
-------
float
Fugacity of CO2, standard state 1 bar.
"""
P = pressure/10
T = temperature + 273.15
a = np.array([0.0,
2.95177298930e-2,
-6.33756452413e3,
-2.75265428882e5,
1.29128089283e-3,
-1.45797416153e2,
7.65938947237e4,
2.58661493537e-6,
0.52126532146,
-1.39839523753e2,
-2.36335007175e-8,
5.35026383543e-3,
-0.27110649951,
2.50387836486e4,
0.73226726041,
1.5483335997e-2])
e = 235.0
s = 3.79
Pm = 3.0636*P*s**3/e
Tm = 154*T/e
Vm = root_scalar(self.Vm,x0=200,x1=100,args=(P,T)).root
S1 = ((a[1]+a[2]/Tm**2+a[3]/Tm**3)/Vm+
(a[4]+a[5]/Tm**2+a[6]/Tm**3)/(2*Vm**2)+
(a[7]+a[8]/Tm**2+a[9]/Tm**3)/(4*Vm**4)+
(a[10]+a[11]/Tm**2+a[12]/Tm**3)/(5*Vm**5)+
(a[13]/(2*a[15]*Tm**3)*(a[14]+1-(a[14]+1+a[15]/Vm**2)*
np.exp(-a[15]/Vm**2)))
)
Z = Pm*Vm/(8.314*Tm)
lnfc = Z - 1 - np.log(Z) + S1
return P*np.exp(lnfc)*10
def Vm(self,Vm,P,T):
""" Function to use for solving for the parameter Vm, defined by eqn (8) of
Zhang and Duan (2009). Called by scipy.root_scalar in the fugacity method.
Parameters
----------
Vm float
Guessed value of Vm
P float
Pressure in MPa
T float
Temperature in K
Returns
-------
float
Difference between (rearranged) LHS and RHS of eqn (8) of Zhang and Duan (2009).
"""
Pm = 3.0636*P*3.79**3/235.0
Tm = 154*T/235.0
a = np.array([0.0,
2.95177298930e-2,
-6.33756452413e3,
-2.75265428882e5,
1.29128089283e-3,
-1.45797416153e2,
7.65938947237e4,
2.58661493537e-6,
0.52126532146,
-1.39839523753e2,
-2.36335007175e-8,
5.35026383543e-3,
-0.27110649951,
2.50387836486e4,
0.73226726041,
1.5483335997e-2])
return ((1+(a[1]+a[2]/Tm**2+a[3]/Tm**3)/Vm+
(a[4]+a[5]/Tm**2+a[6]/Tm**3)/Vm**2+
(a[7]+a[8]/Tm**2+a[9]/Tm**3)/Vm**4)*0.08314*Tm/Pm - Vm
)
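# Illustrative sketch: pure-CO2 fugacity with the Zhang and Duan (2009) EOS. The
# calibration ranges set in __init__ cover 1-1e5 bar and 200-2300 oC; the values below
# are placeholders within that window.
#
#   f_co2 = fugacity_ZD09_co2().fugacity(pressure=3000.0, temperature=1100.0)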
class fugacity_MRK_co2(FugacityModel):
""" Modified Redlick Kwong fugacity model as used by VolatileCalc. Python implementation by
<NAME> (github.com/DJRgeoscience/VolatileCalcForPython), based on VB code by Newman &
Lowenstern.
"""
def __init__(self):
self.set_calibration_ranges([])
def fugacity(self,pressure,temperature,X_fluid=1.0,**kwargs):
""" Calculates the fugacity of CO2 in a pure or mixed H2O-CO2 fluid (assuming ideal mixing).
Parameters
----------
pressure float
Total pressure of the system in bars.
temperature float
Temperature in degC
X_fluid float
Mole fraction of CO2 in the fluid.
Returns
-------
float
fugacity of CO2 in bars
"""
fug = self.MRK(pressure,temperature+273.15)
return fug*X_fluid
def FNA(self,TK):
return (166800000 - 193080 * (TK - 273.15) + 186.4 * (TK - 273.15)**2 - 0.071288 * ((TK - 273.15)**3)) * 1.01325
def FNB(self,TK):
return 1.01325 * (73030000 - 71400 * (TK - 273.15) + 21.57 * (TK - 273.15)**2)
def FNC(self,TK):
R = 83.14321
return 1.01325 * (np.exp(-11.071 + 5953 / TK - 2746000 / TK**2 + 464600000 / TK**3) * 0.5 * R * R * TK**2.5 / 1.02668 + 40123800)
def FNF(self,V,TK,A,B,P):
R = 83.14321
return R * TK / (V - B) - A / ((V * V + B * V) * TK**0.5) - P
def MRK(self,P,TK): #Redlich-Kwong routine to estimate endmember H2O and CO2 fugacities
R = 83.14321
B_1 = 14.6
B_2 = 29.7
for X_1 in [0,1]:
B = X_1 * B_1 + (1 - X_1) * B_2
A = X_1**2 * self.FNA(TK) + 2 * X_1 * (1 - X_1) * self.FNC(TK) + (1 - X_1)**2 * self.FNB(TK)
Temp2 = B + 5
Q = 1
Temp1 = 0
while abs(Temp2 - Temp1) >= 0.00001:
Temp1 = Temp2
F_1 = (self.FNF(Temp1 + 0.01, TK, A, B, P) - self.FNF(Temp1, TK, A, B, P)) / 0.01
Temp2 = Temp1 - Q * self.FNF(Temp1, TK, A, B, P) / F_1
F_2 = (self.FNF(Temp2 + 0.01, TK, A, B, P) - self.FNF(Temp2, TK, A, B, P)) / 0.01
if F_2 * F_1 <= 0:
Q = Q / 2.
if abs(Temp2 - Temp1) > 0.00001:
F_1 = F_2
V = Temp2
G_1 = np.log(V / (V - B)) + B_1 / (V - B) - 2 * (X_1 * self.FNA(TK) + (1 - X_1) * self.FNC(TK)) * np.log((V + B) / V) / (R * TK**1.5 * B)
G_1 = G_1 + (np.log((V + B) / V) - B / (V + B)) * A * B_1 / (R * TK**1.5 * B**2) - np.log(P * V / (R * TK))
G_1 = np.exp(G_1)
G_2 = np.log(V / (V - B)) + B_2 / (V - B) - 2 * (X_1 * self.FNC(TK) + (1 - X_1) * self.FNB(TK)) * np.log((V + B) / V) / (R * TK**1.5 * B)
G_2 = G_2 + (np.log((V + B) / V) - B / (V + B)) * A * B_2 / (R * TK**1.5 * B**2) - np.log(P * V / (R * TK))
G_2 = np.exp(G_2)
if X_1 == 0:
fCO2o = G_2 * P #The fugacity of CO2
# return fCO2o
if X_1 == 1:
fH2Oo = G_1 * P #The fugacity of H2O
# return fH2Oo
return fCO2o
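# Illustrative sketch: the MRK routine returns the pure-CO2 endmember fugacity, which
# the fugacity method scales by X_fluid assuming ideal mixing (as in VolatileCalc).
#
#   f_co2_pure  = fugacity_MRK_co2().fugacity(pressure=2000.0, temperature=1100.0)
#   f_co2_mixed = fugacity_MRK_co2().fugacity(pressure=2000.0, temperature=1100.0, X_fluid=0.4)
#   # f_co2_mixed == 0.4 * f_co2_pure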
class fugacity_MRK_h2o(FugacityModel):
""" Modified Redlick Kwong fugacity model as used by VolatileCalc. Python implementation by
<NAME> (github.com/DJRgeoscience/VolatileCalcForPython), based on VB code by Newman &
Lowenstern.
"""
def __init__(self):
self.set_calibration_ranges([])
def fugacity(self,pressure,temperature,X_fluid=1.0,**kwargs):
""" Calculates the fugacity of H2O in a pure or mixed H2O-CO2 fluid (assuming ideal mixing).
Parameters
----------
pressure float
Total pressure of the system in bars.
temperature float
Temperature in degC
X_fluid float
Mole fraction of H2O in the fluid.
Returns
-------
float
fugacity of H2O in bars
"""
fug = self.MRK(pressure,temperature+273.15)
return fug*X_fluid
def FNA(self,TK):
return (166800000 - 193080 * (TK - 273.15) + 186.4 * (TK - 273.15)**2 - 0.071288 * ((TK - 273.15)**3)) * 1.01325
def FNB(self,TK):
return 1.01325 * (73030000 - 71400 * (TK - 273.15) + 21.57 * (TK - 273.15)**2)
def FNC(self,TK):
R = 83.14321
return 1.01325 * (np.exp(-11.071 + 5953 / TK - 2746000 / TK**2 + 464600000 / TK**3) * 0.5 * R * R * TK**2.5 / 1.02668 + 40123800)
def FNF(self,V,TK,A,B,P):
R = 83.14321
return R * TK / (V - B) - A / ((V * V + B * V) * TK**0.5) - P
def MRK(self,P,TK): #Redlich-Kwong routine to estimate endmember H2O and CO2 fugacities
R = 83.14321
B_1 = 14.6
B_2 = 29.7
# X_1 = 1
for X_1 in [0,1]:
B = X_1 * B_1 + (1 - X_1) * B_2
A = X_1**2 * self.FNA(TK) + 2 * X_1 * (1 - X_1) * self.FNC(TK) + (1 - X_1)**2 * self.FNB(TK)
Temp2 = B + 5
Q = 1
Temp1 = 0
while abs(Temp2 - Temp1) >= 0.00001:
Temp1 = Temp2
F_1 = (self.FNF(Temp1 + 0.01, TK, A, B, P) - self.FNF(Temp1, TK, A, B, P)) / 0.01
Temp2 = Temp1 - Q * self.FNF(Temp1, TK, A, B, P) / F_1
F_2 = (self.FNF(Temp2 + 0.01, TK, A, B, P) - self.FNF(Temp2, TK, A, B, P)) / 0.01
if F_2 * F_1 <= 0:
Q = Q / 2.
if abs(Temp2 - Temp1) > 0.00001:
F_1 = F_2
V = Temp2
G_1 = np.log(V / (V - B)) + B_1 / (V - B) - 2 * (X_1 * self.FNA(TK) + (1 - X_1) * self.FNC(TK)) * np.log((V + B) / V) / (R * TK**1.5 * B)
G_1 = G_1 + (np.log((V + B) / V) - B / (V + B)) * A * B_1 / (R * TK**1.5 * B**2) - np.log(P * V / (R * TK))
G_1 = np.exp(G_1)
G_2 = np.log(V / (V - B)) + B_2 / (V - B) - 2 * (X_1 * self.FNC(TK) + (1 - X_1) * self.FNB(TK)) * np.log((V + B) / V) / (R * TK**1.5 * B)
G_2 = G_2 + (np.log((V + B) / V) - B / (V + B)) * A * B_2 / (R * TK**1.5 * B**2) - np.log(P * V / (R * TK))
G_2 = np.exp(G_2)
if X_1 == 0:
fCO2o = G_2 * P #The fugacity of CO2
# return fCO2o
if X_1 == 1:
fH2Oo = G_1 * P #The fugacity of H2O
# return fH2Oo
return fH2Oo
class fugacity_HB_co2(FugacityModel):
"""
Implementation of the Holloway and Blank (1994) Modified Redlich Kwong EoS for CO2.
"""
def __init__(self):
self.set_calibration_ranges([CalibrationRange('pressure',[1,1e5],crf_Between,'bar','Redlich Kwong EOS',
fail_msg=crmsg_Between_fail, pass_msg=crmsg_Between_pass, description_msg=crmsg_Between_description),
CalibrationRange('temperature',500.0,crf_GreaterThan,'oC','Redlich Kwong EOS',
fail_msg=crmsg_GreaterThan_fail, pass_msg=crmsg_GreaterThan_pass, description_msg=crmsg_GreaterThan_description)])
self.HBmodel = fugacity_HollowayBlank()
def fugacity(self,pressure,temperature,X_fluid=1.0,**kwargs):
return self.HBmodel.fugacity(pressure=pressure, temperature=temperature, species='CO2')*X_fluid
class fugacity_HB_h2o(FugacityModel):
"""
Implementation of the Holloway and Blank (1994) Modified Redlich Kwong EoS for H2O.
"""
def __init__(self):
self.set_calibration_ranges([CalibrationRange('pressure',[1,1e5],crf_Between,'bar','Redlich Kwong EOS',
fail_msg=crmsg_Between_fail, pass_msg=crmsg_Between_pass, description_msg=crmsg_Between_description),
CalibrationRange('temperature',500.0,crf_GreaterThan,'oC','Redlich Kwong EOS',
fail_msg=crmsg_GreaterThan_fail, pass_msg=crmsg_GreaterThan_pass, description_msg=crmsg_GreaterThan_description)])
self.HBmodel = fugacity_HollowayBlank()
def fugacity(self,pressure,temperature,X_fluid=1.0,**kwargs):
return self.HBmodel.fugacity(pressure=pressure, temperature=temperature, species='H2O')*X_fluid
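# Illustrative sketch: the HB wrapper classes delegate to fugacity_HollowayBlank with
# the appropriate species keyword and scale the result by X_fluid, so
#
#   fugacity_HB_co2().fugacity(pressure=2000.0, temperature=1100.0, X_fluid=0.5)
#
# is equivalent to
#
#   fugacity_HollowayBlank().fugacity(pressure=2000.0, temperature=1100.0, species='CO2') * 0.5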
class fugacity_HollowayBlank(FugacityModel):
"""
Implementation of the Modified Redlich Kwong presented in Holloway and Blank (1994) Reviews
in Mineralogy and Geochemistry vol. 30. Originally written in Quickbasic. CO2 calculations
translated to Matlab by <NAME> and translated to python by <NAME> for VESIcal.
H2O calculations translated to VisualBasic by <NAME> and translated to python by
<NAME> for VESIcal.
"""
def __init__(self):
self.set_calibration_ranges([CalibrationRange('pressure',[1,1e5],crf_Between,'bar','MRK EOS (Holloway and Blank, 1994)',
fail_msg=crmsg_Between_fail, pass_msg=crmsg_Between_pass, description_msg=crmsg_Between_description),
CalibrationRange('temperature',500,crf_GreaterThan,'oC','MRK EOS (Holloway and Blank, 1994)',
fail_msg=crmsg_GreaterThan_fail, pass_msg=crmsg_GreaterThan_pass, description_msg=crmsg_GreaterThan_description)])
def REDKW(self, BP, A2B):
"""
The RK routine. A routine to calculate compressibility factor and fugacity coefficient
with the Redlich-Kwong equation following Edmister (1968). This solution is for a supercritical
fluid.
Parameters
----------
BP: float
B parameter sum from RKCALC
A2B: float
A parameter sum from RKCALC
Returns
-------
float
XLNFP, the natural log of the fugacity coefficient
"""
if A2B < 1*10**(-10):
A2B = 0.001
#Define constants
TH = 0.333333
RR = -A2B*BP**2
QQ = BP*(A2B-BP-1)
XN = QQ*TH+RR-0.074074
XM = QQ-TH
XNN = XN*XN*0.25
XMM = XM**3 / 27.0
ARG = XNN+XMM
if ARG > 0:
X = np.sqrt(ARG)
F = 1
XN2 = -XN*0.5
iXMM = XN2+X
if iXMM < 0:
F = -1
XMM = F*((F*iXMM)**TH)
F = 1
iXNN = XN2 - X
if iXNN < 0:
F = -1
XNN = F*((F*iXNN)**TH)
Z = XMM+XNN+TH
ZBP = Z-BP
if ZBP < 0.000001:
ZBP = 0.000001
BPZ = 1+BP/Z
FP = Z-1-np.log(ZBP)-A2B*np.log(BPZ)
if FP < -37 or FP > 37:
FP = 0.000001
elif ARG <0:
COSPHI = np.sqrt(-XNN/XMM)
if XN > 0:
COSPHI = -COSPHI
TANPHI = np.sqrt(1-COSPHI**2)/COSPHI
PHI = np.arctan(TANPHI)*TH
FAC = 2*np.sqrt(-XM*TH)
#sort for largest root
R1 = np.cos(PHI)
R2 = np.cos(PHI+2.0944)
R3 = np.cos(PHI+4.18879)
RH = R2
if R1 > R2:
RH = R1
if R3 > RH:
RH = R3
Z = RH*FAC+TH
ZBP = Z-BP
if ZBP < 0.000001:
ZBP = 0.000001
BPZ = 1+BP/Z
FP = Z-1-np.log(ZBP)-A2B*np.log(BPZ)
if FP < -37 or FP > 37:
FP = 0.000001
else:
FP = 1
Z = 1
XLNFP = FP
return XLNFP
def Saxena(self, TK, pb):
"""
High pressure corresponding states routines from Saxena and Fei (1987) GCA
vol. 51, 783-791.
Parameters
----------
TK: float
Temperature in K.
pb: float
Pressure in bars.
Returns
-------
float
XLNF, Natural log of the ratio F(P)/F(4000 bar)
"""
#Define integration limit
PO = 4000
#Critical temperatures and pressures for CO2
TR = TK/304.2
PR = pb/73.9
PC = 73.9
#Virial coefficients
A = 2.0614-2.2351/TR**2 - 0.39411*np.log(TR)
B = 0.055125/TR + 0.039344/TR**2
C = -1.8935*10**(-6)/TR - 1.1092*10**(-5)/TR**2 - 2.1892*10**(-5)/TR**3
D = 5.0527*10**(-11)/TR - 6.3033*10**(-21)/TR**3
#Calculate molar volume
Z = A+B*PR+C*PR**2+D*PR**3
V = Z*83.0117*TK/pb
#integrate from PO (4000 bars) to P to calculate ln fugacity
LNF = A*np.log(pb/PO)+(B/PC)*(pb-PO)+(C/(2*PC**2))*(pb**2-PO**2)
LNF = LNF+(D/(3*PC**3))*(pb**3-PO**3)
XLNF = LNF
return XLNF
def RKCALC(self, temperature, pressure, species):
"""
Calculation of pure gas MRK properties following Holloway 1981, 1987
Parameters
----------
temperature: float
Temperature in degrees K.
pressure: float
Pressure in atmospheres.
Returns
-------
float
Natural log of the fugacity of a pure gas.
"""
#Define constants
R = 82.05736
RR = 6732.2
pb = 1.013*pressure
PBLN = np.log(pb)
TCEL = temperature-273.15
RXT = R*temperature
RT = R*temperature**1.5 * 10**(-6)
if species == 'CO2':
#Calculate T-dependent MRK A parameter CO2
ACO2M = 73.03 - 0.0714*TCEL + 2.157*10**(-5)*TCEL**2
#Define MRK B parameter for CO2
BSUM = 29.7
ASUM = ACO2M / (BSUM*RT)
elif species == 'H2O':
#Calculate T-dependent MRK A parameter H2O
AH2OM = 115.98 - np.double(0.0016295)*temperature - 1.4984*10**(-5)*temperature**2
#Define MRK B parameter for H2O
BSUM = 14.5
ASUM = AH2OM / (BSUM*RT)
BSUM = pressure*BSUM/RXT
XLNFP = self.REDKW(BSUM, ASUM)
#Convert to ln(fugacity)
PUREG = XLNFP + PBLN
return PUREG
def fugacity(self, pressure, temperature, species, **kwargs):
"""
Calculates fugacity.
Parameters
----------
temperature: float
Temperature in degrees C.
pressure: float
Pressure in bars.
species: str
Choose which species to calculate. Options are 'H2O' and 'CO2'.
Returns
-------
float
Fugacity of the passed species.
"""
#convert temp and press to atmospheres and Kelvin
pressureAtmo = pressure/1.013
temperatureK = temperature + 273.15
PO = 4000/1.013
#Use the MRK below 4,000 bars, Saxena above 4,000 bars
if pressure > 4000 and species=='CO2':
iPUREG = self.RKCALC(temperatureK, PO, species)
XLNF = self.Saxena(temperatureK, pressure)
PUREG = iPUREG + XLNF
else:
PUREG = self.RKCALC(temperatureK, pressureAtmo, species)
#Convert from ln(fugacity) to fugacity
stdf = np.exp(PUREG)
return stdf
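# Illustrative sketch: calling the Holloway and Blank (1994) MRK directly requires the
# species keyword. For CO2 above 4000 bars the fugacity method adds the Saxena and Fei
# (1987) correction on top of the MRK value evaluated at 4000 bars.
#
#   hb = fugacity_HollowayBlank()
#   f_h2o = hb.fugacity(pressure=2000.0, temperature=1100.0, species='H2O')
#   f_co2 = hb.fugacity(pressure=6000.0, temperature=1100.0, species='CO2')  # MRK + Saxena correction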
class fugacity_RK_co2(FugacityModel):
"""
Implementation of the Redlich Kwong EoS for CO2.
Code derived from http://people.ds.cam.ac.uk/pjb10/thermo/pure.html - <NAME> 30 October 2003.
"""
def __init__(self):
self.set_calibration_ranges([CalibrationRange('pressure',[1,1e5],crf_Between,'bar','Redlich Kwong EOS',
fail_msg=crmsg_Between_fail, pass_msg=crmsg_Between_pass, description_msg=crmsg_Between_description),
CalibrationRange('temperature',500.0,crf_GreaterThan,'oC','Redlich Kwong EOS',
fail_msg=crmsg_GreaterThan_fail, pass_msg=crmsg_GreaterThan_pass, description_msg=crmsg_GreaterThan_description)])
# self.set_calibration_ranges([cr_Between('pressure',[1.0,1e5],'bar','Redlich Kwong EOS'),
# cr_GreaterThan('temperature',500,'oC','Redlich Kwong EOS')])
self.RKmodel = fugacity_RedlichKwong()
def fugacity(self,pressure,temperature,X_fluid,**kwargs):
return self.RKmodel.fugacity(pressure, temperature, X_fluid, 'CO2')
class fugacity_RK_h2o(FugacityModel):
"""
Implementation of the Redlich Kwong EoS for H2O.
Code derived from http://people.ds.cam.ac.uk/pjb10/thermo/pure.html - <NAME> 30 October 2003.
"""
def __init__(self):
self.set_calibration_ranges([CalibrationRange('pressure',[1,1e5],crf_Between,'bar','Redlich Kwong EOS',
fail_msg=crmsg_Between_fail, pass_msg=crmsg_Between_pass, description_msg=crmsg_Between_description),
CalibrationRange('temperature',500,crf_GreaterThan,'oC','Redlich Kwong EOS',
fail_msg=crmsg_GreaterThan_fail, pass_msg=crmsg_GreaterThan_pass, description_msg=crmsg_GreaterThan_description)])
self.RKmodel = fugacity_RedlichKwong()
def fugacity(self,pressure,temperature,X_fluid,**kwargs):
return self.RKmodel.fugacity(pressure, temperature, X_fluid, 'H2O')
class fugacity_RedlichKwong(FugacityModel):
"""
Implementation of the Redlich Kwong EoS
Code derived from http://people.ds.cam.ac.uk/pjb10/thermo/pure.html - <NAME> 30 October 2003.
"""
def __init__(self):
self.set_calibration_ranges([CalibrationRange('pressure',[1,1e5],crf_Between,'bar','Redlich Kwong EOS',
fail_msg=crmsg_Between_fail, pass_msg=crmsg_Between_pass, description_msg=crmsg_Between_description),
CalibrationRange('temperature',500,crf_GreaterThan,'oC','Redlich Kwong EOS',
fail_msg=crmsg_GreaterThan_fail, pass_msg=crmsg_GreaterThan_pass, description_msg=crmsg_GreaterThan_description)])
def gamma(self, pressure, temperature, species):
"""
Calculates fugacity coefficients.
Parameters
----------
temperature: float
Temperature in degrees C.
pressure: float
Pressure in bars.
species: str
Choose which species to calculate. Options are 'H2O' and 'CO2'.
Returns
-------
float
Fugacity coefficient for passed species.
"""
temperatureK = temperature + 273.15
R = 8.3145
fluid_species_names = ['CO2', 'H2O']
critical_params = {'CO2':{ "cT": 304.15,
"cP": 73.8659,
"o": 0.225
},
'H2O':{ "cT": 647.25,
"cP": 221.1925,
"o": 0.334
}
}
#Calculate a and b parameters (depend only on critical parameters)...
a = 0.42748 * R**2.0 * critical_params[species]["cT"]**(2.5) / (critical_params[species]["cP"] * 10.0**5)
b = 0.08664 * R * critical_params[species]["cT"] / (critical_params[species]["cP"] * 10.0**5)
kappa = 0.0
#Calculate coefficients in the cubic equation of state...
#coeffs: (C0, C1, C2, A, B)
A = a * pressure * 10.0**5 / (np.sqrt(temperatureK) * (R * temperatureK)**2.0)
B = b * pressure * 10.0**5 / (R * temperatureK)
C2 = -1.0
C1 = A - B - B * B
C0 = -A * B
#Solve the cubic equation for Z0 - Z2, D...
Q1 = C2 * C1 / 6.0 - C0 / 2.0 - C2**3.0 / 27.0
P1 = C2**2.0 / 9.0 - C1 / 3.0
D = Q1**2.0 - P1**3.0
if D >= 0:
kOneThird = 1.0 / 3.0
absQ1PSqrtD = np.fabs(Q1 + np.sqrt(D))
temp1 = absQ1PSqrtD**kOneThird
temp1 *= (Q1 + np.sqrt(D)) / absQ1PSqrtD
absQ1MSqrtD = np.fabs(Q1 - np.sqrt(D))
temp2 = absQ1MSqrtD**kOneThird
temp2 *= (Q1 - np.sqrt(D)) / absQ1MSqrtD
Z0 = temp1 + temp2 - C2 / 3.0
else:
temp1 = Q1**2.0 / (P1**3.0)
temp2 = np.sqrt(1.0 - temp1) / np.sqrt(temp1)
temp2 *= Q1 / np.fabs(Q1)
gamma = np.arctan(temp2)
if gamma < 0:
gamma = gamma + np.pi
Z0 = 2.0 * np.sqrt(P1) * np.cos(gamma/3.0) - C2 / 3.0
Z1 = 2.0 * np.sqrt(P1) * np.cos((gamma + 2.0 * np.pi) / 3.0) - C2/3.0
Z2 = 2.0 * np.sqrt(P1) * np.cos((gamma + 4.0 * np.pi) / 3.0) - C2/3.0
if Z0 < Z1:
temp0 = Z0
Z0 = Z1
Z1 = temp0
if Z1 < Z2:
temp0 = Z1
Z1 = Z2
Z2 = temp0
if Z0 < Z1:
temp0 = Z0
Z0 = Z1
Z1 = temp0
#Calculate Departure Functions
gamma = np.exp(Z0 - 1.0 - np.log(Z0-B) - A * np.log(1.0+B/Z0)/B)
Hdep = R * temperatureK * (Z0 - 1.0 - 1.5*A*np.log(1.0+B/Z0)/B)
Sdep = R * (np.log(Z0-B) - 0.5*A*np.log(1.0+B/Z0)/B)
return gamma
def fugacity(self, pressure, temperature, X_fluid=1.0, species='H2O', **kwargs):
"""
Calculates the fugacity of H2O or CO2 in a mixed H2O-CO2 fluid using the universal relationships:
P_i = f_i/gamma_i = (fpure_i * Xfluid_i) / gamma_i
See Iacovino (2015) EPSL for further explanation.
"""
gammaH2O = self.gamma(pressure, temperature, 'H2O')
gammaCO2 = self.gamma(pressure, temperature, 'CO2')
fugacityH2Opure = pressure * gammaH2O
fugacityCO2pure = pressure * gammaCO2
if species == 'H2O':
return fugacityH2Opure * X_fluid
elif species == 'CO2':
return fugacityCO2pure * X_fluid
else:
raise InputError("Species must be H2O or CO2.")
#---------------ACTIVITY MODELS------------------------------#
class activity_idealsolution(activity_model):
""" Implements an ideal solution activity model, i.e. it
will always return the mole fraction.
"""
def activity(self,X):
""" The activity of the component in an ideal solution, i.e., it
will return the mole fraction.
Parameters
----------
X float
The mole fraction of the species in the solution.
Returns
-------
float
The activity of the species in the solution, i.e., the mole fraction.
"""
return X
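# Illustrative example: for an ideal solution the activity is just the mole fraction.
#
#   activity_idealsolution().activity(0.25)   # -> 0.25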
#------------PURE FLUID MODELS-------------------------------#
class ShishkinaCarbon(Model):
""" Implementation of the Shishkina et al. (2014) carbon solubility model, as a Model class.
"""
def __init__(self):
self.set_volatile_species(['CO2'])
self.set_fugacity_model(fugacity_idealgas())
self.set_activity_model(activity_idealsolution())
self.set_solubility_dependence(False)
self.set_calibration_ranges([CalibrationRange('pressure',[500.0,5000.0],crf_Between,'bar','Shishkina et al. carbon',
fail_msg=crmsg_Between_fail, pass_msg=crmsg_Between_pass, description_msg=crmsg_Between_description),
CalibrationRange('temperature',[1200.0,1250.0],crf_Between,'oC','Shishkina et al. carbon',
fail_msg=crmsg_Between_fail, pass_msg=crmsg_Between_pass, description_msg=crmsg_Between_description)])
def preprocess_sample(self,sample):
""" Returns sample, unmodified. The Pi* compositional parameter is a ratio of cations,
therefore the value is not affected by the normalization of the sample. Shishkina et al.
imply the accuracy of the calculations is little affected whether Fe(tot) or Fe2+ is
used.
Parameters
----------
sample: dict or pandas Series
The major element oxides in wt%.
Returns
-------
dict or pandas Series
The major element oxides in wt%.
"""
if type(sample) != dict and type(sample) != pd.core.series.Series:
raise InputError("sample must be a dict or a pandas Series.")
return sample
def PiStar(self,sample):
"""Shishkina et al. (2014) Eq (11)
Calculates the Pi* parameter for use in calculating CO2 solubility.
Parameters
----------
sample: pandas Series or dict
Major element oxides in wt%.
Returns
-------
float
The value of the Pi* compositional parameter.
"""
_mols = wtpercentOxides_to_molCations(sample)
if all(cation in _mols for cation in ['Ca','K','Na','Mg','Fe','Si','Al']) == False:
raise InputError("To calculate PiStar, values for CaO, K2O, Na2O, MgO, FeO, SiO2, and Al2O3\
must be provided in sample.")
_pi = (_mols['Ca'] + 0.8*_mols['K'] + 0.7*_mols['Na'] + 0.4*_mols['Mg'] + 0.4*_mols['Fe'])/\
(_mols['Si']+_mols['Al'])
return _pi
def calculate_dissolved_volatiles(self,pressure,sample,X_fluid=1,**kwargs):
""" Calculates the dissolved CO2 concentration in wt%, using equation (13) of Shishkina et al. (2014).
Parameters
----------
pressure: float
(Total) pressure in bars.
sample: dict or pandas Series
Major element concentrations in wt%. Normalization does not matter.
X_fluid: float
The mol-fraction of the fluid that is CO2. Default is 1, i.e. a pure CO2 fluid.
Returns
-------
float
The dissolved CO2 concentration in wt%.
"""
if X_fluid < 0 or X_fluid > 1:
raise InputError("X_fluid must have a value between 0 and 1.")
if pressure < 0:
raise InputError("pressure must be a positive value.")
PiStar = self.PiStar(sample)
fugacity = self.fugacity_model.fugacity(pressure=pressure,X_fluid=X_fluid,**kwargs)
A = 1.150
B = 6.71
C= -1.345
if fugacity == 0:
return 0
else:
return np.exp(A*np.log(fugacity/10)+B*PiStar+C)/1e4
def calculate_equilibrium_fluid_comp(self,pressure,sample,**kwargs):
""" Returns 1.0 if a pure CO2 fluid is saturated.
Returns 0.0 if a pure CO2 fluid is undersaturated.
Parameters
----------
pressure float
The total pressure of the system in bars.
sample dict or pandas Series
Major element oxides in wt%
Returns
-------
float
1.0 if CO2-fluid saturated, 0.0 otherwise.
"""
if self.calculate_saturation_pressure(sample=sample,**kwargs) < pressure:
return 0.0
else:
return 1.0
def calculate_saturation_pressure(self,sample,**kwargs):
""" Calculates the pressure at which a pure CO2 fluid is saturated, for the given
sample composition and CO2 concentration. Calls the scipy.root_scalar routine, which makes
repeated calls to the calculate_dissolved_volatiles method.
Parameters
----------
sample dict or pandas Series
Major elements in wt%, including CO2 (also in wt%).
Returns
-------
float
Saturation pressure in bar
"""
if 'CO2' not in sample:
raise InputError("sample must contain CO2.")
if sample['CO2'] < 0:
raise InputError("CO2 concentration must be greater than 0 wt%.")
try:
satP = root_scalar(self.root_saturation_pressure,bracket=[1e-15,1e5],args=(sample,kwargs)).root
except:
warnings.warn("Saturation pressure not found.",RuntimeWarning)
satP = np.nan
return satP
def root_saturation_pressure(self,pressure,sample,kwargs):
""" Function called by scipy.root_scalar when finding the saturation pressure using
calculate_saturation_pressure.
Parameters
----------
pressure float
Pressure guess in bars
sample dict or pandas Series
Major element oxides in wt%, including CO2 (also in wt%).
kwargs dictionary
Additional keyword arguments supplied to calculate_saturation_pressure. Might be required for
the fugacity or activity models.
Returns
-------
float
The difference between the dissolved CO2 at the pressure guessed, and the CO2 concentration
passed in the sample variable.
"""
return self.calculate_dissolved_volatiles(pressure=pressure,sample=sample,**kwargs)-sample['CO2']
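# Illustrative sketch: using the Shishkina et al. (2014) carbon model directly. The
# composition below is a hypothetical basalt in oxide wt%; CaO, K2O, Na2O, MgO, FeO,
# SiO2 and Al2O3 are required to compute Pi*, and CO2 is required for the saturation
# pressure calculation.
#
#   example_basalt = {'SiO2': 49.0, 'Al2O3': 15.0, 'FeO': 10.0, 'MgO': 8.0,
#                     'CaO': 11.0, 'Na2O': 2.5, 'K2O': 0.5, 'CO2': 0.1}
#   model = ShishkinaCarbon()
#   co2_wtpc = model.calculate_dissolved_volatiles(pressure=2000.0, sample=example_basalt)
#   satP = model.calculate_saturation_pressure(sample=example_basalt)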
class ShishkinaWater(Model):
""" Implementation of the Shishkina et al. (2014) H2O solubility model as a Model class.
"""
def __init__(self):
self.set_volatile_species(['H2O'])
self.set_fugacity_model(fugacity_idealgas())
self.set_activity_model(activity_idealsolution())
self.set_solubility_dependence(False)
self.set_calibration_ranges([CalibrationRange('pressure',[500.0,5000.0],crf_Between,'bar','Shishkina et al. water',
fail_msg=crmsg_Between_fail, pass_msg=crmsg_Between_pass, description_msg=crmsg_Between_description),
CalibrationRange('temperature',[1200.0,1250.0],crf_Between,'oC','Shishkina et al. water',
fail_msg=crmsg_Between_fail, pass_msg=crmsg_Between_pass, description_msg=crmsg_Between_description)])
def preprocess_sample(self,sample):
""" Returns sample, renormlized so that the major element oxides (excluding volatiles) sum to 100%.
Normalization must be done this way as the compositional dependence of the solubility takes the
mole fractions of Na2O and K2O as inputs, presumably assuming no volatiles in the bulk composition.
Volatile concentrations are left unchanged.
Parameters
----------
sample: dict or pandas Series
The major element oxides in wt%.
Returns
-------
dict or pandas Series
The major element oxides in wt%.
"""
return normalize_AdditionalVolatiles(sample)
def calculate_dissolved_volatiles(self,pressure,sample,X_fluid=1.0,**kwargs):
"""Calculates the dissolved H2O concentration using Eqn (9) of Shishkina et al. (2014).
Parameters
----------
pressure float
Total pressure in bars
sample pandas Series or dict
Major element oxides in wt%. Normalized to zero-volatiles so that the total-alkalis
mol fraction can be determined accurately.
X_fluid float
The mol fraction of H2O in the fluid
Returns
-------
float
The H2O concentration in wt%
"""
if type(sample) != dict and type(sample) != pd.core.series.Series:
raise InputError("sample must be a dict or pandas Series.")
if all(ox in sample for ox in ['Na2O','K2O']) == False:
raise InputError("Na2O and K2O must be present in sample.")
if pressure < 0:
raise InputError("Pressure must be positive.")
_mols = wtpercentOxides_to_molCations(sample)
_mol_volatiles = 0
if 'H' in _mols:
_mol_volatiles += _mols['H']
if 'C' in _mols:
_mol_volatiles += _mols['C']
total_alkalis = (_mols['Na'] + _mols['K'])/(1-_mol_volatiles)
fugacity = self.fugacity_model.fugacity(pressure,X_fluid=X_fluid,**kwargs)
a = 3.36e-7 * (fugacity/10)**3 - 2.33e-4*(fugacity/10)**2 + 0.0711*(fugacity/10) - 1.1309
b = -1.2e-5*(fugacity/10)**2 + 0.0196*(fugacity/10)+1.1297
return a*total_alkalis + b
def calculate_equilibrium_fluid_comp(self,pressure,sample,**kwargs):
""" Returns 1.0 if a pure H2O fluid is saturated.
Returns 0.0 if a pure H2O fluid is undersaturated.
Parameters
----------
pressure float
The total pressure of the system in bars.
sample pandas Series or dict
Major element oxides in wt%, normalized on the basis of
no volatiles.
Returns
-------
float
1.0 if H2O-fluid saturated, 0.0 otherwise.
"""
if self.calculate_saturation_pressure(sample=sample,**kwargs) < pressure:
return 0.0
else:
return 1.0
def calculate_saturation_pressure(self,sample,**kwargs):
""" Calculates the pressure at which a pure H2O fluid is saturated, for the given
sample composition and H2O concentration. Calls the scipy.root_scalar routine, which makes
repeated calls to the calculate_dissolved_volatiles method.
Parameters
----------
sample pandas Series or dict
Major elements in wt% (normalized to 100%), including H2O (also in wt%, not included
in normalization).
Returns
-------
float
Saturation pressure in bar
"""
if 'H2O' not in sample:
raise InputError("sample must contain H2O")
if sample['H2O'] < 0:
raise InputError("H2O concentration must be greater than 0 wt%.")
if sample['H2O'] < self.calculate_dissolved_volatiles(sample=sample,pressure=0,**kwargs):
return np.nan
try:
satP = root_scalar(self.root_saturation_pressure,bracket=[1e-15,1e5],args=(sample,kwargs)).root
except:
warnings.warn("Saturation pressure not found.",RuntimeWarning)
satP = np.nan
return satP
def root_saturation_pressure(self,pressure,sample,kwargs):
""" Function called by scipy.root_scalar when finding the saturation pressure using
calculate_saturation_pressure.
Parameters
----------
pressure float
Pressure guess in bars
sample pandas Series or dict
Major elements in wt% (normalized to 100%), including H2O (also in wt%, not included
in normalization).
kwargs dictionary
Additional keyword arguments supplied to calculate_saturation_pressure. Might be required for
the fugacity or activity models.
Returns
-------
float
The difference between the dissolved H2O at the pressure guessed, and the H2O concentration
passed in the sample variable.
"""
return self.calculate_dissolved_volatiles(pressure=pressure,sample=sample,**kwargs)-sample['H2O']
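# Illustrative sketch: the water model follows the same pattern, but the sample should
# first pass through preprocess_sample so that the total-alkali mole fraction is
# computed on a volatile-free basis. The composition below is hypothetical.
#
#   model = ShishkinaWater()
#   sample = model.preprocess_sample({'SiO2': 49.0, 'Al2O3': 15.0, 'FeO': 10.0, 'MgO': 8.0,
#                                     'CaO': 11.0, 'Na2O': 2.5, 'K2O': 0.5, 'H2O': 2.0})
#   h2o_wtpc = model.calculate_dissolved_volatiles(pressure=2000.0, sample=sample)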
class DixonCarbon(Model):
"""
Implementation of the Dixon (1997) carbon solubility model, as a Model class.
"""
def __init__(self):
self.set_volatile_species(['CO2'])
self.set_fugacity_model(fugacity_MRK_co2())
self.set_activity_model(activity_idealsolution())
self.set_calibration_ranges([])
self.set_solubility_dependence(False)
def preprocess_sample(self,sample):
""" Returns sample, normalized, keep volatiles unchanged.
Parameters
----------
sample: pandas Series or dict
The major element oxides in wt%.
Returns
-------
pandas Series or dict
The major element oxides in wt%.
"""
return normalize_FixedVolatiles(sample)
def calculate_dissolved_volatiles(self,pressure,sample,X_fluid=1.0,**kwargs):
"""Calculates the dissolved CO2 concentration using Eqn (3) of Dixon (1997).
Parameters
----------
pressure float
Total pressure in bars.
sample pandas Series or dict
Major element oxides in wt%.
X_fluid float
The mol fraction of CO2 in the fluid.
Returns
-------
float
The CO2 concentration in wt%.
"""
if X_fluid < 0 or X_fluid > 1:
raise InputError("X_fluid must have a value between 0 and 1.")
if pressure < 0:
raise InputError("Pressure must be positive.")
if type(sample) != dict and type(sample) != pd.core.series.Series:
raise InputError("sample must be a dict or pandas Series")
if 'SiO2' not in sample:
raise InputError("sample must contain SiO2.")
if pressure == 0:
return 0
Mr = wtpercentOxides_to_formulaWeight(sample)
XCO3 = self.molfrac_molecular(pressure=pressure,sample=sample,X_fluid=X_fluid,**kwargs)
# return (4400 * XCO3) / (36.6 - 44*XCO3)
return (4400 * XCO3) / (Mr - 44*XCO3)
def calculate_equilibrium_fluid_comp(self,pressure,sample,**kwargs):
""" Returns 1.0 if a pure H2O fluid is saturated.
Returns 0.0 if a pure H2O fluid is undersaturated.
Parameters
----------
pressure float
The total pressure of the system in bars.
sample pandas Series or dict
Major element oxides in wt% (including CO2).
Returns
-------
float
1.0 if CO2-fluid saturated, 0.0 otherwise.
"""
if self.calculate_saturation_pressure(sample=sample,**kwargs) < pressure:
return 0.0
else:
return 1.0
def calculate_saturation_pressure(self,sample,X_fluid=1.0,**kwargs):
"""
Calculates the pressure at which a pure CO2 fluid is saturated, for the given sample
composition and CO2 concentration. Calls the scipy.root_scalar routine, which makes
repeated calls to the calculate_dissolved_volatiles method.
Parameters
----------
sample pandas Series or dict
Major element oxides in wt% (including CO2).
X_fluid float
The mole fraction of CO2 in the fluid. Default is 1.0.
Returns
-------
float
Calculated saturation pressure in bars.
"""
if type(sample) != dict and type(sample) != pd.core.series.Series:
raise InputError("sample must be a dict or pandas Series.")
if 'CO2' not in sample:
raise InputError("sample must contain CO2.")
if sample['CO2'] < 0:
raise InputError("Dissolved CO2 concentration must be greater than 0 wt%.")
try:
satP = root_scalar(self.root_saturation_pressure,x0=100.0,x1=1000.0,args=(sample,kwargs)).root
except:
warnings.warn("Saturation pressure not found.",RuntimeWarning)
satP = np.nan
return np.real(satP)
def molfrac_molecular(self,pressure,sample,X_fluid=1.0,**kwargs):
"""Calculates the mole fraction of CO3(-2) dissolved when in equilibrium with
a pure CO2 fluid at 1200C, using Eqn (1) of Dixon (1997).
Parameters
----------
pressure float
Total pressure in bars.
sample pandas Series or dict
Major element oxides in wt%.
X_fluid float
Mole fraction of CO2 in the fluid.
Returns
-------
float
Mole fraction of CO3(2-) dissolved."""
DeltaVr = 23.14 #cm3 mole-1
P0 = 1
R = 83.15
T0 = 1473.15
fugacity = self.fugacity_model.fugacity(pressure=pressure,X_fluid=X_fluid,**kwargs)
XCO3Std = self.XCO3_Std(sample)
return XCO3Std * fugacity * np.exp(-DeltaVr * (pressure-P0)/(R*T0))
def XCO3_Std(self,sample):
""" Calculates the mole fraction of CO3(2-) dissolved when in equilibrium with pure
CO2 vapour at 1200C and 1 bar, using Eq (8) of Dixon (1997).
Parameters
----------
sample pandas Series or dict
Major element oxides in wt%.
Returns
-------
float
Mole fraction of CO3(2-) dissolved at 1 bar and 1200C.
"""
if sample['SiO2'] > 48.9:
return 3.817e-7
else:
return 8.697e-6 - 1.697e-7*sample['SiO2']
def root_saturation_pressure(self,pressure,sample,kwargs):
""" The function called by scipy.root_scalar when finding the saturation pressure using
calculate_saturation_pressure.
Parameters
----------
pressure float
Total pressure in bars.
sample pandas Series or dict
Major element oxides in wt% (including CO2).
Returns
-------
float
The difference between the dissolved CO2 at the pressure guessed, and the CO2 concentration
passed in the sample variable.
"""
return self.calculate_dissolved_volatiles(pressure=pressure,sample=sample,**kwargs) - sample['CO2']
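# Illustrative sketch: the Dixon (1997) carbon model uses the MRK CO2 fugacity model
# set in __init__. A saturation pressure can be obtained for a sample dict that contains
# SiO2 and a dissolved CO2 concentration (the composition below is hypothetical).
#
#   model = DixonCarbon()
#   sample = model.preprocess_sample({'SiO2': 49.0, 'Al2O3': 15.0, 'FeO': 10.0, 'MgO': 8.0,
#                                     'CaO': 11.0, 'Na2O': 2.5, 'K2O': 0.5, 'CO2': 0.05})
#   satP = model.calculate_saturation_pressure(sample=sample)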
class DixonWater(Model):
"""
Implementation of the Dixon (1997) water solubility model, as a Model class.
"""
def __init__(self):
self.set_volatile_species(['H2O'])
self.set_fugacity_model(fugacity_MRK_h2o())
self.set_activity_model(activity_idealsolution())
self.set_calibration_ranges([])
self.set_solubility_dependence(False)
def preprocess_sample(self,sample):
""" Returns sample, normalized, holding volatile concentrations constant.
Parameters
----------
sample: pandas Series or dict
The major element oxides in wt%.
Returns
-------
pandas Series or dict
The major element oxides in wt%.
"""
return normalize_FixedVolatiles(sample)
def calculate_dissolved_volatiles(self,pressure,sample,X_fluid=1.0,**kwargs):
"""Calculates the dissolved H2O concentration using Eqns (5) and (6) of Dixon (1997).
Parameters
----------
pressure float
Total pressure in bars.
sample pandas Series or dict
Major element oxides in wt%.
X_fluid float
The mol fraction of H2O in the fluid.
Returns
-------
float
The H2O concentration in wt%.
"""
if type(sample) != dict and type(sample) != pd.core.series.Series:
raise InputError("sample must be a dict or a pandas Series.")
if 'SiO2' not in sample:
raise InputError("sample must contain SiO2.")
if pressure < 0:
raise InputError("Pressure must be positive")
if X_fluid < 0 or X_fluid > 1:
raise InputError("X_fluid must have a value between 0 and 1.")
if pressure == 0:
return 0
XH2O = self.molfrac_molecular(pressure=pressure,sample=sample,X_fluid=X_fluid,**kwargs)
XOH = self.XOH(pressure=pressure,sample=sample,X_fluid=X_fluid,**kwargs)
Mr = wtpercentOxides_to_formulaWeight(sample)
XB = XH2O + 0.5*XOH
# return 1801.5*XB/(36.6-18.6*XB)
return 1801.5*XB/(Mr-18.6*XB)
def calculate_equilibrium_fluid_comp(self,pressure,sample,**kwargs):
""" Returns 1.0 if a pure H2O fluid is saturated.
Returns 0.0 if a pure H2O fluid is undersaturated.
Parameters
----------
pressure float
The total pressure of the system in bars.
sample pandas Series or dict
Major element oxides in wt% (including H2O).
Returns
-------
float
1.0 if H2O-fluid saturated, 0.0 otherwise.
"""
if self.calculate_saturation_pressure(sample=sample,**kwargs) < pressure:
return 0.0
else:
return 1.0
def calculate_saturation_pressure(self,sample,X_fluid=1.0,**kwargs):
"""
Calculates the pressure at which a pure H2O fluid is saturated, for the given sample
composition and H2O concentration. Calls the scipy.root_scalar routine, which makes
repeated calls to the calculate_dissolved_volatiles method.
Parameters
----------
sample pandas Series or dict
Major element oxides in wt% (including H2O).
X_fluid float
The mole fraction of H2O in the fluid. Default is 1.0.
Returns
-------
float
Calculated saturation pressure in bars.
"""
if 'H2O' not in sample:
raise InputError("sample must contain H2O")
if sample['H2O'] < 0:
raise InputError("H2O concentration must be greater than 0 wt%.")
try:
satP = root_scalar(self.root_saturation_pressure,x0=100.0,x1=1000.0,args=(sample,kwargs)).root
except:
warnings.warn("Saturation pressure not found.",RuntimeWarning)
satP = np.nan
return np.real(satP)
def molfrac_molecular(self,pressure,sample,X_fluid=1.0,**kwargs):
"""Calculates the mole fraction of molecular H2O dissolved when in equilibrium with
a pure H2O fluid at 1200C, using Eqn (2) of Dixon (1997).
Parameters
----------
pressure float
Total pressure in bars.
sample pandas Series or dict
Major element oxides in wt%.
X_fluid float
Mole fraction of H2O in the fluid.
Returns
-------
float
Mole fraction of molecular H2O dissolved.
"""
VH2O = 12 #cm3 mole-1
P0 = 1
R = 83.15
T0 = 1473.15
XH2OStd = self.XH2O_Std(sample)
fugacity = self.fugacity_model.fugacity(pressure=pressure,X_fluid=X_fluid,**kwargs)
return XH2OStd * fugacity * np.exp(-VH2O * (pressure-P0)/(R*T0))
def XH2O_Std(self,sample):
""" Calculates the mole fraction of molecular H2O dissolved when in equilibrium with pure
H2O vapour at 1200C and 1 bar, using Eq (9) of Dixon (1997).
Parameters
----------
sample pandas Series or dict
Major element oxides in wt%.
Returns
-------
float
Mole fraction of molecular water dissolved at 1 bar and 1200C.
"""
if sample['SiO2'] > 48.9:
return 3.28e-5
else:
return -3.04e-5 + 1.29e-6*sample['SiO2']
def XOH(self,pressure,sample,X_fluid=1.0,**kwargs):
"""
Calculates the mole fraction of hydroxyl groups dissolved by solving Eq (4) of
Dixon (1997). Calls scipy.root_scalar to find the root of the XOH_root method.
Parameters
----------
pressure float
Total pressure in bars.
sample pandas Series or dict
Major element oxides in wt%.
X_fluid float
Mole fraction of H2O in the fluid.
Returns
-------
float
Mole fraction of hydroxyl groups dissolved.
"""
XH2O = self.molfrac_molecular(pressure=pressure,sample=sample,X_fluid=X_fluid,**kwargs)
if XH2O < 1e-14:
return 0
return np.exp(root_scalar(self.XOH_root,x0=np.log(0.5),x1=np.log(0.1),args=(XH2O)).root)
def XOH_root(self,XOH,XH2O):
"""
Method called by scipy.root_scalar when finding the saturation pressure using the
calculate_saturation_pressure method. Implements Eq (4) of Dixon (1997).
Parameters
----------
XOH float
Guess for the mole fraction of hydroxyl groups dissolved in melt.
XH2O float
Mole fraction of molecular water dissolved in melt.
Returns
-------
float
The difference between the RHS and LHS of Eq (4) of Dixon (1997) for the
guessed value of XOH.
"""
A = 0.403
B = 15.333
C = 10.894
XOH = np.exp(XOH)
term = (XOH)**2.0/(XH2O*(1.0-XOH-XH2O))
lhs = - np.log(term)
rhs = A + B*XOH + C*XH2O
return rhs - lhs
def root_saturation_pressure(self,pressure,sample,kwargs):
""" Function called by scipy.root_scalar when finding the saturation pressure using
calculate_saturation_pressure.
Parameters
----------
pressure float
Pressure guess in bars
sample pandas Series or dict
Major elements in wt% (normalized to 100%), including H2O.
kwargs dictionary
Additional keyword arguments supplied to calculate_saturation_pressure. Might be required for
the fugacity or activity models.
Returns
-------
float
The difference between the dissolved H2O at the pressure guessed, and the H2O concentration
passed in the sample variable.
"""
return self.calculate_dissolved_volatiles(pressure=pressure,sample=sample,**kwargs) - sample['H2O']
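# Illustrative sketch: the Dixon (1997) water model partitions dissolved H2O between
# molecular H2O and OH groups internally; its public interface mirrors the carbon model.
# The composition below is hypothetical.
#
#   model = DixonWater()
#   sample = model.preprocess_sample({'SiO2': 49.0, 'Al2O3': 15.0, 'FeO': 10.0, 'MgO': 8.0,
#                                     'CaO': 11.0, 'Na2O': 2.5, 'K2O': 0.5, 'H2O': 1.5})
#   h2o_wtpc = model.calculate_dissolved_volatiles(pressure=2000.0, sample=sample)
#   satP = model.calculate_saturation_pressure(sample=sample)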
class IaconoMarzianoWater(Model):
"""
Implementation of the Iacono-Marziano et al. (2012) water solubility model, as a Model class. Two
calibrations are provided: the one incorporating the H2O content as a parameter (hydrous), and the
one that does not (anhydrous). Specify which should be used when initialising the model, with the
bool variable hydrous.
"""
def __init__(self,hydrous=True):
"""
Initialise the model.
Parameters
----------
hydrous bool
Whether to use the hydrous parameterization, or not.
"""
self.set_volatile_species(['H2O'])
self.set_fugacity_model(fugacity_idealgas())
self.set_activity_model(activity_idealsolution())
self.hydrous = hydrous
self.set_calibration_ranges([])
self.set_solubility_dependence(False) #Not dependent on CO2 conc, H2O dependence dealt with within model.
def preprocess_sample(self,sample):
"""
Returns sample, normalized to 100 wt%, without changing the wt% of H2O and CO2 if the
hydrous parameterization is being used (default). If the anhydrous parameterization is
used, it will normalize without including H2O and CO2.
Parameters
----------
sample pandas Series or dict
Major element oxides in wt%.
Returns
-------
pandas Series or dict
Major element oxides normalized to wt%.
"""
if self.hydrous == True:
return normalize_FixedVolatiles(sample)
else:
return normalize_AdditionalVolatiles(sample)
def calculate_dissolved_volatiles(self,pressure,temperature,sample,X_fluid=1.0,
hydrous_coeffs=True,webapp_coeffs=False,**kwargs):
"""
Calculates the dissolved H2O concentration, using Eq (13) of Iacono-Marziano et al. (2012).
If using the hydrous parameterization, it will use the scipy.root_scalar routine to find the
root of the root_dissolved_volatiles method.
Parameters
----------
pressure float
Total pressure in bars.
temperature float
Temperature in C
sample pandas Series or dict
Major element oxides in wt%.
X_fluid float
Mole fraction of H2O in the fluid. Default is 1.0.
hydrous_coeffs bool
Use the hydrous or anhydrous NBO/O parameterization (True for hydrous). Default is True.
webapp_coeffs bool
If True, use the pre-review hydrous coefficients, as implemented in the IM webapp.
Default is False.
Returns
-------
float
Dissolved H2O concentration in wt%.
"""
temperature = temperature + 273.15 #translate T from C to K
if type(sample) != dict and type(sample) != pd.core.series.Series:
raise InputError("sample must be a dict or a pandas Series.")
if pressure < 0:
raise InputError("Pressure must be positive.")
if X_fluid < 0 or X_fluid > 1:
raise InputError("X_fluid must have a value between 0 and 1.")
if pressure == 0:
return 0
if hydrous_coeffs == True:
if X_fluid==0:
return 0
H2O = root_scalar(self.root_dissolved_volatiles,args=(pressure,temperature,sample,X_fluid,webapp_coeffs,kwargs),
x0=1.0,x1=2.0).root
return H2O
else:
a = 0.54
b = 1.24
B = -2.95
C = 0.02
fugacity = self.fugacity_model.fugacity(pressure=pressure,X_fluid=X_fluid,temperature=temperature-273.15,**kwargs)
if fugacity == 0:
return 0
NBO_O = self.NBO_O(sample=sample,hydrous_coeffs=False)
H2O = np.exp(a*np.log(fugacity) + b*NBO_O + B + C*pressure/temperature)
return H2O
def calculate_equilibrium_fluid_comp(self,pressure,temperature,sample,**kwargs):
""" Returns 1.0 if a pure H2O fluid is saturated.
Returns 0.0 if a pure H2O fluid is undersaturated.
Parameters
----------
pressure float
The total pressure of the system in bars.
temperature float
The temperature of the system in C.
sample pandas Series or dict
Major element oxides in wt% (including H2O).
Returns
-------
float
1.0 if H2O-fluid saturated, 0.0 otherwise.
"""
if pressure > self.calculate_saturation_pressure(temperature=temperature,sample=sample,**kwargs):
return 0.0
else:
return 1.0
def calculate_saturation_pressure(self,temperature,sample,**kwargs):
"""
Calculates the pressure at which a pure H2O fluid is saturated, for the given sample
composition and H2O concentration. Calls the scipy.root_scalar routine, which makes
repeated calls to the calculate_dissolved_volatiles method.
Parameters
----------
temperature float
The temperature of the system in C.
sample pandas Series or dict
Major element oxides in wt% (including H2O).
X_fluid float
The mole fraction of H2O in the fluid. Default is 1.0.
Returns
-------
float
Calculated saturation pressure in bars.
"""
if type(sample) != dict and type(sample) != pd.core.series.Series:
raise InputError("sample must be a dict or a pandas Series.")
if 'H2O' not in sample:
raise InputError("sample must contain H2O.")
if sample['H2O'] < 0.0:
raise InputError("Dissolved H2O must be greater than 0 wt%.")
try:
satP = root_scalar(self.root_saturation_pressure,args=(temperature,sample,kwargs),
bracket=[1e-15,1e5]).root
except:
warnings.warn("Saturation pressure not found.",RuntimeWarning)
satP = np.nan
return satP
def root_saturation_pressure(self,pressure,temperature,sample,kwargs):
""" Function called by scipy.root_scalar when finding the saturation pressure using
calculate_saturation_pressure.
Parameters
----------
pressure float
Pressure guess in bars
temperature float
The temperature of the system in C.
sample pandas Series or dict
Major elements in wt% (normalized to 100%), including H2O.
kwargs dictionary
Additional keyword arguments supplied to calculate_saturation_pressure. Might be required for
the fugacity or activity models.
Returns
-------
float
The difference between the dissolved H2O at the guessed pressure and the H2O concentration
passed in the sample variable.
"""
return sample['H2O'] - self.calculate_dissolved_volatiles(pressure=pressure,temperature=temperature,sample=sample,**kwargs)
def root_dissolved_volatiles(self,h2o,pressure,temperature,sample,X_fluid,webapp_coeffs,kwargs):
""" Function called by calculate_dissolved_volatiles method when the hydrous parameterization is
being used.
Parameters
----------
h2o float
Guess for the H2O concentration in wt%.
pressure float
Total pressure in bars.
temperature float
Temperature in K.
sample pandas Series or dict
Major element oxides in wt%.
X_fluid float
Mole fraction of H2O in the fluid.
webapp_coeffs bool
If True, use the pre-review hydrous coefficients, as implemented in the IM webapp.
kwargs dictionary
Keyword arguments
Returns
-------
float
Difference between H2O guessed and the H2O calculated.
"""
if webapp_coeffs == False:
a = 0.53
b = 2.35
B = -3.37
C = -0.02
else:
a = 0.52096846
b = 2.11575907
B = -3.24443335
C = -0.02238884
sample_copy = sample.copy()
sample_copy['H2O'] = h2o
NBO_O = self.NBO_O(sample=sample_copy,hydrous_coeffs=True)
fugacity = self.fugacity_model.fugacity(pressure=pressure,X_fluid=X_fluid,temperature=temperature-273.15,**kwargs)
return h2o - np.exp(a*np.log(fugacity) + b*NBO_O + B + C*pressure/temperature)
def NBO_O(self,sample,hydrous_coeffs=True):
"""
Calculates NBO/O according to Appendix A.1. of Iacono-Marziano et al. (2012). NBO/O
is calculated on either a hydrous or anhydrous basis, as set by the hydrous_coeffs argument.
Parameters
----------
sample pandas Series or dict
Major element oxides in wt% (including H2O if using the hydrous parameterization).
Returns
-------
float
NBO/O.
"""
if all(ox in sample for ox in ['K2O','Na2O','CaO','MgO','FeO','Al2O3','SiO2','TiO2']) == False:
raise InputError("sample must contain K2O, Na2O, CaO, MgO, FeO, Al2O3, SiO2, and TiO2.")
X = wtpercentOxides_to_molOxides(sample)
NBO = 2*(X['K2O']+X['Na2O']+X['CaO']+X['MgO']+X['FeO']-X['Al2O3'])
O = 2*X['SiO2']+2*X['TiO2']+3*X['Al2O3']+X['MgO']+X['FeO']+X['CaO']+X['Na2O']+X['K2O']
if hydrous_coeffs == True:
if 'H2O' not in sample:
raise InputError("sample must contain H2O.")
NBO = NBO + 2*X['H2O']
O = O + X['H2O']
return NBO/O
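# Hedged usage sketch (added for illustration; not part of the original source).
# The oxide values below are an arbitrary basalt-like composition invented purely to
# show the call signatures of IaconoMarzianoWater; helper names such as
# normalize_FixedVolatiles are assumed to be defined elsewhere in this module.
#
# example_sample = {'SiO2': 47.95, 'TiO2': 1.01, 'Al2O3': 17.64, 'FeO': 8.32,
#                   'MgO': 7.40, 'CaO': 12.31, 'Na2O': 2.15, 'K2O': 0.53, 'H2O': 2.0}
# im_h2o = IaconoMarzianoWater(hydrous=True)
# sample_pp = im_h2o.preprocess_sample(example_sample)
# wt_h2o = im_h2o.calculate_dissolved_volatiles(pressure=2000.0, temperature=1200.0,
#                                               sample=sample_pp, X_fluid=1.0)
# sat_p = im_h2o.calculate_saturation_pressure(temperature=1200.0, sample=sample_pp)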
class IaconoMarzianoCarbon(Model):
"""
Implementation of the Iacono-Marziano et al. (2012) carbon solubility model, as a Model class. Two
calibrations are provided: one incorporating the H2O content as a parameter (hydrous), and one
that does not (anhydrous). Specify which should be used when initialising the model with the
bool variable hydrous.
"""
def __init__(self,hydrous=True):
"""
Initialise the model.
Parameters
----------
hydrous bool
Whether to use the hydrous parameterization, or not.
"""
self.set_volatile_species(['CO2'])
self.set_fugacity_model(fugacity_idealgas())
self.set_activity_model(activity_idealsolution())
self.set_calibration_ranges([])
self.set_solubility_dependence(True)
self.hydrous = hydrous #used by preprocess_sample
def preprocess_sample(self,sample):
"""
Returns sample, normalized to 100 wt%, without changing the wt% of H2O and CO2 if the
hydrous parameterization is being used (default). If the anhydrous parameterization is
used, it will normalize without including H2O and CO2.
Parameters
----------
sample pandas Series or dict
Major element oxides in wt%.
Returns
-------
pandas Series or dict
Major element oxides normalized to wt%.
"""
if self.hydrous == True:
return normalize_FixedVolatiles(sample)
else:
return normalize_AdditionalVolatiles(sample)
def calculate_dissolved_volatiles(self,pressure,temperature,sample,X_fluid=1,
hydrous_coeffs=True, **kwargs):
"""
Calculates the dissolved CO2 concentration, using Eq (12) of Iacono-Marziano et al. (2012).
If using the hydrous parameterization, it will use the scipy.root_scalar routine to find the
root of the root_dissolved_volatiles method.
Parameters
----------
pressure float
Total pressure in bars.
temperature float
Temperature in C
sample pandas Series or dict
Major element oxides in wt%.
X_fluid float
Mole fraction of H2O in the fluid. Default is 1.0.
hydrous_coeffs bool
Use the hydrous or anhydrous NBO/O parameterisation (True for hydrous). Default is True.
Returns
-------
float
Dissolved CO2 concentration in wt%.
"""
temperature = temperature + 273.15 #translate T from C to K
if type(sample) != dict and type(sample) != pd.core.series.Series:
raise InputError("sample must be a dict or a pandas Series.")
if pressure < 0:
raise InputError("Pressure must be positive.")
if temperature <= 0:
raise InputError("Temperature must be greater than 0K.")
if X_fluid < 0 or X_fluid > 1:
raise InputError("X_fluid must have a value between 0 and 1.")
if pressure == 0:
return 0
if hydrous_coeffs == True:
if 'H2O' not in sample:
raise InputError("sample must contain H2O if using the hydrous parameterization.")
if sample['H2O'] < 0:
raise InputError("Dissolved H2O must be positive.")
im_h2o_model = IaconoMarzianoWater()
h2o = im_h2o_model.calculate_dissolved_volatiles(pressure=pressure,temperature=temperature-273.15,
sample=sample,X_fluid=1-X_fluid,**kwargs)
sample_h2o = sample.copy()
sample_h2o['H2O'] = h2o
d = np.array([-16.4,4.4,-17.1,22.8])
a = 1.0
b = 17.3
B = -6.0
C = 0.12
NBO_O = self.NBO_O(sample=sample_h2o,hydrous_coeffs=True)
molarProps = wtpercentOxides_to_molOxides(sample_h2o)
else:
d = np.array([2.3,3.8,-16.3,20.1])
a = 1.0
b = 15.8
B = -5.3
C = 0.14
NBO_O = self.NBO_O(sample=sample,hydrous_coeffs=False)
molarProps = wtpercentOxides_to_molOxides(sample)
fugacity = self.fugacity_model.fugacity(pressure=pressure,X_fluid=X_fluid,temperature=temperature-273.15,**kwargs)
if fugacity == 0:
return 0
if all(ox in molarProps for ox in ['Al2O3','CaO','K2O','Na2O','FeO','MgO']) == False:
raise InputError("sample must contain Al2O3, CaO, K2O, Na2O, FeO, and MgO.")
x = list()
if 'H2O' in molarProps:
x.append(molarProps['H2O'])
else:
x.append(0.0)
x.append(molarProps['Al2O3']/(molarProps['CaO']+molarProps['K2O']+molarProps['Na2O']))
x.append((molarProps['FeO']+molarProps['MgO']))
x.append((molarProps['Na2O']+molarProps['K2O']))
x = np.array(x)
CO3 = np.exp(np.sum(x*d) + a*np.log(fugacity) + b*NBO_O + B + C*pressure/temperature)
CO2 = CO3/1e4  # /(12+16*3)*(12+16*2)/1e4
return CO2
def calculate_equilibrium_fluid_comp(self,pressure,temperature,sample,**kwargs):
""" Returns 1.0 if a pure CO2 fluid is saturated.
Returns 0.0 if a pure CO2 fluid is undersaturated.
Parameters
----------
pressure float
The total pressure of the system in bars.
temperature float
The temperature of the system in C.
sample pandas Series or dict
Major element oxides in wt% (including H2O).
Returns
-------
float
1.0 if CO2-fluid saturated, 0.0 otherwise.
"""
if pressure > self.calculate_saturation_pressure(temperature=temperature,sample=sample,**kwargs):
return 0.0
else:
return 1.0
def calculate_saturation_pressure(self,temperature,sample,**kwargs):
"""
Calculates the pressure at which a pure CO2 fluid is saturated, for the given sample
composition and CO2 concentration. Calls the scipy.root_scalar routine, which makes
repeated calls to the calculate_dissolved_volatiles method.
Parameters
----------
temperature float
The temperature of the system in C.
sample pandas Series or dict
Major element oxides in wt% (including CO2).
Returns
-------
float
Calculated saturation pressure in bars.
"""
if temperature <= 0:
raise InputError("Temperature must be greater than 0K.")
if type(sample) != dict and type(sample) != pd.core.series.Series:
raise InputError("sample must be a dict or a pandas Series.")
if 'CO2' not in sample:
raise InputError("sample must contain CO2")
if sample['CO2'] < 0:
raise InputError("Dissolved CO2 must be greater than 0 wt%.")
try:
satP = root_scalar(self.root_saturation_pressure,args=(temperature,sample,kwargs),
bracket=[1e-15,1e5]).root
except:
warnings.warn("Saturation pressure not found.",RuntimeWarning)
satP = np.nan
return satP
def root_saturation_pressure(self,pressure,temperature,sample,kwargs):
""" Function called by scipy.root_scalar when finding the saturation pressure using
calculate_saturation_pressure.
Parameters
----------
pressure float
Pressure guess in bars
temperature float
The temperature of the system in C.
sample pandas Series or dict
Major elements in wt% (normalized to 100%), including CO2.
kwargs dictionary
Additional keyword arguments supplied to calculate_saturation_pressure. Might be required for
the fugacity or activity models.
Returns
-------
float
The difference between the dissolved CO2 at the guessed pressure and the CO2 concentration
passed in the sample variable.
"""
return sample['CO2'] - self.calculate_dissolved_volatiles(pressure=pressure,temperature=temperature,sample=sample,**kwargs)
def NBO_O(self,sample,hydrous_coeffs=True):
"""
Calculates NBO/O according to Appendix A.1. of Iacono-Marziano et al. (2012). NBO/O
is calculated on either a hydrous or anhydrous basis, as set by the hydrous_coeffs argument.
Parameters
----------
sample pandas Series or dict
Major element oxides in wt% (including H2O if using the hydrous parameterization).
Returns
-------
float
NBO/O.
"""
if type(sample) != dict and type(sample) != pd.core.series.Series:
raise InputError("sample must be a dict or a pandas Series,")
if all(ox in sample for ox in ['K2O','Na2O','CaO','MgO','FeO','Al2O3','SiO2','TiO2']) == False:
raise InputError("sample must contain K2O, Na2O, CaO, MgO, FeO, Al2O3, SiO2, and TiO2.")
X = wtpercentOxides_to_molOxides(sample)
NBO = 2*(X['K2O']+X['Na2O']+X['CaO']+X['MgO']+X['FeO']-X['Al2O3'])
O = 2*X['SiO2']+2*X['TiO2']+3*X['Al2O3']+X['MgO']+X['FeO']+X['CaO']+X['Na2O']+X['K2O']
if hydrous_coeffs == True:
if 'H2O' not in X:
raise InputError("sample must contain H2O if using the hydrous parameterization.")
NBO = NBO + 2*X['H2O']
O = O + X['H2O']
return NBO/O
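# Hedged usage sketch (added for illustration; not part of the original source).
# Because the hydrous coefficients couple CO2 solubility to dissolved H2O, the sample
# must carry an 'H2O' entry when hydrous_coeffs=True. example_sample refers to the
# invented composition sketched above.
#
# im_co2 = IaconoMarzianoCarbon()
# wt_co2 = im_co2.calculate_dissolved_volatiles(pressure=3000.0, temperature=1200.0,
#                                               sample=example_sample, X_fluid=1.0)
# sat_p = im_co2.calculate_saturation_pressure(temperature=1200.0,
#                                              sample={**example_sample, 'CO2': 0.10})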
class EguchiCarbon(Model):
"""
Implementation of the Eguchi and Dasgupta (2018) CO2 solubility model for andesitic melts.
Uses the Zhang and Duan (2009) CO2 EOS for fugacity calculations, assuming a pure CO2 fluid,
or ideal mixing for mixed fluids.
"""
def __init__(self):
warnings.warn("Eguchi model is not working correctly. Do not use any results calculated.")
self.set_volatile_species(['CO2'])
self.set_fugacity_model(fugacity_ZD09_co2())
self.set_activity_model(activity_idealsolution())
self.set_solubility_dependence(False)
self.set_calibration_ranges([CalibrationRange('pressure',[500.0,50000.0],crf_Between,'bar','Eguchi & Dasgupta (2018) carbon',
fail_msg=crmsg_Between_fail, pass_msg=crmsg_Between_pass, description_msg=crmsg_Between_description),
CalibrationRange('temperature',[950.0,1600],crf_Between,'oC','Eguchi & Dasgupta (2018) carbon',
fail_msg=crmsg_Between_fail, pass_msg=crmsg_Between_pass, description_msg=crmsg_Between_description)])
def preprocess_sample(self,sample,ferric_total=0.15):
""" Returns normalized sample composition, with ferric iron. Where a sample
already contains ferric iron, the composition will be normalized to 100 wt%
(excluding H2O and CO2). Where a sample contains only FeO, ferric iron will
be calculated using the ferric/total iron ratio provided.
Parameters
----------
sample pandas Series or dict
Major element oxides in wt%.
ferric_total float
Mole ratio of ferric to total iron to be used
for calculating Fe2O3 and FeO when only FeO is
provided. Default is 0.15.
Returns
-------
pandas Series or dict
Normalized major element oxides in wt%.
"""
if type(sample) != dict and type(sample) != pd.core.series.Series:
raise InputError("sample must be a dict or a pandas Series.")
if 'FeO' not in sample:
raise InputError("sample must contain FeO.")
_sample = sample.copy()
for ox in ['TiO2','P2O5']:
if ox not in _sample:
_sample[ox] = 0.0
if 'Fe2O3' not in _sample:
Fe_t = _sample['FeO']/oxideMass['FeO']
Fe3 = ferric_total*Fe_t
Fe2 = Fe_t - Fe3
_sample['FeO'] = Fe2*oxideMass['FeO']
_sample['Fe2O3'] = Fe3*oxideMass['Fe2O3']/2
return normalize_AdditionalVolatiles(_sample)
def calculate_dissolved_volatiles(self,pressure,temperature,sample,X_fluid=1.0,**kwargs):
"""
Calculates the dissolved (total) CO2 using eqs (9) and (10) of Eguchi and Dasgupta (2018).
Parameters
----------
pressure float
Pressure in bars
temperature float
Temperature in C
sample pandas Series or dict
Major element oxides in wt%.
X_fluid float
The mole fraction of CO2 in the fluid.
Returns
-------
float
Dissolved CO2 concentration.
"""
if pressure < 0:
raise InputError("Pressure must be greater than 0 bar.")
if pressure == 0:
return 0
XCO3 = self.Xi_melt(pressure=pressure,temperature=temperature,sample=sample,X_fluid=X_fluid,species='CO3')
XCO2 = self.Xi_melt(pressure=pressure,temperature=temperature,sample=sample,X_fluid=X_fluid,species='CO2')
FW_one = wtpercentOxides_to_formulaWeight(sample)
CO2_CO2 = ((44.01*XCO2)/(44.01*XCO2+(1-(XCO2+XCO3))*FW_one))*100
CO2_CO3 = ((44.01*XCO3)/(44.01*XCO3+(1-(XCO2+XCO3))*FW_one))*100
return CO2_CO2 + CO2_CO3
def calculate_equilibrium_fluid_comp(self,pressure,temperature,sample,**kwargs):
""" Returns 1.0 if a pure CO2 fluid is saturated.
Returns 0.0 if a pure CO2 fluid is undersaturated.
Parameters
----------
pressure float
The total pressure of the system in bars.
temperature float
The temperature of the system in C.
sample pandas Series or dict
Major element oxides in wt% (including H2O).
Returns
-------
float
1.0 if CO2-fluid saturated, 0.0 otherwise.
"""
satP = self.calculate_saturation_pressure(temperature=temperature,sample=sample,X_fluid=1.0,**kwargs)
if pressure < satP:
return 1.0
else:
return 0.0
def calculate_saturation_pressure(self,temperature,sample,X_fluid=1.0,**kwargs):
"""
Calculates the pressure at which a pure CO2 fluid is saturated, for the given sample
composition and CO2 concentration. Calls the scipy.root_scalar routine, which makes
repeated calls to the calculate_dissolved_volatiles method.
Parameters
----------
temperature float
The temperature of the system in C.
sample pandas Series or dict
Major element oxides in wt% (including CO2).
X_fluid float
The mole fraction of H2O in the fluid. Default is 1.0.
Returns
-------
float
Calculated saturation pressure in bars.
"""
if 'CO2' not in sample:
raise InputError("sample must contain CO2.")
if sample['CO2'] < 0.0:
raise InputError("Concentration of CO2 must be greater than 0 wt%.")
try:
satP = root_scalar(self.root_saturation_pressure,x0=1000.0,x1=2000.0,
args=(temperature,sample,X_fluid,kwargs)).root
except:
warnings.warn("Saturation pressure not found.",RuntimeWarning)
satP = np.nan
return satP
def root_saturation_pressure(self,pressure,temperature,sample,X_fluid,kwargs):
""" Function called by scipy.root_scalar when finding the saturation pressure using
calculate_saturation_pressure.
Parameters
----------
pressure float
Pressure guess in bars
temperature float
The temperature of the system in C.
sample pandas Series or dict
Major elements in wt% (normalized to 100%), including CO2.
kwargs dictionary
Additional keyword arguments supplied to calculate_saturation_pressure. Might be required for
the fugacity or activity models.
Returns
-------
float
The difference between the dissolved CO2 at the guessed pressure and the CO2 concentration
passed in the sample variable.
"""
return sample['CO2'] - self.calculate_dissolved_volatiles(pressure=pressure,temperature=temperature,sample=sample,X_fluid=X_fluid,**kwargs)
def Xi_melt(self,pressure,temperature,sample,species,X_fluid=1.0,**kwargs):
"""
Calculates the mole fraction of dissolved molecular CO2 or carbonate CO3(2-), using
eqn (9) of Eguchi and Dasgupta (2018).
Parameters
----------
pressure float
Pressure in bars.
temperature float
Temperature in C.
sample pandas Series or dict
Major element oxides in wt%.
species str
Which species to calculate, molecular CO2 'CO2' or carbonate ion 'CO3'.
X_fluid float
The mole fraction of CO2 in the fluid. Default is 1.0.
Returns
-------
float
Mole fraction of selected species in the melt
"""
temperature = temperature + 273.15 #translate T from C to K
if all(ox in sample for ox in ['MgO','CaO','FeO','Na2O','K2O','MnO','Al2O3','Fe2O3','SiO2','TiO2','P2O5']) == False:
raise InputError("sample must contain MgO, CaO, FeO, Na2O, K2O, MnO, Al2O3, Fe2O3, SiO3, TiO2, and P2O5.")
if X_fluid < 0 or X_fluid > 1:
raise InputError("X_fluid must have a value between 0 and 1.")
if pressure < 0:
raise InputError("Pressure must be positive.")
if temperature <= 0:
raise InputError("Temperature must be greater than 0K.")
if species == 'CO3':
DH = -1.65e5
DV = 2.38e-5
DS = -43.64
B = 1.47e3
yNBO = 3.29
A_CaO = 1.68e5
A_Na2O = 1.76e5
A_K2O = 2.11e5
elif species == 'CO2':
DH = -9.02e4
DV = 1.92e-5
DS = -43.08
B = 1.12e3
yNBO = -7.09
A_CaO = 0
A_Na2O = 0
A_K2O = 0
else:
raise InputError("species variable must be either 'CO2' or 'CO3'.")
R = 8.314
# Calculate NBO term
cations = wtpercentOxides_to_molSingleO(sample)
oxides = wtpercentOxides_to_molOxides(sample)
NM = (cations['Mg'] + cations['Ca'] + cations['Fe'] + cations['Na'] +
cations['K'] + cations['Mn'])
Al = cations['Al'] - NM
if Al > 0:
Al = NM
else:
Al = cations['Al']
Fe = cations['Fe3'] + Al
if Al > 0:
Fe = 0
if Al < 0 and Fe > 0:
Fe = - Al
if Al < 0 and Fe < 0:
Fe = cations['Fe3']
Tet = cations['Si'] + cations['Ti'] + cations['P'] + Al + Fe
NBO = 2 - 4*Tet
lnfCO2 = np.log(self.fugacity_model.fugacity(pressure=pressure,temperature=temperature-273.15,X_fluid=X_fluid))
lnXi = ((DH/(R*temperature)-(pressure*1e5*DV)/(R*temperature)+DS/R) +
(A_CaO*oxides['CaO']+A_Na2O*oxides['Na2O']+A_K2O*oxides['K2O'])/(R*temperature) +
(B*lnfCO2/temperature) + yNBO*NBO
)
return np.exp(lnXi)
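# Added note (hedged, restating the code in Xi_melt above): Eq (9) of Eguchi and
# Dasgupta (2018) is evaluated as
#   ln X_i = dH/(R*T) - P*dV/(R*T) + dS/R
#            + (A_CaO*x_CaO + A_Na2O*x_Na2O + A_K2O*x_K2O)/(R*T)
#            + B*ln(fCO2)/T + y_NBO*NBO
# with P converted from bars to Pa via the 1e5 factor. No usage example is given here
# because the class constructor itself warns that its results should not be used.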
class MooreWater(Model):
"""
Implementation of the Moore et al. (1998) H2O solubility model for magmas up to 3,000 bars.
"""
def __init__(self):
"""
Initialize the model.
"""
self.set_volatile_species(['H2O'])
self.set_fugacity_model(fugacity_HB_h2o())
self.set_activity_model(activity_idealsolution())
self.set_solubility_dependence(False)
self.set_calibration_ranges([CalibrationRange('pressure',[1.0,3000.0],crf_Between,'bar','Moore et al. (1998) water',
fail_msg=crmsg_Between_fail, pass_msg=crmsg_Between_pass, description_msg=crmsg_Between_description),
CalibrationRange('temperature',[700.0,1200],crf_Between,'oC','Moore et al. (1998) water',
fail_msg=crmsg_Between_fail, pass_msg=crmsg_Between_pass, description_msg=crmsg_Between_description)])
# self.set_calibration_ranges([cr_Between('pressure',[1.0,3000.0],'bar','Moore et al. (1998) water'),
# cr_Between('temperature',[700.0+273.15,1200+273.15],'oC','Moore et al. (1998) water')])
def preprocess_sample(self, sample):
"""
Returns sample with extraneous (non-oxide) information removed and any missing oxides given a value of 0.0.
"""
for oxide in oxides:
if oxide in sample.keys():
pass
else:
sample[oxide] = 0.0
bulk_comp = {oxide: sample[oxide] for oxide in oxides}
self.bulk_comp_orig = sample
return bulk_comp
def calculate_dissolved_volatiles(self, sample, pressure, temperature, X_fluid=1.0, **kwargs):
"""
Parameters
----------
sample dict
Composition of sample in wt% oxides.
pressure float
Pressure in bars.
temperature float
Temperature in degrees C.
X_fluid float
OPTIONAL. Default is 1.0. Mole fraction of H2O in the H2O-CO2 fluid.
Returns
-------
float
Calculated dissolved H2O concentration in wt%.
"""
_sample = sample.copy()
_sample['H2O'] = 0.0
_sample['CO2'] = 0.0
_sample = normalize(_sample)
fH2O = self.fugacity_model.fugacity(pressure=pressure,temperature=temperature,X_fluid=X_fluid,**kwargs)
aParam = 2565.0
bParam_Al2O3 = -1.997
bParam_FeOt = -0.9275
bParam_Na2O = 2.736
cParam = 1.171
dParam = -14.21
temperatureK = temperature + 273.15
sample_molfrac = wtpercentOxides_to_molOxides(_sample)
FeOtot = sample_molfrac['FeO'] + sample_molfrac['Fe2O3']*0.8998
b_x_sum = (bParam_Al2O3 * sample_molfrac['Al2O3']) + (bParam_FeOt * FeOtot) + (bParam_Na2O * sample_molfrac['Na2O'])
two_ln_XH2Omelt = (aParam / temperatureK) + b_x_sum * (pressure/temperatureK) + cParam * np.log(fH2O) + dParam
ln_XH2Omelt = two_ln_XH2Omelt / 2.0
XH2Omelt = np.exp(ln_XH2Omelt)
sample_molfrac['H2O'] = XH2Omelt
#Normalize mol fractions to sum to 1, while preserving XH2O
for key, value in sample_molfrac.items():
if key != 'H2O':
sample_molfrac.update({key: value/((1/(1-sample_molfrac['H2O'])))})
sample_wtper = mol_to_wtpercent(sample_molfrac)
return sample_wtper['H2O']
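# Added note (hedged, restating the code above): the Moore et al. (1998) relation is
# applied in the form
#   2*ln(X_H2O^melt) = a/T + (b_Al2O3*X_Al2O3 + b_FeOt*X_FeOt + b_Na2O*X_Na2O)*(P/T)
#                      + c*ln(f_H2O) + d
# with T in Kelvin, P in bars and anhydrous mole fractions X; the method below simply
# inverts the same expression for f_H2O.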
def calculate_equilibrium_fluid_comp(self, sample, pressure, temperature, **kwargs):
"""
Parameters
----------
sample dict
Composition of sample in wt% oxides.
pressure float
Pressure in bars.
temperature float
Temperature in degrees C.
Returns
-------
float
Calculated equilibrium fluid concentration in XH2Ofluid mole fraction.
"""
_sample = sample.copy()
sample_anhy = sample.copy()
sample_anhy["H2O"] = 0.0
sample_anhy["CO2"] = 0.0
aParam = 2565.0
bParam_Al2O3 = -1.997
bParam_FeOt = -0.9275
bParam_Na2O = 2.736
cParam = 1.171
dParam = -14.21
temperatureK = temperature + 273.15
sample_molfrac_anhy = wtpercentOxides_to_molOxides(sample_anhy)
sample_molfrac_hy = wtpercentOxides_to_molOxides(_sample)
FeOtot = sample_molfrac_anhy['FeO'] + sample_molfrac_anhy['Fe2O3']*0.8998
b_x_sum = (bParam_Al2O3 * sample_molfrac_anhy['Al2O3']) + (bParam_FeOt * FeOtot) + (bParam_Na2O * sample_molfrac_anhy['Na2O'])
ln_fH2O = (2 * np.log(sample_molfrac_hy['H2O']) - (aParam/temperatureK) - b_x_sum * (pressure/temperatureK) - dParam) / cParam
fH2O = np.exp(ln_fH2O)
XH2O_fl = fH2O / pressure
# SM: I've changed this to return X_H2O only, as otherwise it doesn't conform to other single-volatile
# models. I'm not sure this is the best solution though.
# return (XCO2_fl, XH2O_fl)
return XH2O_fl
def calculate_saturation_pressure(self,temperature,sample,X_fluid=1.0,**kwargs):
"""
Calculates the pressure at which an H2O-bearing fluid is saturated. Calls the scipy.root_scalar
routine, which makes repeated calls to the calculate_dissolved_volatiles method.
Parameters
----------
sample dict
Composition of sample in wt% oxides.
temperature float
Temperature in degrees C.
X_fluid float
OPTIONAL. Default is 1.0. Mole fraction of H2O in the H2O-CO2 fluid.
Returns
-------
float
Calculated saturation pressure in bars.
"""
_sample = sample.copy()
temperatureK = temperature + 273.15
if temperatureK <= 0.0:
raise InputError("Temperature must be greater than 0K.")
if X_fluid < 0 or X_fluid > 1:
raise InputError("X_fluid must have a value between 0 and 1.")
if type(sample) != dict and type(sample) != pd.core.series.Series:
raise InputError("sample must be a dict or a pandas Series.")
if 'H2O' not in sample:
raise InputError("sample must contain H2O.")
if sample['H2O'] < 0.0:
raise InputError("Dissolved H2O concentration must be greater than 0 wt%.")
try:
satP = root_scalar(self.root_saturation_pressure,args=(temperature,_sample,X_fluid,kwargs),
x0=100.0,x1=2000.0).root
except:
warnings.warn("Saturation pressure not found.",RuntimeWarning)
satP = np.nan
return np.real(satP)
def root_saturation_pressure(self,pressure,temperature,sample,X_fluid,kwargs):
""" Function called by scipy.root_scalar when finding the saturation pressure using
calculate_saturation_pressure.
Parameters
----------
pressure float
Pressure guess in bars
temperature float
The temperature of the system in C.
sample pandas Series or dict
Major elements in wt% (normalized to 100%), including H2O.
kwargs dictionary
Additional keyword arguments supplied to calculate_saturation_pressure. Might be required for
the fugacity or activity models.
Returns
-------
float
The difference between the dissolved H2O at the guessed pressure and the H2O concentration
passed in the sample variable.
"""
return self.calculate_dissolved_volatiles(pressure=pressure,temperature=temperature,sample=sample,X_fluid=X_fluid,**kwargs) - sample['H2O']
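# Hedged usage sketch (added for illustration; not part of the original source).
# MooreWater.preprocess_sample relies on a module-level `oxides` list (assumed to be
# defined elsewhere in this file); example_sample is the invented composition above.
#
# moore = MooreWater()
# moore_sample = moore.preprocess_sample(example_sample)
# wt_h2o = moore.calculate_dissolved_volatiles(sample=moore_sample, pressure=1500.0,
#                                              temperature=1000.0, X_fluid=1.0)
# sat_p = moore.calculate_saturation_pressure(temperature=1000.0, sample=moore_sample)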
class LiuWater(Model):
"""
Implementation of the Liu et al. (2005) H2O solubility model for metaluminous high-silica rhyolitic melts.
"""
def __init__(self):
"""
Initialize the model.
"""
self.set_volatile_species(['H2O'])
self.set_fugacity_model(fugacity_idealgas())
self.set_activity_model(activity_idealsolution())
self.set_solubility_dependence(False)
self.set_calibration_ranges([CalibrationRange('pressure',[1.0,5000.0],crf_Between,'bar','Liu et al. (2005) water',
fail_msg=crmsg_Between_fail, pass_msg=crmsg_Between_pass, description_msg=crmsg_Between_description),
CalibrationRange('temperature',[700.0,1200],crf_Between,'oC','Liu et al. (2005) water',
fail_msg=crmsg_Between_fail, pass_msg=crmsg_Between_pass, description_msg=crmsg_Between_description),
CalibrationRange('sample',None,crf_LiuComp,None,None,
fail_msg=crmsg_LiuComp_fail, pass_msg=crmsg_LiuComp_pass, description_msg=crmsg_LiuComp_description)])
# self.set_calibration_ranges([cr_Between('pressure',[1.0,5000.0],'bar','Liu et al. (2005) water'),
# cr_Between('temperature',[700.0,1200],'oC','Liu et al. (2005) water')])
def preprocess_sample(self, sample):
"""
Returns sample with extraneous (non-oxide) information removed and any missing oxides given a value of 0.0.
"""
for oxide in oxides:
if oxide in sample.keys():
pass
else:
sample[oxide] = 0.0
bulk_comp = {oxide: sample[oxide] for oxide in oxides}
self.bulk_comp_orig = sample
return bulk_comp
def calculate_dissolved_volatiles(self, sample, pressure, temperature, X_fluid=1.0, **kwargs):
"""
Parameters
----------
sample dict
Composition of sample in wt% oxides.
pressure float
Pressure in bars.
temperature float
Temperature in degrees C.
X_fluid float
OPTIONAL. Default is 1.0. Mole fraction of H2O in the H2O-CO2 fluid.
Returns
-------
float
Calculated dissolved H2O concentration in wt%.
"""
pressureMPa = pressure / 10.0
Pw = pressureMPa * X_fluid
PCO2 = pressureMPa * (1 - X_fluid)
temperatureK = temperature + 273.15
H2Ot = ((354.94*Pw**(0.5) + 9.623*Pw - 1.5223*Pw**(1.5)) / temperatureK +
0.0012439*Pw**(1.5) + PCO2*(-1.084*10**(-4)*Pw**(0.5) - 1.362*10**(-5)*Pw))
return H2Ot
def calculate_equilibrium_fluid_comp(self, sample, pressure, temperature, **kwargs):
"""
Parameters
----------
sample dict
Composition of sample in wt% oxides.
pressure float
Pressure in bars.
temperature float
Temperature in degrees C.
Returns
-------
float
Calculated equilibrium fluid concentration in XH2Ofluid mole fraction.
"""
temperatureK = temperature + 273.15
pressureMPa = pressure / 10.0
_sample = sample.copy()
H2Ot = _sample["H2O"]
#calculate saturation pressure and assert that input P <= SatP
satP = self.calculate_saturation_pressure(temperature,sample)
is_saturated = satP - pressure
if is_saturated >= 0:
pass
else:
warnings.warn("{:.1f} bars is above the saturation pressure ({:.1f} bars) for this sample. Results from this calculation may be nonsensical.".format(pressure,satP))
#Use sympy to solve solubility equation for XH2Ofluid
XH2Ofluid = sympy.symbols('XH2Ofluid') #XH2Ofluid is the variable to solve for
equation = ((354.94*(XH2Ofluid*pressureMPa)**(0.5) + 9.623*(XH2Ofluid*pressureMPa)
- 1.5223*(XH2Ofluid*pressureMPa)**(1.5)) / temperatureK
+ 0.0012439*(XH2Ofluid*pressureMPa)**(1.5)
+ pressureMPa*(1-XH2Ofluid)*(-1.084*10**(-4)*(XH2Ofluid*pressureMPa)**(0.5)
- 1.362*10**(-5)*(XH2Ofluid*pressureMPa)) - H2Ot)
XH2Ofluid = sympy.solve(equation, XH2Ofluid)[0]
if XH2Ofluid > 1:
XH2Ofluid = 1
if XH2Ofluid < 0:
XH2Ofluid = 0
return XH2Ofluid
def calculate_saturation_pressure(self,temperature,sample,X_fluid=1.0,**kwargs):
"""
Calculates the pressure at which an H2O-bearing fluid is saturated. Calls the scipy.root_scalar
routine, which makes repeated calls to the calculate_dissolved_volatiles method.
Parameters
----------
sample dict
Composition of sample in wt% oxides.
temperature float
Temperature in degrees C.
X_fluid float
OPTIONAL. Default is 1.0. Mole fraction of H2O in the H2O-CO2 fluid.
Returns
-------
float
Calculated saturation pressure in bars.
"""
_sample = sample.copy()
temperatureK = temperature + 273.15
if temperatureK <= 0.0:
raise InputError("Temperature must be greater than 0K.")
if X_fluid < 0 or X_fluid > 1:
raise InputError("X_fluid must have a value between 0 and 1.")
if type(sample) != dict and type(sample) != pd.core.series.Series:
raise InputError("sample must be a dict or a pandas Series.")
if 'H2O' not in sample:
raise InputError("sample must contain H2O.")
if sample['H2O'] < 0.0:
raise InputError("Dissolved H2O concentration must be greater than 0 wt%.")
try:
satP = root_scalar(self.root_saturation_pressure,args=(temperature,_sample,X_fluid,kwargs),
x0=10.0,x1=200.0).root
except:
warnings.warn("Saturation pressure not found.",RuntimeWarning)
satP = np.nan
return np.real(satP)
def root_saturation_pressure(self,pressure,temperature,sample,X_fluid,kwargs):
""" Function called by scipy.root_scalar when finding the saturation pressure using
calculate_saturation_pressure.
Parameters
----------
pressure float
Pressure guess in bars
temperature float
The temperature of the system in C.
sample pandas Series or dict
Major elements in wt% (normalized to 100%), including H2O.
kwargs dictionary
Additional keyword arguments supplied to calculate_saturation_pressure. Might be required for
the fugacity or activity models.
Returns
-------
float
The difference between the dissolved H2O at the guessed pressure and the H2O concentration
passed in the sample variable.
"""
return self.calculate_dissolved_volatiles(pressure=pressure,temperature=temperature,sample=sample,X_fluid=X_fluid,**kwargs) - sample['H2O']
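# Hedged usage sketch (added for illustration; not part of the original source).
# The Liu et al. (2005) water fit depends only on P, T and X_fluid, so the sample
# composition does not enter calculate_dissolved_volatiles; an 'H2O' entry is only
# needed when inverting for the saturation pressure.
#
# liu_h2o = LiuWater()
# wt_h2o = liu_h2o.calculate_dissolved_volatiles(sample={}, pressure=1000.0,
#                                                temperature=800.0, X_fluid=1.0)
# sat_p = liu_h2o.calculate_saturation_pressure(temperature=800.0, sample={'H2O': 4.0})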
class LiuCarbon(Model):
"""
Implementation of the Liu et al. (2005) H2O-CO2 solubility model for metaluminous high-silica rhyolitic melts.
"""
def __init__(self):
"""
Initialize the model.
"""
self.set_volatile_species(['CO2'])
self.set_fugacity_model(fugacity_idealgas())
self.set_activity_model(activity_idealsolution())
self.set_solubility_dependence(False)
self.set_calibration_ranges([CalibrationRange('pressure',[1.0,5000.0],crf_Between,'bar','Liu et al. (2005) carbon',
fail_msg=crmsg_Between_fail, pass_msg=crmsg_Between_pass, description_msg=crmsg_Between_description),
CalibrationRange('temperature',[700.0,1200],crf_Between,'oC','Liu et al. (2005) carbon',
fail_msg=crmsg_Between_fail, pass_msg=crmsg_Between_pass, description_msg=crmsg_Between_description),
CalibrationRange('sample',None,crf_LiuComp,None,None,
fail_msg=crmsg_LiuComp_fail, pass_msg=crmsg_LiuComp_pass, description_msg=crmsg_LiuComp_description)])
def preprocess_sample(self, sample):
"""
Returns sample with extraneous (non-oxide) information removed and any missing oxides given a value of 0.0.
"""
for oxide in oxides:
if oxide in sample.keys():
pass
else:
sample[oxide] = 0.0
bulk_comp = {oxide: sample[oxide] for oxide in oxides}
self.bulk_comp_orig = sample
return bulk_comp
def calculate_dissolved_volatiles(self, sample, pressure, temperature, X_fluid=1, **kwargs):
"""
Parameters
----------
sample dict
Composition of sample in wt% oxides.
pressure float
Pressure in bars.
temperature float
Temperature in degrees C.
X_fluid float
OPTIONAL. Default is 1. Mole fraction of CO2 in the H2O-CO2 fluid.
Returns
-------
float
Calculated dissolved CO2 concentration in wt%.
"""
pressureMPa = pressure / 10.0
Pw = pressureMPa * (1 - X_fluid)
PCO2 = pressureMPa * X_fluid #(1 - X_fluid)
temperatureK = temperature + 273.15
CO2melt_ppm = (PCO2*(5668 - 55.99*Pw)/temperatureK
+ PCO2*(0.4133*Pw**(0.5) + 2.041*10**(-3)*Pw**(1.5)))
CO2melt = CO2melt_ppm / 10000
return CO2melt
def calculate_equilibrium_fluid_comp(self, sample, pressure, temperature, **kwargs):
"""
Parameters
----------
sample dict
Composition of sample in wt% oxides.
pressure float
Pressure in bars.
temperature float
Temperature in degrees C.
Returns
-------
float
Calculated equilibrium fluid concentration in XCO2fluid mole fraction.
"""
temperatureK = temperature + 273.15
pressureMPa = pressure / 10.0
if temperatureK <= 0.0:
raise InputError("Temperature must be greater than 0K.")
if type(sample) != dict and type(sample) != pd.core.series.Series:
raise InputError("sample must be a dict or a pandas Series.")
if 'CO2' not in sample:
raise InputError("sample must contain CO2.")
if sample['CO2'] < 0.0:
raise InputError("Dissolved CO2 concentration must be greater than 0 wt%.")
_sample = sample.copy()
CO2melt_wt = _sample["CO2"]
CO2melt_ppm = CO2melt_wt * 10000
#calculate saturation pressure and assert that input P <= SatP
satP = self.calculate_saturation_pressure(temperature,sample)
is_saturated = satP - pressure
if is_saturated >= 0:
pass
else:
warnings.warn(str(pressure) + " bars is above the saturation pressure (" + str(satP) + " bars) for this sample. Results from this calculation may be nonsensical.")
#Use sympy to solve solubility equation for XH2Ofluid
XCO2fluid = sympy.symbols('XCO2fluid') #XCO2fluid is the variable to solve for
equation = (((XCO2fluid*pressureMPa)*(5668 - 55.99*(pressureMPa*(1-XCO2fluid)))/temperatureK
+ (XCO2fluid*pressureMPa)*(0.4133*(pressureMPa*(1-XCO2fluid))**(0.5)
+ 2.041*10**(-3)*(pressureMPa*(1-XCO2fluid))**(1.5))) - CO2melt_ppm)
XCO2fluid = sympy.solve(equation, XCO2fluid)[0]
if XCO2fluid > 1:
XCO2fluid = 1
if XCO2fluid < 0:
XCO2fluid = 0
return XCO2fluid #1 - XCO2fluid
def calculate_saturation_pressure(self,temperature,sample,X_fluid=1.0,**kwargs):
"""
Calculates the pressure at which a CO2-bearing fluid is saturated. Calls the scipy.root_scalar
routine, which makes repeated calls to the calculate_dissolved_volatiles method.
Parameters
----------
sample dict
Composition of sample in wt% oxides.
temperature float
Temperature in degrees C.
X_fluid float
OPTIONAL. Default is 1.0. Mole fraction of CO2 in the H2O-CO2 fluid.
Returns
-------
float
Calculated saturation pressure in bars.
"""
_sample = sample.copy()
temperatureK = temperature + 273.15
if temperatureK <= 0.0:
raise InputError("Temperature must be greater than 0K.")
if X_fluid < 0 or X_fluid > 1:
raise InputError("X_fluid must have a value between 0 and 1.")
if type(sample) != dict and type(sample) != pd.core.series.Series:
raise InputError("sample must be a dict or a pandas Series.")
if 'CO2' not in sample:
raise InputError("sample must contain CO2.")
if sample['CO2'] < 0.0:
raise InputError("Dissolved CO2 concentration must be greater than 0 wt%.")
try:
satP = root_scalar(self.root_saturation_pressure,args=(temperature,_sample,X_fluid,kwargs),
x0=10.0,x1=2000.0).root
except:
warnings.warn("Saturation pressure not found.",RuntimeWarning)
satP = np.nan
return np.real(satP)
def root_saturation_pressure(self,pressure,temperature,sample,X_fluid,kwargs):
""" Function called by scipy.root_scalar when finding the saturation pressure using
calculate_saturation_pressure.
Parameters
----------
pressure float
Pressure guess in bars
temperature float
The temperature of the system in C.
sample pandas Series or dict
Major elements in wt% (normalized to 100%), including CO2.
kwargs dictionary
Additional keyword arguments supplied to calculate_saturation_pressure. Might be required for
the fugacity or activity models.
Returns
-------
float
The difference between the dissolved CO2 at the guessed pressure and the CO2 concentration
passed in the sample variable.
"""
return self.calculate_dissolved_volatiles(pressure=pressure,temperature=temperature,sample=sample,X_fluid=X_fluid,**kwargs) - sample['CO2']
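# Hedged usage sketch (added for illustration; not part of the original source).
# For LiuCarbon, X_fluid is the CO2 mole fraction of the fluid and the dissolved CO2
# is returned in wt% (the underlying expression is in ppm, converted before return).
#
# liu_co2 = LiuCarbon()
# wt_co2 = liu_co2.calculate_dissolved_volatiles(sample={}, pressure=2000.0,
#                                                temperature=800.0, X_fluid=1.0)
# sat_p = liu_co2.calculate_saturation_pressure(temperature=800.0,
#                                               sample={'CO2': 0.05}, X_fluid=1.0)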
class AllisonCarbon(Model):
"""
Implementation of the Allison et al. (2019) CO2 solubility model. The type of fit and the
calibration composition are selected via the model_fit and model_loc arguments of
calculate_dissolved_volatiles. The fit may be either thermodynamic or power-law. The composition
may be chosen from sunset, sfvf, erebus, vesuvius, etna, or stromboli. Default is the
thermodynamic fit to sunset.
"""
def __init__(self):
"""
Initialize the model.
"""
self.set_volatile_species(['CO2'])
self.set_fugacity_model(fugacity_HB_co2())
self.set_activity_model(activity_idealsolution())
self.set_calibration_ranges([CalibrationRange('pressure',[0.0,6000.0],crf_Between,'bar','Allison et al. (2019) carbon',
fail_msg=crmsg_Between_fail, pass_msg=crmsg_Between_pass, description_msg=crmsg_Between_description),
CalibrationRange('temperature',1200,crf_EqualTo,'oC','Allison et al. (2019) carbon',
fail_msg=crmsg_EqualTo_fail, pass_msg=crmsg_EqualTo_pass, description_msg=crmsg_EqualTo_description)])
self.set_solubility_dependence(False)
def preprocess_sample(self,sample):
"""
Returns sample normalized to 100wt%, keeping the concentrations of H2O and CO2 constant.
Parameters
----------
sample pandas Series or dict
Major element oxides in wt%.
Returns
-------
pandas Series
Normalized major element oxides in wt%.
"""
return normalize_AdditionalVolatiles(sample)
def calculate_dissolved_volatiles(self,pressure,temperature,sample=None,X_fluid=1.0,
model_loc='sunset',model_fit='thermodynamic',**kwargs):
"""
Calculates the dissolved CO2 concentration using Eqns (2-7) or (10-11) from Allison et al. (2019).
Parameters
----------
pressure float
Pressure in bars.
temperature float
Temperature in C.
sample pandas Series, dict or None
Major element oxides in wt%. Required if using the thermodynamic fits, need not be
provided if using the power law fits. Default is None.
X_fluid float
The mole fraction of CO2 in the fluid. Default is 1.0.
model_fit str
Either 'power' for the power-law fits, or 'thermodynamic' for the
thermodynamic fits.
model_loc str
One of 'sunset', 'sfvf', 'erebus', 'vesuvius', 'etna', 'stromboli'.
Returns
-------
float
Dissolved CO2 concentration in wt%.
"""
temperature = temperature + 273.15 #translate T from C to K
if temperature <= 0.0:
raise InputError("Temperature must be greater than 0K.")
if pressure < 0.0:
raise InputError("Pressure must be positive.")
if X_fluid < 0 or X_fluid > 1:
raise InputError("X_fluid must have a value between 0 and 1.")
if model_fit not in ['power','thermodynamic']:
raise InputError("model_fit must be one of 'power', or 'thermodynamic'.")
if model_loc not in ['sunset','sfvf','erebus','vesuvius','etna','stromboli']:
raise InputError("model_loc must be one of 'sunset', 'sfvf', 'erebus', 'vesuvius', 'etna', or 'stromboli'.")
if pressure == 0:
return 0
if model_fit == 'thermodynamic':
if type(sample) != dict and type(sample) != pd.core.series.Series:
raise InputError("Thermodynamic fit requires sample to be a dict or a pandas Series.")
P0 = 1000 # bar
params = dict({'sunset':[16.4,-14.67],
'sfvf':[15.02,-14.87],
'erebus':[15.83,-14.65],
'vesuvius':[24.42,-14.04],
'etna':[21.59,-14.28],
'stromboli':[14.93,-14.68]})
DV = params[model_loc][0]
lnK0 = params[model_loc][1]
lnK = lnK0 - (pressure-P0)*DV/(10*8.3141*temperature)
fCO2 = self.fugacity_model.fugacity(pressure=pressure,temperature=temperature-273.15,X_fluid=X_fluid,**kwargs)
Kf = np.exp(lnK)*fCO2
XCO3 = Kf/(1-Kf)
# FWone = wtpercentOxides_to_formulaWeight(sample)#,exclude_volatiles=True)
FWone = 36.594
wtCO2 = (44.01*XCO3)/((44.01*XCO3)+(1-XCO3)*FWone)*100
return wtCO2
if model_fit == 'power':
params = dict({'stromboli':[1.05,0.883],
'etna':[2.831,0.797],
'vesuvius':[4.796,0.754],
'sfvf':[3.273,0.74],
'sunset':[4.32,0.728],
'erebus':[5.145,0.713]})
fCO2 = self.fugacity_model.fugacity(pressure=pressure,temperature=temperature-273.15,X_fluid=X_fluid,**kwargs)
return params[model_loc][0]*fCO2**params[model_loc][1]/1e4
def calculate_equilibrium_fluid_comp(self,pressure,temperature,sample,**kwargs):
""" Returns 1.0 if a pure CO2 fluid is saturated.
Returns 0.0 if a pure CO2 fluid is undersaturated.
Parameters
----------
pressure float
The total pressure of the system in bars.
temperature float
The temperature of the system in C.
sample pandas Series or dict
Major element oxides in wt% (including H2O).
Returns
-------
float
1.0 if CO2-fluid saturated, 0.0 otherwise.
"""
satP = self.calculate_saturation_pressure(temperature=temperature,sample=sample,X_fluid=1.0,**kwargs)
if pressure < satP:
return 1.0
else:
return 0.0
def calculate_saturation_pressure(self,temperature,sample,X_fluid=1.0,**kwargs):
"""
Calculates the pressure at which a pure CO2 fluid is saturated, for the given sample
composition and CO2 concentration. Calls the scipy.root_scalar routine, which makes
repeated calls to the calculate_dissolved_volatiles method.
Parameters
----------
temperature float
The temperature of the system in C.
sample pandas Series
Major element oxides in wt% (including CO2).
X_fluid float
The mole fraction of H2O in the fluid. Default is 1.0.
Returns
-------
float
Calculated saturation pressure in bars.
"""
if temperature <= 0.0:
raise InputError("Temperature must be greater than 0K.")
if X_fluid < 0 or X_fluid > 1:
raise InputError("X_fluid must have a value between 0 and 1.")
if type(sample) != dict and type(sample) != pd.core.series.Series:
raise InputError("sample must be a dict or a pandas Series.")
if 'CO2' not in sample:
raise InputError("sample must contain CO2.")
if sample['CO2'] < 0.0:
raise InputError("Dissolved CO2 concentration must be greater than 0 wt%.")
try:
satP = root_scalar(self.root_saturation_pressure,args=(temperature,sample,X_fluid,kwargs),
x0=1000.0,x1=2000.0).root
except:
warnings.warn("Saturation pressure not found.",RuntimeWarning)
satP = np.nan
return satP
def root_saturation_pressure(self,pressure,temperature,sample,X_fluid,kwargs):
""" Function called by scipy.root_scalar when finding the saturation pressure using
calculate_saturation_pressure.
Parameters
----------
pressure float
Pressure guess in bars
temperature float
The temperature of the system in C.
sample pandas Series or dict
Major elements in wt% (normalized to 100%), including CO2.
kwargs dictionary
Additional keyword arguments supplied to calculate_saturation_pressure. Might be required for
the fugacity or activity models.
Returns
-------
float
The difference between the dissolved CO2 at the guessed pressure and the CO2 concentration
passed in the sample variable.
"""
return sample['CO2'] - self.calculate_dissolved_volatiles(pressure=pressure,temperature=temperature,sample=sample,X_fluid=X_fluid,**kwargs)
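# Hedged usage sketch (added for illustration; not part of the original source).
# The fit type and calibration locality are chosen per call through model_fit and
# model_loc; example_sample is the invented composition sketched earlier.
#
# allison = AllisonCarbon()
# wt_co2 = allison.calculate_dissolved_volatiles(pressure=4000.0, temperature=1200.0,
#                                                sample=example_sample, X_fluid=1.0,
#                                                model_loc='etna', model_fit='power')
# sat_p = allison.calculate_saturation_pressure(temperature=1200.0, X_fluid=1.0,
#                                               sample={**example_sample, 'CO2': 0.20})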
#------------MIXED FLUID MODELS-------------------------------#
class MixedFluid(Model):
"""
Implements the generic framework for mixed fluid solubility. Any set of pure fluid solubility
models may be specified.
"""
def __init__(self,models):
"""
Initializes the mixed fluid model.
Parameters
----------
models dictionary
Dictionary with names of volatile species as keys, and the model objects as values.
"""
self.models = tuple(model for model in models.values())
self.set_volatile_species(list(models.keys()))
def preprocess_sample(self,sample):
""" Returns sample, unmodified.
Parameters
----------
sample pandas Series or dict
Major element oxides in wt%.
Returns
-------
pandas Series or dict
Major element oxides in wt%.
"""
if type(sample) != dict and type(sample) != pd.core.series.Series:
raise InputError("sample must be a dict or a pandas Series.")
_sample = sample.copy()
_sample = self.models[0].preprocess_sample(_sample)
return _sample
def calculate_dissolved_volatiles(self,pressure,X_fluid,returndict=False,**kwargs):
"""
Calculates the dissolved volatile concentrations in wt%, using each model's
calculate_dissolved_volatiles method. At present the volatile concentrations are
not propagated through.
Parameters
----------
pressure float
The total pressure in bars.
X_fluid float, numpy.ndarray, dict, pandas Series
The mole fraction of each species in the fluid. If the mixed fluid model
contains only two species (e.g. CO2 and H2O), the value of the first species in
self.volatile_species may be passed on its own as a float.
returndict bool
If True, the results will be returned in a dict, otherwise they will be returned
as a tuple.
Returns
-------
tuple
Dissolved volatile concentrations of each species in the model, in the order set
by self.volatile_species.
"""
if (type(X_fluid) == float or type(X_fluid) == int) and len(self.volatile_species) == 2:
X_fluid = (X_fluid,1-X_fluid)
elif len(X_fluid) != len(self.volatile_species):
raise InputError("X_fluid must have the same length as the number of volatile species\
in the MixedFluids Model class, or it may have length 1 if two species are present\
in the MixedFluids Model class.")
if np.sum(X_fluid) != 1.0:
raise InputError("X_fluid must sum to 1.0")
if any(val<0 for val in X_fluid) or any(val>1 for val in X_fluid):
raise InputError("Each mole fraction in X_fluid must have a value between 0 and 1.")
if type(X_fluid) == dict or type(X_fluid) == pd.core.series.Series:
X_fluid = tuple(X_fluid[species] for species in self.volatile_species)
# If the models don't depend on the concentration of volatiles, themselves.
if all(model.solubility_dependence == False for model in self.models):
result = tuple(model.calculate_dissolved_volatiles(pressure=pressure,X_fluid=Xi,**kwargs) for model, Xi in zip(self.models,X_fluid))
# If one of the models depends on the other volatile concentration
elif len(self.models) == 2 and self.models[0].solubility_dependence == False and 'sample' in kwargs:
result0 = self.models[0].calculate_dissolved_volatiles(pressure=pressure,X_fluid=X_fluid[0],**kwargs)
samplecopy = kwargs['sample'].copy()
samplecopy[self.volatile_species[0]] = result0
kwargs['sample'] = samplecopy
result1 = self.models[1].calculate_dissolved_volatiles(pressure=pressure,X_fluid=X_fluid[1],**kwargs)
result = (result0,result1)
elif len(self.models) == 2 and self.models[1].solubility_dependence == False and 'sample' in kwargs:
result1 = self.models[1].calculate_dissolved_volatiles(pressure=pressure,X_fluid=X_fluid[1],**kwargs)
samplecopy = kwargs['sample'].copy()
samplecopy[self.volatile_species[1]] = result1
kwargs['sample'] = samplecopy
result0 = self.models[0].calculate_dissolved_volatiles(pressure=pressure,X_fluid=X_fluid[0],**kwargs)
result = (result0,result1)
else:
raise InputError("The solubility dependence of the models is not currently supported by the MixedFluid model.")
if returndict == True:
resultsdict = {}
for i,v in zip(range(len(self.volatile_species)),self.volatile_species):
resultsdict.update({v+'_liq':result[i]})
return resultsdict
else:
return result
def calculate_equilibrium_fluid_comp(self,pressure,sample,return_dict=True,**kwargs):
""" Calculates the composition of the fluid in equilibrium with the dissolved volatile
concentrations passed. If a fluid phase is undersaturated at the chosen pressure (0,0) will
be returned. Note, this currently assumes the given H2O and CO2 concentrations are
the system total, not the total dissolved. If one of the volatile species has a zero or
negative concentration, the pure fluid model for the other volatile species will be used.
Parameters
----------
pressure float
The total pressure in bars.
sample pandas Series or dict
Major element oxides in wt% (including volatiles).
return_dict bool
Set the return type, if true a dict will be returned, if False two floats will be
returned. Default is True.
Returns
-------
dict or floats
Mole fractions of the volatile species in the fluid, in the order given by
self.volatile_species if floats.
"""
if len(self.volatile_species) != 2:
raise InputError("Currently equilibrium fluid compositions can only be calculated when\
two volatile species are present.")
dissolved_at_0bar = [self.models[0].calculate_dissolved_volatiles(sample=sample,pressure=0.0,**kwargs),
self.models[1].calculate_dissolved_volatiles(sample=sample,pressure=0.0,**kwargs)]
if sample[self.volatile_species[0]] <= 0.0 or sample[self.volatile_species[0]] <= dissolved_at_0bar[0]:
Xv0 = 0.0
Xv1 = self.models[1].calculate_equilibrium_fluid_comp(pressure=pressure,sample=sample,**kwargs)
elif sample[self.volatile_species[1]] <= 0.0 or sample[self.volatile_species[1]] <= dissolved_at_0bar[1]:
Xv1 = 0.0
Xv0 = self.models[0].calculate_equilibrium_fluid_comp(pressure=pressure,sample=sample,**kwargs)
else:
satP = self.calculate_saturation_pressure(sample,**kwargs)
if satP < pressure:
if return_dict == True:
return {self.volatile_species[0]:0,self.volatile_species[1]:0}
else:
return (0,0)
molfracs = wtpercentOxides_to_molOxides(sample)
(Xt0, Xt1) = (molfracs[self.volatile_species[0]],molfracs[self.volatile_species[1]])
try:
Xv0 = root_scalar(self.root_for_fluid_comp,bracket=[1e-15,1-1e-15],args=(pressure,Xt0,Xt1,sample,kwargs)).root
Xv1 = 1 - Xv0
except:
try:
Xv0 = root_scalar(self.root_for_fluid_comp,x0=0.5,x1=0.1,args=(pressure,Xt0,Xt1,sample,kwargs)).root
Xv1 = 1 - Xv0
except:
raise SaturationError("Equilibrium fluid not found. Likely an issue with the numerical solver.")
if return_dict == True:
return {self.volatile_species[0]:Xv0,self.volatile_species[1]:Xv1}
else:
return Xv0, Xv1
def calculate_saturation_pressure(self,sample,**kwargs):
"""
Calculates the pressure at which a fluid will be saturated, given the dissolved volatile
concentrations. If one of the volatile species has a zero or negative concentration the
pure fluid model for the other species will be used. If one of the volatile species has a
concentration lower than the concentration dissolved at 0 bar, the pure fluid model for the
other species will be used.
Parameters
----------
sample pandas Series or dict
Major element oxides in wt% (including volatiles).
Returns
-------
float
The saturation pressure in bars.
"""
dissolved_at_0bar = [self.models[0].calculate_dissolved_volatiles(sample=sample,pressure=0.0,**kwargs),
self.models[1].calculate_dissolved_volatiles(sample=sample,pressure=0.0,**kwargs)]
if sample[self.volatile_species[0]] <= 0.0 or sample[self.volatile_species[0]] <= dissolved_at_0bar[0]:
satP = self.models[1].calculate_saturation_pressure(sample=sample,**kwargs)
elif sample[self.volatile_species[1]] <= 0.0 or sample[self.volatile_species[1]] <= dissolved_at_0bar[1]:
satP = self.models[0].calculate_saturation_pressure(sample=sample,**kwargs)
else:
volatile_concs = np.array(tuple(sample[species] for species in self.volatile_species))
x0 = 0
for model in self.models:
xx0 = model.calculate_saturation_pressure(sample=sample,**kwargs)
if np.isnan(xx0) == False:
x0 += xx0
try:
satP = root(self.root_saturation_pressure,x0=[x0,0.5],args=(volatile_concs,sample,kwargs)).x[0]
except:
warnings.warn("Saturation pressure not found.",RuntimeWarning)
satP = np.nan
return satP
def calculate_isobars_and_isopleths(self,pressure_list,isopleth_list=[0,1],points=51,
return_dfs=True,extend_to_zero=True,**kwargs):
"""
Calculates isobars and isopleths. Isobars can be calculated for any number of pressures. Variables
required by each of the pure fluid models must be passed, e.g. sample, temperature, etc.
Parameters
----------
pressure_list list
List of all pressure values at which to calculate isobars, in bars.
isopleth_list list
List of all fluid compositions in mole fraction (of the first species in
self.volatile_species) at which to calculate isopleths. Values can range from 0 to 1.
Default value is [0,1]. If None, only isobars will be calculated.
points int
The number of points in each isobar and isopleth. Default value is 51.
return_dfs bool
If True, the results will be returned as two pandas DataFrames, as produced by the MagmaSat
method. If False the results will be returned as lists of numpy arrays.
Returns
-------
pandas DataFrame object(s) or list(s)
If isopleth_list is not None, two objects will be returned, one with the isobars and the second with
the isopleths. If return_dfs is True, two pandas DataFrames will be returned with column names
'Pressure' or 'XH2O_fl', 'H2O_liq', and 'CO2_liq'. If return_dfs is False, two lists of numpy arrays
will be returned. Each array is an individual isobar or isopleth, in the order passed via pressure_list
or isopleth_list. The arrays are the concentrations of H2O and CO2 in the liquid, in the order of the
species in self.volatile_species.
"""
if len(self.volatile_species) != 2 or 'H2O' not in self.volatile_species or 'CO2' not in self.volatile_species:
raise InputError("calculate_isobars_and_isopleths may only be used with a H2O-CO2 fluid.")
H2O_id = self.volatile_species.index('H2O')
CO2_id = self.volatile_species.index('CO2')
has_isopleths = True
if isopleth_list is None:
has_isopleths = False
isobars_df = pd.DataFrame(columns=['Pressure','H2O_liq','CO2_liq'])
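# Hedged usage sketch (added for illustration; not part of the original source).
# MixedFluid wraps two pure-fluid models keyed by volatile species; when X_fluid is a
# float it refers to the first species in self.volatile_species ('H2O' here).
# example_sample is the invented composition sketched earlier.
#
# mixed = MixedFluid({'H2O': MooreWater(), 'CO2': AllisonCarbon()})
# diss = mixed.calculate_dissolved_volatiles(pressure=2000.0, X_fluid=0.8,
#                                            temperature=1100.0, sample=example_sample,
#                                            returndict=True)
# fluid = mixed.calculate_equilibrium_fluid_comp(pressure=2000.0, temperature=1100.0,
#                                                sample={**example_sample, 'CO2': 0.05})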
import nose
import os
import string
from distutils.version import LooseVersion
from datetime import datetime, date, timedelta
from pandas import Series, DataFrame, MultiIndex, PeriodIndex, date_range
from pandas.compat import range, lrange, StringIO, lmap, lzip, u, zip
import pandas.util.testing as tm
from pandas.util.testing import ensure_clean
from pandas.core.config import set_option
import numpy as np
from numpy import random
from numpy.random import randn
from numpy.testing import assert_array_equal
from numpy.testing.decorators import slow
import pandas.tools.plotting as plotting
def _skip_if_no_scipy():
try:
import scipy
except ImportError:
raise nose.SkipTest("no scipy")
@tm.mplskip
class TestSeriesPlots(tm.TestCase):
def setUp(self):
import matplotlib as mpl
self.mpl_le_1_2_1 = str(mpl.__version__) <= LooseVersion('1.2.1')
self.ts = tm.makeTimeSeries()
self.ts.name = 'ts'
self.series = tm.makeStringSeries()
self.series.name = 'series'
self.iseries = tm.makePeriodSeries()
self.iseries.name = 'iseries'
def tearDown(self):
tm.close()
@slow
def test_plot(self):
_check_plot_works(self.ts.plot, label='foo')
_check_plot_works(self.ts.plot, use_index=False)
_check_plot_works(self.ts.plot, rot=0)
_check_plot_works(self.ts.plot, style='.', logy=True)
_check_plot_works(self.ts.plot, style='.', logx=True)
_check_plot_works(self.ts.plot, style='.', loglog=True)
_check_plot_works(self.ts[:10].plot, kind='bar')
_check_plot_works(self.iseries.plot)
_check_plot_works(self.series[:5].plot, kind='bar')
_check_plot_works(self.series[:5].plot, kind='line')
_check_plot_works(self.series[:5].plot, kind='barh')
_check_plot_works(self.series[:10].plot, kind='barh')
_check_plot_works(Series(randn(10)).plot, kind='bar', color='black')
@slow
def test_plot_figsize_and_title(self):
# figsize and title
import matplotlib.pyplot as plt
ax = self.series.plot(title='Test', figsize=(16, 8))
self.assertEqual(ax.title.get_text(), 'Test')
assert_array_equal(np.round(ax.figure.get_size_inches()),
np.array((16., 8.)))
@slow
def test_bar_colors(self):
import matplotlib.pyplot as plt
import matplotlib.colors as colors
default_colors = plt.rcParams.get('axes.color_cycle')
custom_colors = 'rgcby'
df = DataFrame(randn(5, 5))
ax = df.plot(kind='bar')
rects = ax.patches
conv = colors.colorConverter
for i, rect in enumerate(rects[::5]):
xp = conv.to_rgba(default_colors[i % len(default_colors)])
rs = rect.get_facecolor()
self.assertEqual(xp, rs)
tm.close()
ax = df.plot(kind='bar', color=custom_colors)
rects = ax.patches
conv = colors.colorConverter
for i, rect in enumerate(rects[::5]):
xp = conv.to_rgba(custom_colors[i])
rs = rect.get_facecolor()
self.assertEqual(xp, rs)
tm.close()
from matplotlib import cm
# Test str -> colormap functionality
ax = df.plot(kind='bar', colormap='jet')
rects = ax.patches
rgba_colors = lmap(cm.jet, np.linspace(0, 1, 5))
for i, rect in enumerate(rects[::5]):
xp = rgba_colors[i]
rs = rect.get_facecolor()
self.assertEqual(xp, rs)
tm.close()
# Test colormap functionality
ax = df.plot(kind='bar', colormap=cm.jet)
rects = ax.patches
rgba_colors = lmap(cm.jet, np.linspace(0, 1, 5))
for i, rect in enumerate(rects[::5]):
xp = rgba_colors[i]
rs = rect.get_facecolor()
self.assertEqual(xp, rs)
tm.close()
df.ix[:, [0]].plot(kind='bar', color='DodgerBlue')
@slow
def test_bar_linewidth(self):
df = DataFrame(randn(5, 5))
# regular
ax = df.plot(kind='bar', linewidth=2)
for r in ax.patches:
self.assertEqual(r.get_linewidth(), 2)
# stacked
ax = df.plot(kind='bar', stacked=True, linewidth=2)
for r in ax.patches:
self.assertEqual(r.get_linewidth(), 2)
# subplots
axes = df.plot(kind='bar', linewidth=2, subplots=True)
for ax in axes:
for r in ax.patches:
self.assertEqual(r.get_linewidth(), 2)
@slow
def test_bar_log(self):
expected = np.array([1., 10., 100., 1000.])
if not self.mpl_le_1_2_1:
expected = np.hstack((.1, expected, 1e4))
ax = Series([200, 500]).plot(log=True, kind='bar')
assert_array_equal(ax.yaxis.get_ticklocs(), expected)
def test_rotation(self):
df = DataFrame(randn(5, 5))
ax = df.plot(rot=30)
for l in ax.get_xticklabels():
self.assertEqual(l.get_rotation(), 30)
def test_irregular_datetime(self):
rng = date_range('1/1/2000', '3/1/2000')
rng = rng[[0, 1, 2, 3, 5, 9, 10, 11, 12]]
ser = Series(randn(len(rng)), rng)
ax = ser.plot()
xp = datetime(1999, 1, 1).toordinal()
ax.set_xlim('1/1/1999', '1/1/2001')
self.assertEqual(xp, ax.get_xlim()[0])
@slow
def test_hist(self):
_check_plot_works(self.ts.hist)
_check_plot_works(self.ts.hist, grid=False)
_check_plot_works(self.ts.hist, figsize=(8, 10))
_check_plot_works(self.ts.hist, by=self.ts.index.month)
import matplotlib.pyplot as plt
fig, ax = plt.subplots(1, 1)
_check_plot_works(self.ts.hist, ax=ax)
_check_plot_works(self.ts.hist, ax=ax, figure=fig)
_check_plot_works(self.ts.hist, figure=fig)
tm.close()
fig, (ax1, ax2) = plt.subplots(1, 2)
_check_plot_works(self.ts.hist, figure=fig, ax=ax1)
_check_plot_works(self.ts.hist, figure=fig, ax=ax2)
with tm.assertRaises(ValueError):
self.ts.hist(by=self.ts.index, figure=fig)
@slow
def test_hist_layout(self):
n = 10
gender = tm.choice(['Male', 'Female'], size=n)
df = DataFrame({'gender': gender,
'height': random.normal(66, 4, size=n), 'weight':
random.normal(161, 32, size=n)})
with tm.assertRaises(ValueError):
df.height.hist(layout=(1, 1))
with tm.assertRaises(ValueError):
df.height.hist(layout=[1, 1])
@slow
def test_hist_layout_with_by(self):
import matplotlib.pyplot as plt
n = 10
gender = tm.choice(['Male', 'Female'], size=n)
df = DataFrame({'gender': gender,
'height': random.normal(66, 4, size=n), 'weight':
random.normal(161, 32, size=n),
'category': random.randint(4, size=n)})
_check_plot_works(df.height.hist, by=df.gender, layout=(2, 1))
tm.close()
_check_plot_works(df.height.hist, by=df.gender, layout=(1, 2))
tm.close()
_check_plot_works(df.weight.hist, by=df.category, layout=(1, 4))
| tm.close() | pandas.util.testing.close |
"""
Original data:公司股市代號對照表.csv
Conditions:
1. Single-month revenue at its all-time monthly high (historical monthly revenue rank = 1)
   from 月營收創新高.xlsx
2. Debt ratio < 40%
   Quarterly data
https://goodinfo.tw/StockInfo/StockList.asp?RPT_TIME=&MARKET_CAT=%E7%86%B1%E9%96%80%E6%8E%92%E8%A1%8C&INDUSTRY_CAT=%E8%B2%A0%E5%82%B5%E7%B8%BD%E9%A1%8D%E4%BD%94%E7%B8%BD%E8%B3%87%E7%94%A2%E6%AF%94%E6%9C%80%E9%AB%98%40%40%E8%B2%A0%E5%82%B5%E7%B8%BD%E9%A1%8D%40%40%E8%B2%A0%E5%82%B5%E7%B8%BD%E9%A1%8D%E4%BD%94%E7%B8%BD%E8%B3%87%E7%94%A2%E6%AF%94%E6%9C%80%E9%AB%98
3. Total director/supervisor shareholding + domestic institutional shareholding > 30%
   Directors and supervisors:
https://goodinfo.tw/StockInfo/StockList.asp?RPT_TIME=&MARKET_CAT=%E7%86%B1%E9%96%80%E6%8E%92%E8%A1%8C&INDUSTRY_CAT=%E5%85%A8%E9%AB%94%E8%91%A3%E7%9B%A3%E6%8C%81%E8%82%A1%E6%AF%94%E4%BE%8B%28%25%29%40%40%E5%85%A8%E9%AB%94%E8%91%A3%E7%9B%A3%40%40%E6%8C%81%E8%82%A1%E6%AF%94%E4%BE%8B%28%25%29
   Domestic institutional investors:
https://goodinfo.tw/StockInfo/StockList.asp?RPT_TIME=&MARKET_CAT=%E7%86%B1%E9%96%80%E6%8E%92%E8%A1%8C&INDUSTRY_CAT=%E6%9C%AC%E5%9C%8B%E6%B3%95%E4%BA%BA%E6%8C%81%E8%82%A1%E6%AF%94%E4%BE%8B%28%25%29%40%40%E6%9C%AC%E5%9C%8B%E6%B3%95%E4%BA%BA%40%40%E6%8C%81%E8%82%A1%E6%AF%94%E4%BE%8B%28%25%29
4. Director/supervisor share pledge ratio < 10%
https://goodinfo.tw/StockInfo/StockList.asp?RPT_TIME=&MARKET_CAT=%E7%86%B1%E9%96%80%E6%8E%92%E8%A1%8C&INDUSTRY_CAT=%E5%85%A8%E9%AB%94%E8%91%A3%E7%9B%A3%E8%B3%AA%E6%8A%BC%E6%AF%94%E4%BE%8B%28%25%29%40%40%E5%85%A8%E9%AB%94%E8%91%A3%E7%9B%A3%40%40%E8%B3%AA%E6%8A%BC%E6%AF%94%E4%BE%8B%28%25%29
   Same data as the director/supervisor shareholding list, only sorted differently
5. Gross margin, operating margin, and after-tax net margin all rising ("three ratios rising")
   Gross margin historical quarterly rank = 1 (first place)
   Operating margin historical quarterly rank = 1
   Net margin historical quarterly rank = 1
6. Dividend yield > 1%
   Cash dividend yield:
https://goodinfo.tw/StockInfo/StockList.asp?RPT_TIME=&MARKET_CAT=%E7%86%B1%E9%96%80%E6%8E%92%E8%A1%8C&INDUSTRY_CAT=%E7%8F%BE%E9%87%91%E6%AE%96%E5%88%A9%E7%8E%87+%28%E6%9C%80%E6%96%B0%E5%B9%B4%E5%BA%A6%29%40%40%E7%8F%BE%E9%87%91%E6%AE%96%E5%88%A9%E7%8E%87%40%40%E6%9C%80%E6%96%B0%E5%B9%B4%E5%BA%A6
7. Bullish alignment of moving averages
   Moving averages trending upward
8. Cash flow ratio > 0 or operating cash flow > 0 (skip)
https://goodinfo.tw/StockInfo/StockList.asp?RPT_TIME=&MARKET_CAT=%E6%99%BA%E6%85%A7%E9%81%B8%E8%82%A1&INDUSTRY_CAT=%E6%9C%88K%E7%B7%9A%E7%AA%81%E7%A0%B4%E5%AD%A3%E7%B7%9A%40%40%E6%9C%88K%E7%B7%9A%E5%90%91%E4%B8%8A%E7%AA%81%E7%A0%B4%E5%9D%87%E5%83%B9%E7%B7%9A%40%40%E5%AD%A3%E7%B7%9A
9. Stock has not yet gone through a sharp rise or fall (skip)
"""
import sys
import pdb
import time
import pandas as pd
import random
from datetime import datetime
import global_vars
from stock_web_crawler import stock_crawler, delete_header, excel_formatting
from stock_info import stock_ID_name_mapping
# global variables
DEBT_RATIO = 40          # debt ratio threshold (%)
STAKEHOLDING = 30        # combined shareholding threshold (%)
PLEDGE_RATIO = 10        # share pledge ratio threshold (%)
GROSS_MARGIN = 20        # gross margin threshold (%)
OPERATING_MARGIN = 20    # operating margin threshold (%)
NET_PROFIT_MARGIN = 20   # after-tax net profit margin threshold (%)
DIVIDEND_YIELD = 1       # cash dividend yield threshold (%)
def main():
file_path = global_vars.DIR_PATH + "公司股市代號對照表.csv"
stock_ID = list()
stock_name = list()
with open(file_path, 'r', encoding="UTF-8") as file_r:
file_r.readline() # skip the first row
for line in file_r:
line = line.split(",")
stock_ID.append(line[0])
stock_name.append(line[1])
df_combine = pd.DataFrame(list(zip(stock_ID, stock_name)), columns=["代號", "名稱"])
file_path = global_vars.DIR_PATH + "月營收創新高.xlsx"
try:
last_month = datetime.now().month-1
if last_month <= 0:
last_month = 12
df = pd.read_excel(file_path, sheet_name=f"{last_month}月")
except ValueError as ve:
print("ValueError:", ve)
sys.stderr.write("Please excute stock_web_crawler.py first.\n")
sys.exit(0)
df["代號"] = df["代號"].astype(str)
df = df[["代號", "單月營收歷月排名"]]
df_combine = pd.merge(df_combine, df, on=["代號"], how="left")
file_path = global_vars.DIR_PATH + "stock_crawler.xlsx"
dfs = | pd.read_excel(file_path, sheet_name=None) | pandas.read_excel |
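# --- Hedged illustration (not part of the original script above) --------------
# Applies a few of the screening thresholds listed in the stock screener's
# docstring (debt ratio < 40%, combined insider + domestic-institutional
# shareholding > 30%, pledge ratio < 10%, cash dividend yield > 1%) to a toy
# DataFrame. The tickers and English column names are illustrative assumptions,
# not the actual Chinese column headers used by the goodinfo.tw sheets.
import pandas as pd

def demo_stock_screen():
    demo = pd.DataFrame({
        "ticker": ["1101", "2330", "2603"],
        "debt_ratio": [35.0, 28.0, 55.0],
        "insider_plus_institutional": [45.0, 60.0, 25.0],
        "pledge_ratio": [5.0, 0.0, 20.0],
        "cash_dividend_yield": [3.2, 1.8, 0.5],
    })
    mask = (
        (demo["debt_ratio"] < 40)
        & (demo["insider_plus_institutional"] > 30)
        & (demo["pledge_ratio"] < 10)
        & (demo["cash_dividend_yield"] > 1)
    )
    return demo[mask]

print(demo_stock_screen())  # only rows passing every threshold remain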
import os
import math
import copy
import random
import calendar
import csv
import pandas as pd
import numpy as np
import networkx as nx
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
import matplotlib.ticker as ticker
import sqlite3
import seaborn as sns
#from atnresilience import atn_analysis as atn
import atn_analysis
import db_tools
# Set global styles for plots
plt.rcParams["font.family"] = "Times New Roman"
sns.set_palette("colorblind")
matplotlib.rc('xtick', labelsize=8)
matplotlib.rc('ytick', labelsize=8)
line_type = {1:'-',2:'--',3:':',4:'-.'}
def remove_frequency(db_path, file, airline, include_data, can_limit, zs_limit, processed_direc):
"""
Creates a dictionary of airports and their removal frequency for a given airline
Parameters
----------
file: int
Year of selected data
airline: string
Airline to get data from
include_data: string
Type of airline data to query from csv
can_limit: int
Cancellation limit
zs_limit: int
The z-score limit
Returns
-------
Returns a dictionary containing airport removal frequency values
Notes
-----
"""
df_net_tuple = pd.DataFrame()
df_net = atn_analysis.raw_query(db_path, file, airline)
df_net_tuple["Origin"] = df_net.Origin_Airport_Code
df_net_tuple["Destination"] = df_net.Destination_Airport_Code
graph = [tuple(x) for x in df_net_tuple.to_records(index=False)]
G = nx.Graph()
G.add_edges_from(graph)
tempG = G.copy()
Airport_Dict = {}
for i in G.nodes():
Airport_Dict[i] = 0
Total_List = get_remove_list(db_path, file,include_data, airline, can_limit, zs_limit, processed_direc)
if int(file)%4 == 0:
total_day = 366
else:
total_day = 365
for j in range(total_day):
airport_list = Total_List[j]
for l in airport_list:
tempG.remove_node(l)
Airport_Dict[l] = Airport_Dict[l] + 1
tempG = G.copy()
return(Airport_Dict)
def weighted_edge(db_path, file, airline):
"""
Creates a data frame of origin airports, destination airports and weights for each route
Parameters
----------
file: int
Year of selected data
airline: string
Airline to get data from
Returns
-------
Returns a data frame containing each respective weighted route from an origin airport to a destination
Notes
-----
"""
df = atn_analysis.raw_query(db_path, file, airline)
by_origin = df.groupby([df.Origin_Airport_Code]).Can_Status.count()
airport_list = by_origin.index.tolist()
df = df[df['Destination_Airport_Code'].isin(airport_list)]
df_tuple = pd.DataFrame()
df_weighted = df.groupby([df.Origin_Airport_Code, df.Destination_Airport_Code]).Can_Status.count().reset_index()
df_tuple["Origin"] = df_weighted.Origin_Airport_Code
df_tuple["Destination"] = df_weighted.Destination_Airport_Code
file_str = int(str(file)[:4])
if calendar.isleap(file_str) == 1:
days = 366
else:
days = 365
df_tuple["Weight"] = df_weighted.Can_Status
weight_values = [math.log(y, 10) for y in df_tuple.Weight.values]
for i in range(0, len(weight_values)):
df_tuple.Weight.values[i] = weight_values[i]
return(df_tuple)
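# --- Hedged illustration (not from the original module) -----------------------
# The route-weighting idea used in weighted_edge above, shown on a toy frame:
# flights are counted per (origin, destination) pair and the count is
# log10-scaled. The airport codes and flight rows are illustrative assumptions.
def _demo_route_weights():
    flights = pd.DataFrame({
        "Origin_Airport_Code": ["ATL", "ATL", "ATL", "ORD"],
        "Destination_Airport_Code": ["ORD", "ORD", "LAX", "ATL"],
        "Can_Status": [0, 0, 1, 0],
    })
    counts = (flights.groupby(["Origin_Airport_Code", "Destination_Airport_Code"])
              .Can_Status.count().reset_index(name="Weight"))
    counts["Weight"] = np.log10(counts["Weight"].astype(float))  # log-scaled flight counts
    return counts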
def get_remove_list(db_path, file, include_data, airline, can_limit, zs_limit, processed_direc):
"""
    Return a remove_list in a year (airline specific, include_data specific) based on cancellation limit and z_score limit.
Parameters
----------
file: int
Year of selected data
include_data: string
Specify what kind of data to include in processed flight data. See drop_flights in M-D File. Possible parameters are:
CC: Cancellations only
ADD: Arrival delays including diversions
ADM: Purely arrival delays excluding cancellations or diversions
DCC: Combined delay. If arrival delay is greater than a set threshold, the flight is considered cancelled
DD: Departure delays. Does not include cancelled or diverted flights.
airline: string
Airline to get data from. This is the 2 letter airline code (ex: AA, UA, DL, WN)
can_limit: float
Cancellation Limit. Between 0 and 1
zs_limit: float
z-score limit. Between 0 and 1
Returns
-------
    list
        A list with one entry per day, each a list of airports to remove on that day
Notes
-----
"""
z_score_path = '%s%s_%s_Zdata_%s.csv'%(processed_direc, file,airline,include_data)
#df_score = pd.read_csv(raw_file_drop, index_col="Date")
df_score = pd.read_csv(z_score_path, index_col = "Day_of_Year")
df_score.index = pd.to_datetime(df_score.index)
airport_list = df_score.columns.tolist()
df = atn_analysis.raw_query(db_path,file,airline)
df = df[df['Origin_Airport_Code'].isin(airport_list)] # Filtering to make sure airports are equal in both directions
df = df[df['Destination_Airport_Code'].isin(airport_list)]
by_origin_count = df.groupby(['Flight_Date', 'Origin_Airport_Code'], as_index=False)[['Can_Status']].count()
by_origin = df.groupby(['Flight_Date', 'Origin_Airport_Code'], as_index=False)[['Can_Status']].sum()
by_origin.Can_Status = by_origin.Can_Status / by_origin_count.Can_Status
#print(by_origin)
df_score["idx"] = df_score.index
df_score = pd.melt(df_score, id_vars='idx', value_vars=airport_list)
df_score = df_score.sort_values(['idx', 'variable'], ascending=[True, True])
df_score.columns = ["Date", "Airports", "Z_Score"]
df_score.set_index('Date')
df_score["Cancellations"] = by_origin.Can_Status
### Creating the or conditions. First is the percentage of delayed flights and the second is the z-score
df_score["Z_score_9901"] = np.where((df_score['Cancellations'] > can_limit) | (df_score['Z_Score'] > zs_limit), 1, 0)
#print(df_score)
### Creating pivot table for easy manipulation. This creates the date as the index with the properties corresponding to
### it and finally repeats this trend for all airports being considered.
df_pivot = df_score.pivot_table('Z_score_9901', ['Date'], 'Airports')
#print(df_pivot)
s = np.asarray(np.where(df_pivot == 1, ['{}'.format(x) for x in df_pivot.columns], '')).tolist()
s_nested = []
for k in s:
p = list(filter(None,k))
#p = filter(None,k)
s_nested.append(p)
#s_nested.extend(p)
return s_nested
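# --- Hedged illustration (not from the original module) -----------------------
# Shows, on a toy frame, the flag-and-pivot pattern used in get_remove_list above:
# an airport is flagged for removal on a day when either its cancellation fraction
# or its z-score exceeds its threshold, and the pivoted table is walked day by day.
# The airports, dates, and values are illustrative assumptions.
def _demo_flag_and_pivot(can_limit=0.3, zs_limit=1.5):
    demo = pd.DataFrame({
        "Date": pd.to_datetime(["2019-01-01"] * 2 + ["2019-01-02"] * 2),
        "Airports": ["ATL", "ORD"] * 2,
        "Z_Score": [0.2, 2.0, 0.1, 0.3],
        "Cancellations": [0.05, 0.10, 0.50, 0.02],
    })
    demo["Flag"] = np.where((demo["Cancellations"] > can_limit) |
                            (demo["Z_Score"] > zs_limit), 1, 0)
    pivot = demo.pivot_table("Flag", ["Date"], "Airports")
    # one removal list per day, e.g. [['ORD'], ['ATL']] for the toy data above
    return [row[row == 1].index.tolist() for _, row in pivot.iterrows()]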
def inv_average_shortest_path_length(graph, weight=None):
"""
    Computes the unweighted inverse average path length (IAPL) of a graph
Parameters
----------
graph: python graph object
weight: default
Returns
-------
    Returns the unweighted IAPL value of the graph
Notes
-----
"""
avg = 0.0
if weight is None:
for node in graph:
avg_path_length = nx.single_source_shortest_path_length(graph, node) # get the shortest path lengths from source to all reachable nodes (unweighted)
del avg_path_length[node] # Deletes source node from the list to avoid division by 0
inv_avg_path_length = copy.deepcopy(avg_path_length)
inv_avg_path_length.update((x, 1/y) for x, y in avg_path_length.items())
avg += sum(inv_avg_path_length.values())
n = len(graph)
if n == 1 or n == 0:
return 0
else:
return avg/(n*(n-1))
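# --- Hedged illustration (not from the original module) -----------------------
# Worked example of the unweighted IAPL defined above on a 3-node path graph
# 0-1-2: the four ordered pairs at distance 1 contribute 1 each and the two pairs
# at distance 2 contribute 1/2 each, so IAPL = (4*1 + 2*0.5) / (3*2) = 5/6.
def _demo_unweighted_iapl():
    demo_graph = nx.path_graph(3)
    return inv_average_shortest_path_length(demo_graph)   # ≈ 0.8333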
def inv_average_shortest_path_length_W(graph, weight=None):
"""
    Computes the weighted inverse average path length (IAPL) of a graph
Parameters
----------
graph: python graph object
weight: default
Returns
-------
    Returns the weighted IAPL value of the graph
Notes
-----
"""
avg = 0.0
if weight is None:
for node in graph:
avg_path_length = nx.single_source_dijkstra_path_length(graph, node) # get the shortest path lengths from source to all reachable nodes (weighted)
del avg_path_length[node] # Deletes source node from the list to avoid division by 0
inv_avg_path_length = copy.deepcopy(avg_path_length)
inv_avg_path_length.update((x, 1/y) for x, y in avg_path_length.items())
avg += sum(inv_avg_path_length.values())
n = len(graph)
if n == 1 or n == 0:
return 0
else:
return avg/(n*(n-1))
def Data_Driven_W(file_list, airline_list, include_data, can_limit, zs_limit, processed_direc, graph_direc):
"""
Calculate the cluster size and IAPL for each day in a year after removal based on data-driven method.
Parameters
----------
    file_list: list
        List containing years to process
    airline_list: list
        List containing airlines to process
include_data: string
Specify what kind of data to include in processed flight data. See drop_flights in M-D File. Possible parameters are:
CC: Cancellations only
ADD: Arrival delays including diversions
ADM: Purely arrival delays excluding cancellations or diversions
DCC: Combined delay. If arrival delay is greater than a set threshold, the flight is considered cancelled
DD: Departure delays. Does not include cancelled or diverted flights.
can_limit: float
Cancellation threshold
zs_limit: float
z-score threshold
Returns
-------
The cluster size and IAPL for each day of the year after removal based on data-driven method.
Notes
-----
"""
for file in file_list:
## iteration of years first
figure_num = 1
CSV_df = pd.DataFrame(columns = airline_list)
for airline in airline_list:
# CSV_df[airline] = [1,2,3,4]
# CSV_file = "%s_DD_IAPL.csv" %(file)
# CSV_df.to_csv(CSV_file, index=False)
## Get the directory path
script_dir = os.path.dirname(os.getcwd())
db_local_path = "data/processed/atn_db.sqlite"
## df set up from Keshav (NO CHANGE) (Weighted Graph)
df = pd.DataFrame()
db_path = os.path.join(script_dir, db_local_path)
fields = ["Origin_Airport_Code", "Destination_Airport_Code", "Can_Status"]
df_net = atn_analysis.raw_query(db_path,file,airline)
df["Origin_Airport_Code"] = df_net.Origin_Airport_Code
df["Destination_Airport_Code"] = df_net.Destination_Airport_Code
df["Can_Status"] = df_net.Can_Status
by_origin = df.groupby([df.Origin_Airport_Code]).Can_Status.count()
airport_list = by_origin.index.tolist()
df = df[df['Destination_Airport_Code'].isin(airport_list)]
#print(df)
df_tuple = pd.DataFrame()
df_weighted = df.groupby([df.Origin_Airport_Code, df.Destination_Airport_Code]).Can_Status.count().reset_index()
df_tuple["Origin"] = df_weighted.Origin_Airport_Code
df_tuple["Destination"] = df_weighted.Destination_Airport_Code
if int(file)%4 == 0:
days = 366
else:
days = 365
df_tuple["Weight"] = df_weighted.Can_Status/days
df_tuple.Weight = 1/df_tuple.Weight
## Output lists initialization:
#day_IAPL = 0
day_CS = 0
#output_IAPL = []
output_CS = []
NoD = []
## Graph object initialization
graph = [tuple(x) for x in df_tuple.to_records(index=False)]
G = nx.Graph()
## Set up the weighted graph
G.add_weighted_edges_from(graph)
#print(G.nodes())
tempG = G.copy() #use temporary graph for the loop
## Remove list for the whole year
Total_Remove_List = get_remove_list(db_path,file,include_data, airline, can_limit, zs_limit,processed_direc)
if int(file)%4 == 0:
total_day = 366
else:
total_day = 365
for j in range(total_day):
## Remove the nodes in each day and get the CS and IAPL data
#day_IAPL = 0
Day_Remove_List = Total_Remove_List[j]
NoD.append(j)
for l in Day_Remove_List:
tempG.remove_node(l)
#largest_component_b = max(nx.connected_components(tempG), key=len)
#day_IAPL =(inv_average_shortest_path_length_W(tempG))
largest_component_b = max(nx.connected_components(tempG), key=len)
day_CS = len(largest_component_b)
#len(largest_component_b) = cluster size
#cluster fraction = cluster size/number of nodes
#output_IAPL.append(day_IAPL)
output_CS.append(day_CS)
#sum_IAPL = sum_IAPL + (inv_average_shortest_path_length(tempG))
tempG = G.copy()
## plotting command
plt.figure(figure_num)
#line = plt.plot(NoD,output_IAPL, label="{}".format(airline))
line = plt.plot(NoD,output_CS, label="{}".format(airline))
plt.legend()
#CSV_df[airline] = output_IAPL
CSV_df[airline] = output_CS
#CSV_file = "%s_DD_IAPL.csv" %(file)
CSV_file = "%s%s_DD_CS.csv" %(graph_direc,file)
CSV_df.to_csv(CSV_file, index=False)
#plt.title("{} Data Driven IAPL".format(str(file)))
plt.xlabel("Day")
#plt.ylabel("IAPL")
plt.ylabel("Cluster Size")
#plt.savefig("{}_Data_Driven_IAPL.png".format(str(file)))
plt.savefig("%s%s_Data_Driven_CS.png"%(graph_direc,file))
plt.show()
figure_num = figure_num + 1
def Pure_Graph_W_Shu(file_list, airline_list, include_data, processed_direc, rep_num):
"""
    Calculate the algebraic connectivity, cluster size and IAPL for each year after random removal based on the pure graph method.
    Random removal order is generated with random.shuffle.
Parameters
----------
    file_list: list
        List containing years to process
    airline_list: list
        List containing airlines to process
include_data: string
Specify what kind of data to include in processed flight data. See drop_flights in M-D File. Possible parameters are:
CC: Cancellations only
ADD: Arrival delays including diversions
ADM: Purely arrival delays excluding cancellations or diversions
DCC: Combined delay. If arrival delay is greater than a set threshold, the flight is considered cancelled
DD: Departure delays. Does not include cancelled or diverted flights.
rep_num: int
        Number of repetitions
Returns
-------
    csv files with the algebraic connectivity, IAPL, and cluster size for each year after random removal.
Notes
-----
"""
for airline in airline_list:
rep_ite = 1
Total_AC = []
Total_Cluster_Size = []
Total_IAPL = []
for i in range(len(file_list)):
## initialize the output lists
Total_AC.append(0)
Total_Cluster_Size.append(0)
Total_IAPL.append(0)
## Save the data in csv
filename1 = "%s%s_ACR.csv" %(processed_direc,airline)
with open(filename1, 'w') as myfile1:
wr1 = csv.writer(myfile1, quoting=csv.QUOTE_ALL)
wr1.writerow(file_list)
filename2 = "%s%s_IAPLR.csv" %(processed_direc,airline)
with open(filename2, 'w') as myfile2:
wr2 = csv.writer(myfile2, quoting=csv.QUOTE_ALL)
wr2.writerow(file_list)
filename3 = "%s%s_CSR.csv" %(processed_direc,airline)
with open(filename3, 'w') as myfile3:
wr3 = csv.writer(myfile3, quoting=csv.QUOTE_ALL)
wr3.writerow(file_list)
while rep_ite < rep_num+1:
            ## start the repetition
year_IAPL = []
year_Cluster_Size = []
year_AC = []
for file in file_list:
## Get the directory path
script_dir = os.path.dirname(os.getcwd())
db_local_path = "data/processed/atn_db.sqlite"
## df set up from Keshav (NO CHANGE)
df = pd.DataFrame()
db_path = os.path.join(script_dir, db_local_path)
fields = ["Origin_Airport_Code", "Destination_Airport_Code", "Can_Status"]
#df_net = pd.read_csv(comb_file, usecols=fields)
df_net = atn_analysis.raw_query(db_path,file,airline)
df["Origin_Airport_Code"] = df_net.Origin_Airport_Code
df["Destination_Airport_Code"] = df_net.Destination_Airport_Code
df["Can_Status"] = df_net.Can_Status
by_origin = df.groupby([df.Origin_Airport_Code]).Can_Status.count()
airport_list = by_origin.index.tolist()
df = df[df['Destination_Airport_Code'].isin(airport_list)]
#print(df)
df_tuple = pd.DataFrame()
df_weighted = df.groupby([df.Origin_Airport_Code, df.Destination_Airport_Code]).Can_Status.count().reset_index()
df_tuple["Origin"] = df_weighted.Origin_Airport_Code
df_tuple["Destination"] = df_weighted.Destination_Airport_Code
if int(file)%4 == 0:
days = 366
else:
days = 365
df_tuple["Weight"] = df_weighted.Can_Status/days
df_tuple.Weight = 1/df_tuple.Weight
## Output lists initialization:
## Graph object initialization
graph = [tuple(x) for x in df_tuple.to_records(index=False)]
G = nx.Graph()
G.add_weighted_edges_from(graph)
NodeNum = G.number_of_nodes()
#print('Weighted Alebraic Connectivity: ', nx.algebraic_connectivity(G))
year_AC.append(nx.algebraic_connectivity(G))
sum_IAPL = 0
sum_Cluster_Size = 0
IAPL_list = []
Cluster_Size_list = []
Remove_List = []
for node in G.nodes():
## Get the list of the airports
Remove_List.append(node)
## Shuffle the lists
random.shuffle(Remove_List)
for l in Remove_List:
G.remove_node(l)
if len(G.nodes()) != 0:
## Add up the data after removing each node
largest_component_b = max(nx.connected_components(G), key=len)
IAPL_list.append(inv_average_shortest_path_length_W(G))
Cluster_Size_list.append(len(largest_component_b)/NodeNum)
sum_IAPL = sum_IAPL + (inv_average_shortest_path_length_W(G))
sum_Cluster_Size = sum_Cluster_Size + len(largest_component_b)/NodeNum
## Save the data of the year
year_IAPL.append(sum_IAPL)
year_Cluster_Size.append(sum_Cluster_Size)
with open(filename1, 'a') as myfile1:
wr1 = csv.writer(myfile1, quoting=csv.QUOTE_ALL)
wr1.writerow(year_AC)
with open(filename2, 'a') as myfile2:
wr2 = csv.writer(myfile2, quoting=csv.QUOTE_ALL)
wr2.writerow(year_IAPL)
with open(filename3, 'a') as myfile3:
wr3 = csv.writer(myfile3, quoting=csv.QUOTE_ALL)
wr3.writerow(year_Cluster_Size)
# print('Unweighted Summation of IAPL: ', sum_IAPL)
# print('Unweighted Summation of Cluster Size: ', sum_Cluster_Size)
# print('Unweighted IAPL list', IAPL_list)
for i in range(len(file_list)):
## Get the sum for the average
Total_AC[i] = Total_AC[i] + year_AC[i]
                Total_IAPL[i] = Total_IAPL[i] + year_IAPL[i]
Total_Cluster_Size[i] = Total_Cluster_Size[i] + year_Cluster_Size[i]
rep_ite = rep_ite + 1
for i in range(len(file_list)):
## Get the average
Total_AC[i] = Total_AC[i]/rep_num
Total_IAPL[i] = Total_IAPL[i]/rep_num
Total_Cluster_Size[i] = Total_Cluster_Size[i]/rep_num
## Plotting Command:
plt.figure(num=1,figsize=(2.8,2.0),dpi=300)
# line1 = plt.plot(file_list,Total_IAPL, label="{}".format(airline))
plt.plot(file_list,Total_IAPL, label="{}".format(airline))
plt.legend()
plt.figure(num=2,figsize=(2.8,2.0),dpi=300)
# line2 = plt.plot(file_list,Total_Cluster_Size, label="{}".format(airline))
plt.plot(file_list,Total_Cluster_Size, label="{}".format(airline))
plt.legend()
plt.figure(num=3,figsize=(2.8,2.0),dpi=300)
# line3 = plt.plot(file_list,Total_AC, label="{}".format(airline))
plt.plot(file_list,Total_AC, label="{}".format(airline))
plt.legend()
plt.figure(1)
plt.title("IAPL (Random)")
plt.xlabel("Year")
plt.ylabel("IAPL")
plt.savefig("Pure_Graph_IAPLR.png")
plt.figure(2)
plt.title("Cluster Size (Random)")
plt.xlabel("Year")
plt.ylabel("Cluster Size")
plt.savefig("Pure_Graph_CSR.png")
plt.figure(3)
plt.title("Algebraic Connectivity (Random)")
plt.xlabel("Year")
plt.ylabel("Algebraic Connectivity")
plt.savefig("Pure_Graph_ACR.png")
plt.show()
def Pure_Graph_W_Tar(file_list,airline_list,processed_direc,graph_direc):
"""
    Calculate the algebraic connectivity, cluster size and IAPL for each year after targeted removal based on the pure graph method.
    Targeted removal is ordered by node degree: the node with the highest degree is removed first (degrees calculated with edge weights set to flight frequency).
Parameters
----------
    file_list: list
        List containing years to process
    airline_list: list
        List containing airlines to process
Returns
-------
    Graph with the removals.
Notes
-----
"""
line_type_iter = 0
for airline in airline_list:
line_type_iter += 1
year_IAPL = []
year_Cluster_Size = []
year_AC = []
for file in file_list:
## Get the directory path
script_dir = os.path.dirname(os.getcwd())
#comb_path = "data/processed/%s_%s_combined.csv" % (file,airline)
db_local_path = "data/processed/atn_db.sqlite"
## df set up from Keshav (NO CHANGE)
df = | pd.DataFrame() | pandas.DataFrame |
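# --- Hedged illustration (not part of the original module above) --------------
# Degree-targeted removal on a toy weighted graph, in the spirit of
# Pure_Graph_W_Tar: nodes are removed in descending degree order and the largest
# connected-component fraction is recorded after every removal. The edge list is
# an illustrative assumption.
import networkx as nx

def demo_targeted_removal():
    G = nx.Graph()
    G.add_weighted_edges_from([("A", "B", 1.0), ("A", "C", 2.0),
                               ("A", "D", 1.5), ("B", "C", 1.0)])
    n_start = G.number_of_nodes()
    order = sorted(G.degree, key=lambda kv: kv[1], reverse=True)  # highest degree first
    fractions = []
    for node, _deg in order:
        G.remove_node(node)
        if G.number_of_nodes():
            giant = max(nx.connected_components(G), key=len)
            fractions.append(len(giant) / n_start)
        else:
            fractions.append(0.0)
    return fractions

print(demo_targeted_removal())  # [0.5, 0.25, 0.25, 0.0] for the toy graph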
# -*- coding: utf-8 -*-
import os
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import tensorflow as tf
from data import read_marcap, load_datas_scaled
from model import build_model_rnn
data_dir = './data'
if not os.path.exists(data_dir):
data_dir = '../data'
print(os.listdir(data_dir))
marcap_dir = os.path.join(data_dir, 'marcap')
marcap_data = os.path.join(marcap_dir, 'data')
os.listdir(marcap_data)
train_start = pd.to_datetime('2000-01-01')
train_end = pd.to_datetime('2020-06-30')
test_start = pd.to_datetime('2020-08-01')
test_end = pd.to_datetime('2020-10-31')
train_start, test_end
# Samsung Electro-Mechanics (삼성전기), stock code '009150'
df_sem = read_marcap(train_start, test_end, ['009150'], marcap_data)
df_sem.drop(df_sem[df_sem['Marcap'] == 0].index, inplace=True)
df_sem.drop(df_sem[df_sem['Amount'] == 0].index, inplace=True)
df_sem['LogMarcap'] = np.log(df_sem['Marcap'])
df_sem['LogAmount'] = np.log(df_sem['Amount'])
df_sem
n_seq = 10
x_cols = ['LogMarcap', 'LogAmount', 'Open', 'High', 'Low', 'Close']
y_col = 'LogMarcap'
train_inputs, train_labels, test_inputs, test_labels, scaler_dic = load_datas_scaled(df_sem, x_cols, y_col, train_start, train_end, test_start, test_end, n_seq)
train_inputs.shape, train_labels.shape, test_inputs.shape, test_labels.shape
model = build_model_rnn(n_seq, len(x_cols))
model.summary()
model.compile(loss=tf.losses.MeanSquaredError(), optimizer=tf.optimizers.Adam())
# early stopping
early_stopping = tf.keras.callbacks.EarlyStopping(monitor='val_loss', patience=10)
# save weights
save_weights = tf.keras.callbacks.ModelCheckpoint(os.path.join('1corp_scaled_logmarcap_logamount_prices.hdf5'), monitor='val_loss', verbose=1, save_best_only=True, mode='min', save_freq='epoch', save_weights_only=True)
# log training metrics to csv
csv_log = tf.keras.callbacks.CSVLogger(os.path.join('1corp_scaled_logmarcap_logamount_prices.csv'), separator=',', append=False)
# train
history = model.fit(train_inputs, train_labels, epochs=100, batch_size=32, validation_data=(test_inputs, test_labels), callbacks=[early_stopping, save_weights, csv_log])
model = build_model_rnn(n_seq, len(x_cols))
model.summary()
model.load_weights(save_weights.filepath)
#
# train eval
#
n_batch = 32
train_preds = []
for i in range(0, len(train_inputs), n_batch):
batch_inputs = train_inputs[i:i + n_batch]
y_pred = model.predict(batch_inputs)
y_pred = y_pred.squeeze(axis=-1)
train_preds.extend(y_pred)
train_preds = np.array(train_preds)
assert len(train_labels) == len(train_preds)
train_labels.shape, train_preds.shape
scaler = scaler_dic[y_col]
train_labels_scaled = [scaler.inv_scale_value(v) for v in train_labels]
train_preds_scaled = [scaler.inv_scale_value(v) for v in train_preds]
train_labels_log = np.array(train_labels_scaled)
train_preds_log = np.array(train_preds_scaled)
plt.figure(figsize=(16, 4))
plt.plot(train_labels_log, 'b-', label='y_true')
plt.plot(train_preds_log, 'r--', label='y_pred')
plt.legend()
plt.show()
plt.figure(figsize=(16, 4))
plt.plot(train_labels_log - train_preds_log, 'g-', label='y_diff')
plt.legend()
plt.show()
# https://bkshin.tistory.com/entry/%EB%A8%B8%EC%8B%A0%EB%9F%AC%EB%8B%9D-17-%ED%9A%8C%EA%B7%80-%ED%8F%89%EA%B0%80-%EC%A7%80%ED%91%9C
# https://m.blog.naver.com/PostView.nhn?blogId=limitsinx&logNo=221578145366&proxyReferer=https:%2F%2Fwww.google.com%2F
rmse = tf.sqrt(tf.keras.losses.MSE(train_labels_log, train_preds_log))
mae = tf.keras.losses.MAE(train_labels_log, train_preds_log)
mape = tf.keras.losses.MAPE(train_labels_log, train_preds_log)
print( | pd.DataFrame([rmse, mae, mape], index=['RMSE', 'MAE', 'MAPE']) | pandas.DataFrame |
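# --- Hedged illustration (not part of the original script above) --------------
# The three regression metrics evaluated above (RMSE, MAE, MAPE), written out
# explicitly with numpy for a pair of arrays; the sample values are illustrative.
import numpy as np

def demo_regression_metrics(y_true, y_pred):
    y_true = np.asarray(y_true, dtype=float)
    y_pred = np.asarray(y_pred, dtype=float)
    rmse = np.sqrt(np.mean((y_true - y_pred) ** 2))              # root mean squared error
    mae = np.mean(np.abs(y_true - y_pred))                       # mean absolute error
    mape = 100.0 * np.mean(np.abs((y_true - y_pred) / y_true))   # mean absolute percentage error
    return rmse, mae, mape

print(demo_regression_metrics([1.0, 2.0, 4.0], [1.5, 2.0, 3.0]))  # (≈0.645, 0.5, 25.0)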
import numpy as np
import pandas as pd
import os
from preprocessor import Preprocessor
import keras_tuner as kt
import tensorflow as tf
from trainer import build_model
def get_x_train() -> pd.DataFrame:
return pd.read_csv("data/X_train.csv")
def get_y_train() -> pd.DataFrame:
return pd.read_csv("data/y_train.csv")
def get_x_test() -> pd.DataFrame:
return | pd.read_csv("data/X_test.csv") | pandas.read_csv |
"""Tests for dynamic validator."""
from datetime import date, datetime
import numpy as np
import pandas as pd
from delphi_validator.report import ValidationReport
from delphi_validator.dynamic import DynamicValidator
class TestCheckRapidChange:
params = {"data_source": "", "span_length": 1,
"end_date": "2020-09-02", "expected_lag": {}}
def test_same_df(self):
validator = DynamicValidator(self.params)
report = ValidationReport([])
test_df = pd.DataFrame([date.today()] * 5, columns=["time_value"])
ref_df = pd.DataFrame([date.today()] * 5, columns=["time_value"])
validator.check_rapid_change_num_rows(
test_df, ref_df, date.today(), "geo", "signal", report)
assert len(report.raised_errors) == 0
def test_0_vs_many(self):
validator = DynamicValidator(self.params)
report = ValidationReport([])
time_value = datetime.combine(date.today(), datetime.min.time())
test_df = pd.DataFrame([time_value] * 5, columns=["time_value"])
ref_df = pd.DataFrame([time_value] * 1, columns=["time_value"])
validator.check_rapid_change_num_rows(
test_df, ref_df, time_value, "geo", "signal", report)
assert len(report.raised_errors) == 1
assert "check_rapid_change_num_rows" in [
err.check_data_id[0] for err in report.raised_errors]
class TestCheckAvgValDiffs:
params = {"data_source": "", "span_length": 1,
"end_date": "2020-09-02", "expected_lag": {}}
def test_same_val(self):
validator = DynamicValidator(self.params)
report = ValidationReport([])
data = {"val": [1, 1, 1, 2, 0, 1], "se": [np.nan] * 6,
"sample_size": [np.nan] * 6, "geo_id": ["1"] * 6}
test_df = pd.DataFrame(data)
ref_df = pd.DataFrame(data)
validator.check_avg_val_vs_reference(
test_df, ref_df, date.today(), "geo", "signal", report)
assert len(report.raised_errors) == 0
def test_same_se(self):
validator = DynamicValidator(self.params)
report = ValidationReport([])
data = {"val": [np.nan] * 6, "se": [1, 1, 1, 2, 0, 1],
"sample_size": [np.nan] * 6, "geo_id": ["1"] * 6}
test_df = pd.DataFrame(data)
ref_df = pd.DataFrame(data)
validator.check_avg_val_vs_reference(
test_df, ref_df, date.today(), "geo", "signal", report)
assert len(report.raised_errors) == 0
def test_same_n(self):
validator = DynamicValidator(self.params)
report = ValidationReport([])
data = {"val": [np.nan] * 6, "se": [np.nan] * 6,
"sample_size": [1, 1, 1, 2, 0, 1], "geo_id": ["1"] * 6}
test_df = pd.DataFrame(data)
ref_df = pd.DataFrame(data)
validator.check_avg_val_vs_reference(
test_df, ref_df, date.today(), "geo", "signal", report)
assert len(report.raised_errors) == 0
def test_same_val_se_n(self):
validator = DynamicValidator(self.params)
report = ValidationReport([])
data = {"val": [1, 1, 1, 2, 0, 1], "se": [1, 1, 1, 2, 0, 1],
"sample_size": [1, 1, 1, 2, 0, 1], "geo_id": ["1"] * 6}
test_df = pd.DataFrame(data)
ref_df = pd.DataFrame(data)
validator.check_avg_val_vs_reference(
test_df, ref_df, date.today(), "geo", "signal", report)
assert len(report.raised_errors) == 0
def test_10x_val(self):
validator = DynamicValidator(self.params)
report = ValidationReport([])
test_data = {"val": [1, 1, 1, 20, 0, 1], "se": [np.nan] * 6,
"sample_size": [np.nan] * 6, "geo_id": ["1"] * 6}
ref_data = {"val": [1, 1, 1, 2, 0, 1], "se": [np.nan] * 6,
"sample_size": [np.nan] * 6, "geo_id": ["1"] * 6}
test_df = pd.DataFrame(test_data)
ref_df = pd.DataFrame(ref_data)
validator.check_avg_val_vs_reference(
test_df, ref_df,
datetime.combine(date.today(), datetime.min.time()), "geo", "signal", report)
assert len(report.raised_errors) == 0
def test_100x_val(self):
validator = DynamicValidator(self.params)
report = ValidationReport([])
test_data = {"val": [1, 1, 1, 200, 0, 1], "se": [np.nan] * 6,
"sample_size": [np.nan] * 6, "geo_id": ["1"] * 6}
ref_data = {"val": [1, 1, 1, 2, 0, 1], "se": [np.nan] * 6,
"sample_size": [np.nan] * 6, "geo_id": ["1"] * 6}
test_df = pd.DataFrame(test_data)
ref_df = pd.DataFrame(ref_data)
validator.check_avg_val_vs_reference(
test_df, ref_df,
datetime.combine(date.today(), datetime.min.time()), "geo", "signal", report)
assert len(report.raised_errors) == 1
assert "check_test_vs_reference_avg_changed" in [
err.check_data_id[0] for err in report.raised_errors]
def test_1000x_val(self):
validator = DynamicValidator(self.params)
report = ValidationReport([])
test_data = {"val": [1, 1, 1, 2000, 0, 1], "se": [np.nan] * 6,
"sample_size": [np.nan] * 6, "geo_id": ["1"] * 6}
ref_data = {"val": [1, 1, 1, 2, 0, 1], "se": [np.nan] * 6,
"sample_size": [np.nan] * 6, "geo_id": ["1"] * 6}
test_df = pd.DataFrame(test_data)
ref_df = pd.DataFrame(ref_data)
validator.check_avg_val_vs_reference(
test_df, ref_df,
datetime.combine(date.today(), datetime.min.time()), "geo", "signal", report)
assert len(report.raised_errors) == 1
assert "check_test_vs_reference_avg_changed" in [
err.check_data_id[0] for err in report.raised_errors]
class TestDataOutlier:
params = {"data_source": "", "span_length": 1,
"end_date": "2020-09-02", "expected_lag": {}}
pd.set_option("display.max_rows", None, "display.max_columns", None)
    # Tests that flag outliers in the test data against the reference data; each case has lead and lag outliers
def test_pos_outlier(self):
validator = DynamicValidator(self.params)
report = ValidationReport([])
ref_val = [30, 30.28571429, 30.57142857, 30.85714286, 31.14285714,
31.42857143, 31.71428571, 32, 32, 32.14285714,
32.28571429, 32.42857143, 32.57142857, 32.71428571,
32.85714286, 33, 33, 33, 33, 33, 33, 33, 33,
33, 33, 33, 33.28571429, 33.57142857, 33.85714286, 34.14285714]
test_val = [100, 100, 100]
ref_data = {"val": ref_val , "se": [np.nan] * len(ref_val),
"sample_size": [np.nan] * len(ref_val), "geo_id": ["1"] * len(ref_val),
"time_value": pd.date_range(start="2020-09-24", end="2020-10-23")}
test_data = {"val": test_val , "se": [np.nan] * len(test_val),
"sample_size": [np.nan] * len(test_val), "geo_id": ["1"] * len(test_val),
"time_value": pd.date_range(start="2020-10-24", end="2020-10-26")}
ref_data2 = {"val": ref_val , "se": [np.nan] * len(ref_val),
"sample_size": [np.nan] * len(ref_val), "geo_id": ["2"] * len(ref_val),
"time_value": pd.date_range(start="2020-09-24", end="2020-10-23")}
test_data2 = {"val": test_val , "se": [np.nan] * len(test_val),
"sample_size": [np.nan] * len(test_val), "geo_id": ["2"] * len(test_val),
"time_value": pd.date_range(start="2020-10-24",end="2020-10-26")}
ref_df = pd.concat([pd.DataFrame(ref_data), pd.DataFrame(ref_data2)]).reset_index(drop=True)
test_df = pd.concat([pd.DataFrame(test_data), pd.DataFrame(test_data2)]). \
reset_index(drop=True)
validator.check_positive_negative_spikes(
test_df, ref_df, "state", "signal", report)
assert len(report.raised_errors) == 1
assert "check_positive_negative_spikes" in [
err.check_data_id[0] for err in report.raised_errors]
def test_neg_outlier(self):
validator = DynamicValidator(self.params)
report = ValidationReport([])
ref_val = [100, 101, 100, 101, 100,
100, 100, 100, 100, 100,
100, 102, 100, 100, 100,
100, 100, 101, 100, 100,
100, 100, 100, 99, 100,
100, 98, 100, 100, 100]
test_val = [10, 10, 10]
ref_data = {"val": ref_val , "se": [np.nan] * len(ref_val),
"sample_size": [np.nan] * len(ref_val), "geo_id": ["1"] * len(ref_val),
"time_value": pd.date_range(start="2020-09-24",end="2020-10-23")}
test_data = {"val": test_val , "se": [np.nan] * len(test_val),
"sample_size": [np.nan] * len(test_val), "geo_id": ["1"] * len(test_val),
"time_value": pd.date_range(start="2020-10-24",end="2020-10-26")}
ref_data2 = {"val": ref_val , "se": [np.nan] * len(ref_val),
"sample_size": [np.nan] * len(ref_val), "geo_id": ["2"] * len(ref_val),
"time_value": pd.date_range(start="2020-09-24",end="2020-10-23")}
test_data2 = {"val": test_val , "se": [np.nan] * len(test_val),
"sample_size": [np.nan] * len(test_val), "geo_id": ["2"] * len(test_val),
"time_value": pd.date_range(start="2020-10-24",end="2020-10-26")}
ref_df = pd.concat([pd.DataFrame(ref_data), pd.DataFrame(ref_data2)]). \
reset_index(drop=True)
test_df = pd.concat([pd.DataFrame(test_data), pd.DataFrame(test_data2)]). \
reset_index(drop=True)
validator.check_positive_negative_spikes(
test_df, ref_df, "state", "signal", report)
assert len(report.raised_errors) == 1
assert "check_positive_negative_spikes" in [
err.check_data_id[0] for err in report.raised_errors]
def test_zero_outlier(self):
validator = DynamicValidator(self.params)
report = ValidationReport([])
ref_val = [30, 30.28571429, 30.57142857, 30.85714286, 31.14285714,
31.42857143, 31.71428571, 32, 32, 32.14285714,
32.28571429, 32.42857143, 32.57142857, 32.71428571,
32.85714286, 33, 33, 33, 33, 33, 33, 33, 33,
33, 33, 33, 33.28571429, 33.57142857, 33.85714286, 34.14285714]
test_val = [0, 0, 0]
ref_data = {"val": ref_val , "se": [np.nan] * len(ref_val),
"sample_size": [np.nan] * len(ref_val), "geo_id": ["1"] * len(ref_val),
"time_value": pd.date_range(start="2020-09-24",end="2020-10-23")}
test_data = {"val": test_val , "se": [np.nan] * len(test_val),
"sample_size": [np.nan] * len(test_val), "geo_id": ["1"] * len(test_val),
"time_value": pd.date_range(start="2020-10-24",end="2020-10-26")}
ref_data2 = {"val": ref_val , "se": [np.nan] * len(ref_val),
"sample_size": [np.nan] * len(ref_val), "geo_id": ["2"] * len(ref_val),
"time_value": pd.date_range(start="2020-09-24",end="2020-10-23")}
test_data2 = {"val": test_val , "se": [np.nan] * len(test_val),
"sample_size": [np.nan] * len(test_val), "geo_id": ["2"] * len(test_val),
"time_value": pd.date_range(start="2020-10-24",end="2020-10-26")}
ref_df = pd.concat([pd.DataFrame(ref_data), pd.DataFrame(ref_data2)]). \
reset_index(drop=True)
test_df = pd.concat([pd.DataFrame(test_data), pd.DataFrame(test_data2)]). \
reset_index(drop=True)
validator.check_positive_negative_spikes(
test_df, ref_df, "state", "signal", report)
assert len(report.raised_errors) == 1
assert "check_positive_negative_spikes" in [
err.check_data_id[0] for err in report.raised_errors]
def test_no_outlier(self):
validator = DynamicValidator(self.params)
report = ValidationReport([])
#Data from 51580 between 9/24 and 10/26 (10/25 query date)
ref_val = [30, 30.28571429, 30.57142857, 30.85714286, 31.14285714,
31.42857143, 31.71428571, 32, 32, 32.14285714,
32.28571429, 32.42857143, 32.57142857, 32.71428571,
32.85714286, 33, 33, 33, 33, 33, 33, 33, 33,
33, 33, 33, 33, 33, 33, 33]
test_val = [33, 33, 33]
ref_data = {"val": ref_val , "se": [np.nan] * len(ref_val),
"sample_size": [np.nan] * len(ref_val), "geo_id": ["1"] * len(ref_val),
"time_value": pd.date_range(start="2020-09-24",end="2020-10-23")}
test_data = {"val": test_val , "se": [np.nan] * len(test_val),
"sample_size": [np.nan] * len(test_val), "geo_id": ["1"] * len(test_val),
"time_value": pd.date_range(start="2020-10-24",end="2020-10-26")}
ref_data2 = {"val": ref_val , "se": [np.nan] * len(ref_val),
"sample_size": [np.nan] * len(ref_val), "geo_id": ["2"] * len(ref_val),
"time_value": pd.date_range(start="2020-09-24",end="2020-10-23")}
test_data2 = {"val": test_val , "se": [np.nan] * len(test_val),
"sample_size": [np.nan] * len(test_val), "geo_id": ["2"] * len(test_val),
"time_value": pd.date_range(start="2020-10-24",end="2020-10-26")}
ref_df = pd.concat([pd.DataFrame(ref_data), pd.DataFrame(ref_data2)]). \
reset_index(drop=True)
test_df = pd.concat([pd.DataFrame(test_data), pd.DataFrame(test_data2)]). \
reset_index(drop=True)
validator.check_positive_negative_spikes(
test_df, ref_df, "state", "signal", report)
assert len(report.raised_errors) == 0
def test_source_api_overlap(self):
validator = DynamicValidator(self.params)
report = ValidationReport([])
#Data from 51580 between 9/24 and 10/26 (10/25 query date)
ref_val = [30, 30.28571429, 30.57142857, 30.85714286, 31.14285714,
31.42857143, 31.71428571, 32, 32, 32.14285714,
32.28571429, 32.42857143, 32.57142857, 32.71428571,
32.85714286, 33, 33, 33, 33, 33, 33, 33, 33, 33,
33, 33, 33, 33, 33, 33, 33, 33, 33]
test_val = [100, 100, 100]
ref_data = {"val": ref_val , "se": [np.nan] * len(ref_val),
"sample_size": [np.nan] * len(ref_val), "geo_id": ["1"] * len(ref_val),
"time_value": pd.date_range(start="2020-09-24",end="2020-10-26")}
test_data = {"val": test_val , "se": [np.nan] * len(test_val),
"sample_size": [np.nan] * len(test_val), "geo_id": ["1"] * len(test_val),
"time_value": pd.date_range(start="2020-10-24",end="2020-10-26")}
ref_data2 = {"val": ref_val , "se": [np.nan] * len(ref_val),
"sample_size": [np.nan] * len(ref_val), "geo_id": ["2"] * len(ref_val),
"time_value": pd.date_range(start="2020-09-24",end="2020-10-26")}
test_data2 = {"val": test_val , "se": [np.nan] * len(test_val),
"sample_size": [np.nan] * len(test_val), "geo_id": ["2"] * len(test_val),
"time_value": pd.date_range(start="2020-10-24",end="2020-10-26")}
ref_df = pd.concat([ | pd.DataFrame(ref_data) | pandas.DataFrame |
#functions for data processing should include:
# a function for getting the epochs
# a function for making a spectrogram
import re
import glob
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import mne
from sklearn.svm import LinearSVC, SVC
import scipy.signal as spsig
from tkinter import filedialog
from tkinter import *
from sklearn.model_selection import ShuffleSplit, cross_val_score
from mne.decoding import CSP
from sklearn.pipeline import Pipeline
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
import pycaret.classification as pyclf
SAMS_PATH = r"C:\Users\Owner\OneDrive - Regis University\laryngeal_bci\data\fifs\\"
NATES_PATH = r"C:\Users\words\OneDrive - Regis University\laryngeal_bci\data\fifs\\"
# make a class to hold information from get epochs
class spectrogramData:
# It would be good to add in a way of storing an annotation description
# for each spectrogram so we can confirm the type of trial being represented
def __init__(self, spectrograms, frequencies, times):
self.spectrograms = spectrograms
self.frequencies = frequencies
self.times = times
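# --- Hedged usage sketch (not part of the original module) --------------------
# Builds a spectrogramData holder from a fake one-channel signal with scipy,
# mirroring how eegData.get_spectrograms below averages per-channel spectrograms
# into one object. The 10 Hz test tone and sampling rate are illustrative.
def _demo_spectrogram_data(fs=1000, seconds=5):
    t = np.arange(fs * seconds) / fs
    fake_signal = np.sin(2 * np.pi * 10 * t)          # 10 Hz test tone
    freqs, times, spec = spsig.spectrogram(fake_signal, fs=fs,
                                           nperseg=2000, noverlap=1000)
    return spectrogramData(spec, freqs, times)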
class eegData:
def __init__(self, path):
"""
        Path should be 'Nates', 'Sams', or the actual filepath to the data.
"""
if path == 'Nates':
self.path = r"C:\Users\words\OneDrive - Regis University\laryngeal_bci\data\fifs\\"
elif path == 'Sams':
self.path = r"C:\Users\Owner\OneDrive - Regis University\laryngeal_bci\data\fifs\\"
else:
self.path = path
self.alpha_spectrograms_true = None
self.alpha_spectrograms_false = None
self.SSVEP_spectrograms_true = None
self.SSVEP_spectrograms_false = None
self.MA_spectrograms_true = None
self.MA_spectrograms_false = None
self.MI_spectrograms_true = None
self.MI_spectrograms_false = None
self.LA_spectrograms_true = None
self.LA_spectrograms_false = None
self.LI_spectrograms_true = None
self.LI_spectrograms_false = None
self.MA_epochs = None
self.MI_epochs = None
self.LA_epochs = None
self.LI_epochs = None
self.viz_channels = ['O1', 'O2', 'P3', 'P4']
def load_data(self, filename):
return mne.io.read_raw_fif(filename, preload=True, verbose=0)
def clean_data(self, data, first_seconds_remove=2, bandpass_range=(5, 50)):
"""
        Bandpass filters the data, notch filters 60 Hz line noise and its harmonics,
        and removes the first `first_seconds_remove` seconds.
        Takes an MNE data object and a tuple for the bandpass filter range.
"""
# bandpass filter
data = data.filter(*bandpass_range)
data = data.notch_filter(np.arange(60, 241, 60))
# print(f'removing first {first_seconds_remove} seconds')
data.crop(first_seconds_remove)
return data
def flatten_bad_channels(self, data, channels):
"""
Sets bad channels to 0s.
Operates on data in-place.
data - MNE data object
channels - string or list of strings with channel names
"""
data.apply_function(lambda x: x * 0, channels)
def standardize_all_channels(self, data):
"""
Subtracts the mean from each channel, divide by the standard deviation.
"""
for channel in data.ch_names:
# get mean and std, subtract mean, divide by std
std = np.std(data.get_data(channel))
mean = np.mean(data.get_data(channel))
data.apply_function(lambda x: (x - mean)/std, channel)
return data
def load_clean_all_data(self, flatten=False, standardize=True):
"""
Loads and cleans all current data.
"""
filenames = [f for f in glob.glob(self.path + '*_raw.fif.gz')]
self.filenames = filenames
list_of_data = []
for f in filenames:
data = self.load_data(f)
data = self.clean_data(data)
if flatten:
                if re.search(r'N-\d\.2-22-2021', f) is not None:
                    self.flatten_bad_channels(data, 'P3')
                elif f == 'BCIproject_trial-S-1.3-4-2021_raw.fif.gz':
                    self.flatten_bad_channels(data, 'F8')
                elif f == 'BCIproject_trial-S-2.3-8-2021_raw.fif.gz':
                    self.flatten_bad_channels(data, 'Cz')
                elif f == 'BCIproject_trial-S-3.3-25-2021_raw.fif.gz':
                    pass
                    # self.flatten_bad_channels(data, ['Cz', 'C1'])
if standardize:
self.standardize_all_channels(data)
list_of_data.append(data)
all_data = mne.concatenate_raws(list_of_data)
self.annotation_descriptions = [i["description"] for i in all_data.annotations]
self.data = all_data
self.get_all_epochs()
self.get_all_spectrograms()
def load_clean_one_dataset(self, filename, flatten=False, standardize=True):
"""
Loads and cleans one dataset
"""
self.data = self.load_data(filename)
self.data = self.clean_data(self.data)
        if flatten:
            if re.search(r'N-\d\.2-22-2021', filename) is not None:
                self.flatten_bad_channels(self.data, 'P3')
            elif filename == 'BCIproject_trial-S-1.3-4-2021_raw.fif.gz':
                self.flatten_bad_channels(self.data, 'F8')
            elif filename == 'BCIproject_trial-S-2.3-8-2021_raw.fif.gz':
                self.flatten_bad_channels(self.data, 'Cz')
            elif filename == 'BCIproject_trial-S-3.3-25-2021_raw.fif.gz':
                pass
                # self.flatten_bad_channels(self.data, ['Cz', 'C1'])
if standardize:
self.standardize_all_channels(self.data)
self.annotation_descriptions = self.data.annotations
self.get_all_epochs()
self.get_all_spectrograms()
def get_epochs(self, annotation_regexp):
"""
Retrieves epochs with a label via the annotation_regexp (regular expression).
"""
events, eventid = mne.events_from_annotations(self.data, regexp=annotation_regexp, verbose=0)
picks = mne.pick_types(self.data.info, eeg=True)
epochs = mne.Epochs(self.data, events, tmin=0, tmax=5, picks=picks, preload=True, baseline=None, verbose=0)
return epochs
def get_all_epochs(self):
"""
Stores all epochs for separate events in attributes.
"""
epochs_variables = ['alpha_epochs',
'SSVEP_epochs',
'MA_epochs',
'MI_epochs',
'LA_epochs',
'LI_epochs']
epochs_regexps = ['.*alpha', '.*SSVEP.*', '.*-TMI-a', '.*-TMI-i', '.*-LMI-a', '.*-LMI-i']
for var, regexp in zip(epochs_variables, epochs_regexps):
try:
epochs = self.get_epochs(regexp)
setattr(self, var, epochs)
except:
print(f'no epochs found for {regexp}')
def get_spectrograms(self, annotation_regexp, variable_for_storing_spectrogram, nperseg=2000, noverlap=1000, channels=None):
if channels is None:
channels = self.viz_channels
        elif channels == 'all':
channels = self.data.ch_names
epochs = self.get_epochs(annotation_regexp=annotation_regexp)
spectData = []
# adjusts times so they match the spectrograms
time_add = nperseg / self.data.info['sfreq'] - 1
for i in range(len(epochs)):
spectrograms = []
channel_data = epochs[i].pick_channels(channels).get_data()[0]
for k in range(channel_data.shape[0]):
# frequency, time, intensity (shape f*t)
frequencies, times, spectrogram = spsig.spectrogram(channel_data[k, :],
fs=int(self.data.info['sfreq']),
nperseg=nperseg,
noverlap=noverlap)
spectrograms.append(spectrogram)
average_spectogram = np.mean(np.array(spectrograms), axis=0)
spectData.append(spectrogramData(average_spectogram, frequencies, times))
setattr(self, variable_for_storing_spectrogram, spectData)
def get_all_spectrograms(self):
spectrogram_variables = ['alpha_spectrograms_true',
                                 'alpha_spectrograms_false',
                                 'SSVEP_spectrograms_true',
                                 'SSVEP_spectrograms_false',
'MA_spectrograms_true',
'MA_spectrograms_false',
'MI_spectrograms_true',
'MI_spectrograms_false',
'LA_spectrograms_true',
'LA_spectrograms_false',
'LI_spectrograms_true',
'LI_spectrograms_false']
annotation_regular_expressions = ['True-alpha-',
'False-alpha-',
'True-SSVEP-.*',
'False-SSVEP-.*',
'True-TMI-a-',
'False-TMI-a-',
'True-TMI-i-',
'False-TMI-i-',
'True-LMI-a-',
'False-LMI-a-',
'True-LMI-i-',
'False-LMI-i-']
motor_channels = self.data.ch_names
channels = [self.viz_channels] * 4 + [motor_channels] * 8
for annot_regex, spect_var, chans in zip(annotation_regular_expressions, spectrogram_variables, channels):
try:
self.get_spectrograms(annot_regex, spect_var, channels=chans)
except:
print(f'no epochs found for {annot_regex}')
def create_alpha_spectrograms(self, nperseg=2000, noverlap=1000, channels=None):
"""
        Create the true and false alpha-wave spectrograms used for display.
"""
if channels is None:
channels = self.viz_channels
self.get_spectrograms('True-alpha-', 'alpha_spectrograms_true', nperseg=nperseg, noverlap=noverlap, channels=channels)
self.get_spectrograms('False-alpha-', 'alpha_spectrograms_false', nperseg=nperseg, noverlap=noverlap, channels=channels)
def plot_spectrogram(self, spectrogram_data, savefig=False, filename=None, ylim=[5, 50], vmax=None):
"""Plots a spectrogram of FFT.
Parameters
----------
spectrogram_data : spectrogramData object
savefig : boolean
Whether to save the figure to disk.
filename : str
File name of the saved image.
"""
f = plt.figure(figsize=(5, 5))
plt.pcolormesh(spectrogram_data.times,
spectrogram_data.frequencies,
spectrogram_data.spectrograms,
shading='gouraud',
vmax=vmax)
plt.ylabel('Frequency [Hz]')
plt.xlabel('Time [sec]')
plt.ylim(ylim)
plt.colorbar()
plt.tight_layout()
plt.show()
if savefig:
if filename is None:
filename = 'saved_plot.png'
plt.savefig(filename, dpi=300)
def plot_spectrograms_2d_comparison(self, spectrogram_data_1, spectrogram_data_2, labels, filename=None):
"""
Compares spectrograms from two different epoch types.
"""
frequencies = spectrogram_data_1[0].frequencies
avg_spec_1 = np.array([s.spectrograms.mean(axis=1) for s in spectrogram_data_1]).mean(axis=0)
avg_spec_2 = np.array([s.spectrograms.mean(axis=1) for s in spectrogram_data_2]).mean(axis=0)
f = plt.figure(figsize=(5.5, 5.5))
plt.plot(frequencies, avg_spec_1, label=labels[0])
plt.plot(frequencies, avg_spec_2, linestyle='--', label=labels[1])
plt.legend()
plt.xlim([0, 50])
plt.xlabel('Frequency (Hz)')
plt.ylabel('Intensity (a.u.)')
plt.tight_layout()
if filename is not None:
plt.savefig(filename, dpi=300)
plt.show()
def plot_all_alpha_spectrograms(self, channels=None, reset_spectrograms=False, vmax=50):
"""
Plots all alpha spectrograms.
"""
if channels is None:
channels = self.viz_channels
if self.alpha_spectrograms_true is None or reset_spectrograms:
self.create_alpha_spectrograms(channels=channels)
for i in range(len(self.alpha_spectrograms_false)):
print("False Alpha : " + str(i))
self.plot_spectrogram(self.alpha_spectrograms_false[i], vmax=vmax)
for i in range(len(self.alpha_spectrograms_true)):
print("True Alpha : " + str(i))
self.plot_spectrogram(self.alpha_spectrograms_true[i], vmax=vmax)
def create_SSVEP_spectrograms(self, nperseg=2000, noverlap=1000, channels=None):
"""
Create the ssvep spectrograms
"""
self.get_spectrograms('True-SSVEP-.*', 'SSVEP_spectrograms_true', nperseg=nperseg, noverlap=noverlap, channels=channels)
self.get_spectrograms('False-SSVEP-.*', 'SSVEP_spectrograms_false', nperseg=nperseg, noverlap=noverlap, channels=channels)
def create_LMI_a_spectrograms(self, nperseg=2000, noverlap=1000, channels='all'):
"""
Create the laryngeal activity based spectrograms
"""
self.get_spectrograms('True-LMI-a-', 'LMI_a_spectrograms_true', nperseg=nperseg, noverlap=noverlap, channels=channels)
self.get_spectrograms('False-LMI-a-', 'LMI_a_spectrograms_false', nperseg=nperseg, noverlap=noverlap, channels=channels)
def create_LMI_i_spectrograms(self, nperseg=2000, noverlap=1000, channels='all'):
"""
        Create the laryngeal imagery based spectrograms
"""
self.get_spectrograms('True-LMI-i-', 'LMI_i_spectrograms_true', nperseg=nperseg, noverlap=noverlap, channels=channels)
self.get_spectrograms('False-LMI-i-', 'LMI_i_spectrograms_false', nperseg=nperseg, noverlap=noverlap, channels=channels)
def plot_all_SSVEP_spectrograms(self, channels=None, reset_spectrograms=False, vmax=5):
"""
Plot the SSVEP spectrograms
"""
if channels is None:
channels = self.viz_channels
if self.SSVEP_spectrograms_false is None or reset_spectrograms:
self.create_SSVEP_spectrograms(channels=channels)
for i in range(len(self.SSVEP_spectrograms_false)):
print("False SSVEP : " + str(i))
self.plot_spectrogram(self.SSVEP_spectrograms_false[i], vmax=vmax)
for i in range(len(self.SSVEP_spectrograms_false)):
print("True SSVEP : " + str(i))
self.plot_spectrogram(self.SSVEP_spectrograms_true[i], vmax=vmax)
def prepare_SSVEP_data_for_ml(self, f1=None, f2=None, train_fraction=0.8, num_groups=3):
# indices for trimming data; frequencies are in 0.5Hz increments from 0 to 500
freq_idxs = (10, 101)
np.random.seed(42)
self.SSVEP_test_df = None
if f1 is None or f2 is None:
if self.SSVEP_spectrograms_true is None:
self.create_SSVEP_spectrograms()
f1_spectrograms, f1_frequencies, f1_times, f1_groups = [], [], [], []
f2_spectrograms, f2_frequencies, f2_times, f2_groups = [], [], [], []
for i in range(len(self.SSVEP_spectrograms_false)):
f1_spectrograms.append(self.SSVEP_spectrograms_true[i].spectrograms[freq_idxs[0]:freq_idxs[1]])
f1_frequencies.append(self.SSVEP_spectrograms_true[i].frequencies[freq_idxs[0]:freq_idxs[1]])
f1_times.append(self.SSVEP_spectrograms_true[i].times)
f1_groups.append(len(self.SSVEP_spectrograms_true[i].times) * [i])
f2_spectrograms.append(self.SSVEP_spectrograms_false[i].spectrograms[freq_idxs[0]:freq_idxs[1]])
f2_frequencies.append(self.SSVEP_spectrograms_false[i].frequencies[freq_idxs[0]:freq_idxs[1]])
f2_times.append(self.SSVEP_spectrograms_false[i].times)
f2_groups.append(len(self.SSVEP_spectrograms_false[i].times) * [i])
f1 = spectrogramData(f1_spectrograms, f1_frequencies, f1_times)
f2 = spectrogramData(f2_spectrograms, f2_frequencies, f2_times)
num_train_samples = int(train_fraction * len(f1.spectrograms))
idxs = list(range(len(f1.spectrograms)))
train_idxs = np.random.choice(idxs, num_train_samples, replace=False)
train_f1s = np.concatenate([f1.spectrograms[i] for i in train_idxs], axis=1)
train_f2s = np.concatenate([f2.spectrograms[i] for i in train_idxs], axis=1)
train_f1_groups = np.concatenate([f1_groups[i] for i in train_idxs])
train_f2_groups = np.concatenate([f2_groups[i] for i in train_idxs])
if train_fraction < 1:
test_idxs = list(set(idxs).difference(set(train_idxs)))
test_f1s = np.concatenate([f1.spectrograms[i] for i in test_idxs], axis=1)
test_f2s = np.concatenate([f2.spectrograms[i] for i in test_idxs], axis=1)
test_f1_groups = np.concatenate([f1_groups[i] for i in test_idxs])
test_f2_groups = np.concatenate([f1_groups[i] for i in test_idxs])
train_features = np.concatenate((train_f1s, train_f2s), axis=-1)
train_features = train_features.T
train_targets = np.array([1] * train_f1s.shape[1] + [0] * train_f2s.shape[1])
train_groups = np.concatenate((train_f1_groups, train_f2_groups))
if train_fraction < 1:
test_features = np.concatenate((test_f1s, test_f2s), axis=-1)
test_targets = np.array([1] * test_f1s.shape[1] + [0] * test_f2s.shape[1])
test_groups = np.concatenate((test_f1_groups, test_f2_groups))
test_features = test_features.T
self.SSVEP_train_df = pd.DataFrame(train_features)
self.SSVEP_train_df['target'] = train_targets
self.SSVEP_train_df['group'] = train_groups
# required for pycaret to work if targets are the actual frequencies
# self.SSVEP_train_df['target'] = self.SSVEP_train_df['target'].astype('category')
if train_fraction < 1:
self.SSVEP_test_df = pd.DataFrame(test_features)
self.SSVEP_test_df['target'] = test_targets
# self.SSVEP_test_df['target'] = self.SSVEP_test_df['target'].astype('category')
self.SSVEP_test_df['group'] = test_groups
experiments_per_group = self.SSVEP_train_df['group'].unique().shape[0] // num_groups
unique_groups = self.SSVEP_train_df['group'].unique()
np.random.shuffle(unique_groups)
group_idxs = []
for i in range(num_groups):
if i == num_groups - 1: # if last group
experiments = unique_groups[i * experiments_per_group:]
else:
experiments = unique_groups[i * experiments_per_group:(i + 1) * experiments_per_group]
group_idxs.append(self.SSVEP_train_df.loc[self.SSVEP_train_df['group'].isin(experiments)].index)
for i, idxs in enumerate(group_idxs):
self.SSVEP_train_df.loc[idxs, 'group'] = i
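# Minimal usage sketch (`session` is a hypothetical instance of this class with the
# recordings/spectrogram attributes already loaded):
#   session.prepare_SSVEP_data_for_ml(train_fraction=0.8, num_groups=3)
#   session.fit_SSVEP_ML_and_report(num_groups=3)
#   session.SSVEP_score_grid  # pycaret score grid for the compared/tuned models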
def prepare_LMI_a_data_for_ml(self, f1=None, f2=None, train_fraction=0.8, num_groups=3):
# indices for trimming data; frequencies are in 0.5Hz increments from 0 to 500
freq_idxs = (10, 101)
np.random.seed(42)
self.LMI_a_test_df = None
if f1 is None or f2 is None:
if self.LMI_a_spectrograms_true is None:
self.create_LMI_a_spectrograms()
f1_spectrograms, f1_frequencies, f1_times, f1_groups = [], [], [], []
f2_spectrograms, f2_frequencies, f2_times, f2_groups = [], [], [], []
for i in range(len(self.LMI_a_spectrograms_false)):
f1_spectrograms.append(self.LMI_a_spectrograms_true[i].spectrograms[freq_idxs[0]:freq_idxs[1]])
f1_frequencies.append(self.LMI_a_spectrograms_true[i].frequencies[freq_idxs[0]:freq_idxs[1]])
f1_times.append(self.LMI_a_spectrograms_true[i].times)
f1_groups.append(len(self.LMI_a_spectrograms_true[i].times) * [i])
f2_spectrograms.append(self.LMI_a_spectrograms_false[i].spectrograms[freq_idxs[0]:freq_idxs[1]])
f2_frequencies.append(self.LMI_a_spectrograms_false[i].frequencies[freq_idxs[0]:freq_idxs[1]])
f2_times.append(self.LMI_a_spectrograms_false[i].times)
f2_groups.append(len(self.LMI_a_spectrograms_false[i].times) * [i])
f1 = spectrogramData(f1_spectrograms, f1_frequencies, f1_times)
f2 = spectrogramData(f2_spectrograms, f2_frequencies, f2_times)
num_train_samples = int(train_fraction * len(f1.spectrograms))
idxs = list(range(len(f1.spectrograms)))
train_idxs = np.random.choice(idxs, num_train_samples, replace=False)
train_f1s = np.concatenate([f1.spectrograms[i] for i in train_idxs], axis=1)
train_f2s = np.concatenate([f2.spectrograms[i] for i in train_idxs], axis=1)
train_f1_groups = np.concatenate([f1_groups[i] for i in train_idxs])
train_f2_groups = np.concatenate([f2_groups[i] for i in train_idxs])
if train_fraction < 1:
test_idxs = list(set(idxs).difference(set(train_idxs)))
test_f1s = np.concatenate([f1.spectrograms[i] for i in test_idxs], axis=1)
test_f2s = np.concatenate([f2.spectrograms[i] for i in test_idxs], axis=1)
test_f1_groups = np.concatenate([f1_groups[i] for i in test_idxs])
test_f2_groups = np.concatenate([f2_groups[i] for i in test_idxs])
train_features = np.concatenate((train_f1s, train_f2s), axis=-1)
train_features = train_features.T
train_targets = np.array([1] * train_f1s.shape[1] + [0] * train_f2s.shape[1])
train_groups = np.concatenate((train_f1_groups, train_f2_groups))
self.LMI_a_train_df = pd.DataFrame(train_features)
self.LMI_a_train_df['target'] = train_targets
self.LMI_a_train_df['group'] = train_groups
if train_fraction < 1:
test_features = np.concatenate((test_f1s, test_f2s), axis=-1)
test_targets = np.array([1] * test_f1s.shape[1] + [0] * test_f2s.shape[1])
test_groups = np.concatenate((test_f1_groups, test_f2_groups))
test_features = test_features.T
self.LMI_a_test_df = pd.DataFrame(test_features)
self.LMI_a_test_df['target'] = test_targets
self.LMI_a_test_df['group'] = test_groups
experiments_per_group = self.LMI_a_train_df['group'].unique().shape[0] // num_groups
unique_groups = self.LMI_a_train_df['group'].unique()
np.random.shuffle(unique_groups)
group_idxs = []
for i in range(num_groups):
if i == num_groups - 1: # if last group
experiments = unique_groups[i * experiments_per_group:]
else:
experiments = unique_groups[i * experiments_per_group:(i + 1) * experiments_per_group]
group_idxs.append(self.LMI_a_train_df.loc[self.LMI_a_train_df['group'].isin(experiments)].index)
for i, idxs in enumerate(group_idxs):
self.LMI_a_train_df.loc[idxs, 'group'] = i
def prepare_LMI_i_data_for_ml(self, f1=None, f2=None, train_fraction=0.8, num_groups=3):
# indices for trimming data; frequencies are in 0.5Hz increments from 0 to 500
freq_idxs = (10, 101)
np.random.seed(42)
self.LMI_i_test_df = None
if f1 is None or f2 is None:
if self.LMI_i_spectrograms_true is None:
self.create_LMI_i_spectrograms()
f1_spectrograms, f1_frequencies, f1_times, f1_groups = [], [], [], []
f2_spectrograms, f2_frequencies, f2_times, f2_groups = [], [], [], []
for i in range(len(self.LMI_i_spectrograms_false)):
f1_spectrograms.append(self.LMI_i_spectrograms_true[i].spectrograms[freq_idxs[0]:freq_idxs[1]])
f1_frequencies.append(self.LMI_i_spectrograms_true[i].frequencies[freq_idxs[0]:freq_idxs[1]])
f1_times.append(self.LMI_i_spectrograms_true[i].times)
f1_groups.append(len(self.LMI_i_spectrograms_true[i].times) * [i])
f2_spectrograms.append(self.LMI_i_spectrograms_false[i].spectrograms[freq_idxs[0]:freq_idxs[1]])
f2_frequencies.append(self.LMI_i_spectrograms_false[i].frequencies[freq_idxs[0]:freq_idxs[1]])
f2_times.append(self.LMI_i_spectrograms_false[i].times)
f2_groups.append(len(self.LMI_i_spectrograms_false[i].times) * [i])
f1 = spectrogramData(f1_spectrograms, f1_frequencies, f1_times)
f2 = spectrogramData(f2_spectrograms, f2_frequencies, f2_times)
num_train_samples = int(train_fraction * len(f1.spectrograms))
idxs = list(range(len(f1.spectrograms)))
train_idxs = np.random.choice(idxs, num_train_samples, replace=False)
train_f1s = np.concatenate([f1.spectrograms[i] for i in train_idxs], axis=1)
train_f2s = np.concatenate([f2.spectrograms[i] for i in train_idxs], axis=1)
train_f1_groups = np.concatenate([f1_groups[i] for i in train_idxs])
train_f2_groups = np.concatenate([f2_groups[i] for i in train_idxs])
if train_fraction < 1:
test_idxs = list(set(idxs).difference(set(train_idxs)))
test_f1s = np.concatenate([f1.spectrograms[i] for i in test_idxs], axis=1)
test_f2s = np.concatenate([f2.spectrograms[i] for i in test_idxs], axis=1)
test_f1_groups = np.concatenate([f1_groups[i] for i in test_idxs])
test_f2_groups = np.concatenate([f2_groups[i] for i in test_idxs])
train_features = np.concatenate((train_f1s, train_f2s), axis=-1)
train_features = train_features.T
train_targets = np.array([1] * train_f1s.shape[1] + [0] * train_f2s.shape[1])
train_groups = np.concatenate((train_f1_groups, train_f2_groups))
self.LMI_i_train_df = pd.DataFrame(train_features)
self.LMI_i_train_df['target'] = train_targets
self.LMI_i_train_df['group'] = train_groups
if train_fraction < 1:
test_features = np.concatenate((test_f1s, test_f2s), axis=-1)
test_targets = np.array([1] * test_f1s.shape[1] + [0] * test_f2s.shape[1])
test_groups = np.concatenate((test_f1_groups, test_f2_groups))
test_features = test_features.T
self.LMI_i_test_df = pd.DataFrame(test_features)
self.LMI_i_test_df['target'] = test_targets
self.LMI_i_test_df['group'] = test_groups
experiments_per_group = self.LMI_i_train_df['group'].unique().shape[0] // num_groups
unique_groups = self.LMI_i_train_df['group'].unique()
np.random.shuffle(unique_groups)
group_idxs = []
for i in range(num_groups):
if i == num_groups - 1: # if last group
experiments = unique_groups[i * experiments_per_group:]
else:
experiments = unique_groups[i * experiments_per_group:(i + 1) * experiments_per_group]
group_idxs.append(self.LMI_i_train_df.loc[self.LMI_i_train_df['group'].isin(experiments)].index)
for i, idxs in enumerate(group_idxs):
self.LMI_i_train_df.loc[idxs, 'group'] = i
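# Note: the three prepare_*_data_for_ml methods follow the same pipeline (trim the
# spectrograms, concatenate per-experiment time slices, split train/test by experiment,
# then bin the training experiments into `num_groups` CV groups); they differ only in
# which spectrogram set (SSVEP, LMI_a or LMI_i) they consume.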
def fit_LMI_a_ML_and_report(self, num_groups=3, use_gpu=False):
groups = self.LMI_a_train_df.group
if self.LMI_a_test_df is None:
self.LMI_a_pycaret_setup = pyclf.setup(data=self.LMI_a_train_df.drop('group', axis=1),
target='target',
use_gpu=use_gpu,
fold_strategy='groupkfold',
fold_groups=groups,
fold=num_groups,
silent=True)
else:
self.LMI_a_pycaret_setup = pyclf.setup(data=self.LMI_a_train_df.drop('group', axis=1),
test_data=self.LMI_a_test_df.drop('group', axis=1),
target='target',
use_gpu=use_gpu,
fold_strategy='groupkfold',
fold_groups=groups,
fold=num_groups,
silent=True)
models = pyclf.models()
fit_models = pyclf.compare_models(groups=groups, n_select=models.shape[0])
# now tune and select top model
tuned = [pyclf.tune_model(model, search_library='scikit-optimize', groups=groups) for model in fit_models]
self.best_LMI_a_clf = pyclf.compare_models(tuned, groups=groups)
self.LMI_a_score_grid = pyclf.pull()
def fit_LMI_i_ML_and_report(self, num_groups=3, use_gpu=False):
groups = self.LMI_i_train_df.group
if self.LMI_i_test_df is None:
self.LMI_i_pycaret_setup = pyclf.setup(data=self.LMI_i_train_df.drop('group', axis=1),
target='target',
use_gpu=use_gpu,
fold_strategy='groupkfold',
fold_groups=groups,
fold=num_groups,
silent=True)
else:
self.LMI_i_pycaret_setup = pyclf.setup(data=self.LMI_i_train_df.drop('group', axis=1),
test_data=self.LMI_i_test_df.drop('group', axis=1),
target='target',
use_gpu=use_gpu,
fold_strategy='groupkfold',
fold_groups=groups,
fold=num_groups,
silent=True)
models = pyclf.models()
fit_models = pyclf.compare_models(groups=groups, n_select=models.shape[0])
# now tune and select top model
tuned = [pyclf.tune_model(model, search_library='scikit-optimize', groups=groups) for model in fit_models]
self.best_LMI_i_clf = pyclf.compare_models(tuned, groups=groups)
self.LMI_i_score_grid = pyclf.pull()
def fit_SSVEP_ML_and_report(self, num_groups=3, use_gpu=False):
groups = self.SSVEP_train_df.group
if self.SSVEP_test_df is None:
self.SSVEP_pycaret_setup = pyclf.setup(data=self.SSVEP_train_df.drop('group', axis=1),
target='target',
use_gpu=use_gpu,
fold_strategy='groupkfold',
fold_groups=groups,
fold=num_groups,
silent=True)
else:
self.SSVEP_pycaret_setup = pyclf.setup(data=self.SSVEP_train_df.drop('group', axis=1),
test_data=self.SSVEP_test_df.drop('group', axis=1),
target='target',
use_gpu=use_gpu,
fold_strategy='groupkfold',
fold_groups=groups,
fold=num_groups,
silent=True)
models = pyclf.models()
fit_models = pyclf.compare_models(groups=groups, n_select=models.shape[0])
# now tune and select top model
tuned = [pyclf.tune_model(model, search_library='scikit-optimize', groups=groups) for model in fit_models]
self.best_SSVEP_clf = pyclf.compare_models(tuned, groups=groups)
self.SSVEP_score_grid = pyclf.pull()
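# The fit_*_ML_and_report methods share one pycaret flow: setup() with group-aware k-fold CV,
# compare_models() to fit every available estimator, tune_model() with scikit-optimize for
# each candidate, a final compare_models() over the tuned models, and pull() to capture the
# score grid of that last comparison.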
def fit_motor_imagery_and_report(self, train_fraction=0.8, num_groups=3):
np.random.seed(42)
# True is 2, False 1
labels = self.MI_epochs.events[:, -1] == 2
# throw away last point so number of points is 5000
epochs_data = self.MI_epochs.get_data()[:, :, :-1]
# create extra epochs by splitting each epoch into fifths
split_arrs = []
for i in range(epochs_data.shape[0]):
split_arrs.extend(np.split(epochs_data[i], 5, -1))
extra_epochs = np.stack(split_arrs)
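# shape goes from (n_epochs, n_channels, 5000) to (5 * n_epochs, n_channels, 1000)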
extra_labels = []
for l in labels:
extra_labels.extend([int(l)] * 5)
extra_labels = np.array(extra_labels)
true_counter = 0
false_counter = 0
groups = []
for event in labels:  # labels is already a boolean array (event code == 2)
if event:
groups.extend([true_counter] * 5)
true_counter += 1
else:
groups.extend([false_counter] * 5)
false_counter += 1
groups = np.array(groups)
unique_groups = np.unique(groups)
np.random.shuffle(unique_groups)
train_groups = np.random.choice(unique_groups, size=int(train_fraction * unique_groups.shape[0]), replace=False)  # sample distinct train groups
train_idxs = np.where(np.isin(groups, train_groups))
test_idxs = np.where(np.isin(groups, train_groups, invert=True))
self.MI_csp = CSP()
csp_data_train = self.MI_csp.fit_transform(extra_epochs[train_idxs], extra_labels[train_idxs])
csp_data_test = self.MI_csp.transform(extra_epochs[test_idxs])
self.mi_csp_df_train = pd.DataFrame(csp_data_train)
self.mi_csp_df_train['target'] = extra_labels[train_idxs]
self.mi_csp_df_train['group'] = groups[train_idxs]
self.mi_csp_df_test = pd.DataFrame(csp_data_test)
self.mi_csp_df_test['target'] = extra_labels[test_idxs]
self.mi_csp_df_test['group'] = groups[test_idxs]
experiments_per_group = train_groups.shape[0] // num_groups
unique_train_groups = self.mi_csp_df_train['group'].unique()
group_idxs = []
for i in range(num_groups):
if i == num_groups - 1: # if last group
experiments = unique_train_groups[i * experiments_per_group:]
else:
experiments = unique_train_groups[i * experiments_per_group:(i + 1) * experiments_per_group]
group_idxs.append(self.mi_csp_df_train.loc[self.mi_csp_df_train['group'].isin(experiments)].index)
for i, idxs in enumerate(group_idxs):
self.mi_csp_df_train.loc[idxs, 'group'] = i
self.mi_csp_df_test['group'] = num_groups
groups = self.mi_csp_df_train.group
self.mi_setup = pyclf.setup(data=self.mi_csp_df_train.drop('group', axis=1),
test_data=self.mi_csp_df_test.drop('group', axis=1),
target='target',
use_gpu=True,
fold_strategy='groupkfold',
fold_groups=groups,
fold=num_groups,
silent=True)
models = pyclf.models()
fit_models = pyclf.compare_models(groups=groups, n_select=models.shape[0])
# now tune and select top model
tuned = [pyclf.tune_model(model, search_library='scikit-optimize', groups=groups) for model in fit_models]
self.best_mi_clf = pyclf.compare_models(tuned, groups=groups)
self.mi_score_grid = pyclf.pull()
def fit_motor_actual_and_report(self, train_fraction=0.8, num_groups=3):
np.random.seed(42)
# True is 2, False 1
labels = self.MA_epochs.events[:, -1] == 2
# throw away last point so number of points is 5000
epochs_data = self.MA_epochs.get_data()[:, :, :-1]
# create extra epochs by splitting each epoch into fifths
split_arrs = []
for i in range(epochs_data.shape[0]):
split_arrs.extend(np.split(epochs_data[i], 5, -1))
extra_epochs = np.stack(split_arrs)
extra_labels = []
for l in labels:
extra_labels.extend([int(l)] * 5)
extra_labels = np.array(extra_labels)
true_counter = 0
false_counter = 0
groups = []
for event in labels:  # labels is already a boolean array (event code == 2)
if event:
groups.extend([true_counter] * 5)
true_counter += 1
else:
groups.extend([false_counter] * 5)
false_counter += 1
groups = np.array(groups)
unique_groups = np.unique(groups)
np.random.shuffle(unique_groups)
train_groups = np.random.choice(unique_groups, size=int(train_fraction * unique_groups.shape[0]), replace=False)  # sample distinct train groups
train_idxs = np.where(np.isin(groups, train_groups))
test_idxs = np.where(np.isin(groups, train_groups, invert=True))
self.MA_csp = CSP()
csp_data_train = self.MA_csp.fit_transform(extra_epochs[train_idxs], extra_labels[train_idxs])
csp_data_test = self.MA_csp.transform(extra_epochs[test_idxs])
self.ma_csp_df_train = pd.DataFrame(csp_data_train)
from collections import deque
from datetime import datetime
import operator
import numpy as np
import pytest
import pytz
import pandas as pd
import pandas._testing as tm
from pandas.tests.frame.common import _check_mixed_float, _check_mixed_int
# -------------------------------------------------------------------
# Comparisons
class TestFrameComparisons:
# Specifically _not_ flex-comparisons
def test_frame_in_list(self):
# GH#12689 this should raise at the DataFrame level, not blocks
df = pd.DataFrame(np.random.randn(6, 4), columns=list("ABCD"))
msg = "The truth value of a DataFrame is ambiguous"
with pytest.raises(ValueError, match=msg):
df in [None]
def test_comparison_invalid(self):
def check(df, df2):
for (x, y) in [(df, df2), (df2, df)]:
# we expect the result to match Series comparisons for
# == and !=, inequalities should raise
result = x == y
expected = pd.DataFrame(
{col: x[col] == y[col] for col in x.columns},
index=x.index,
columns=x.columns,
)
tm.assert_frame_equal(result, expected)
result = x != y
expected = pd.DataFrame(
{col: x[col] != y[col] for col in x.columns},
index=x.index,
columns=x.columns,
)
tm.assert_frame_equal(result, expected)
with pytest.raises(TypeError):
x >= y
with pytest.raises(TypeError):
x > y
with pytest.raises(TypeError):
x < y
with pytest.raises(TypeError):
x <= y
# GH4968
# invalid date/int comparisons
df = pd.DataFrame(np.random.randint(10, size=(10, 1)), columns=["a"])
df["dates"] = pd.date_range("20010101", periods=len(df))
df2 = df.copy()
df2["dates"] = df["a"]
check(df, df2)
df = pd.DataFrame(np.random.randint(10, size=(10, 2)), columns=["a", "b"])
df2 = pd.DataFrame(
{
"a": pd.date_range("20010101", periods=len(df)),
"b": pd.date_range("20100101", periods=len(df)),
}
)
check(df, df2)
def test_timestamp_compare(self):
# make sure we can compare Timestamps on the right AND left hand side
# GH#4982
df = pd.DataFrame(
{
"dates1": pd.date_range("20010101", periods=10),
"dates2": pd.date_range("20010102", periods=10),
"intcol": np.random.randint(1000000000, size=10),
"floatcol": np.random.randn(10),
"stringcol": list(tm.rands(10)),
}
)
df.loc[np.random.rand(len(df)) > 0.5, "dates2"] = pd.NaT
ops = {"gt": "lt", "lt": "gt", "ge": "le", "le": "ge", "eq": "eq", "ne": "ne"}
for left, right in ops.items():
left_f = getattr(operator, left)
right_f = getattr(operator, right)
# no nats
if left in ["eq", "ne"]:
expected = left_f(df, pd.Timestamp("20010109"))
result = right_f(pd.Timestamp("20010109"), df)
tm.assert_frame_equal(result, expected)
else:
with pytest.raises(TypeError):
left_f(df, pd.Timestamp("20010109"))
with pytest.raises(TypeError):
right_f(pd.Timestamp("20010109"), df)
# nats
expected = left_f(df, pd.Timestamp("nat"))
result = right_f(pd.Timestamp("nat"), df)
tm.assert_frame_equal(result, expected)
def test_mixed_comparison(self):
# GH#13128, GH#22163 != datetime64 vs non-dt64 should be False,
# not raise TypeError
# (this appears to be fixed before GH#22163, not sure when)
df = pd.DataFrame([["1989-08-01", 1], ["1989-08-01", 2]])
other = pd.DataFrame([["a", "b"], ["c", "d"]])
result = df == other
assert not result.any().any()
result = df != other
assert result.all().all()
def test_df_boolean_comparison_error(self):
# GH#4576, GH#22880
# comparing DataFrame against list/tuple with len(obj) matching
# len(df.columns) is supported as of GH#22800
df = pd.DataFrame(np.arange(6).reshape((3, 2)))
expected = pd.DataFrame([[False, False], [True, False], [False, False]])
result = df == (2, 2)
tm.assert_frame_equal(result, expected)
result = df == [2, 2]
tm.assert_frame_equal(result, expected)
def test_df_float_none_comparison(self):
df = pd.DataFrame(
np.random.randn(8, 3), index=range(8), columns=["A", "B", "C"]
)
result = df.__eq__(None)
assert not result.any().any()
def test_df_string_comparison(self):
df = pd.DataFrame([{"a": 1, "b": "foo"}, {"a": 2, "b": "bar"}])
mask_a = df.a > 1
tm.assert_frame_equal(df[mask_a], df.loc[1:1, :])
tm.assert_frame_equal(df[-mask_a], df.loc[0:0, :])
mask_b = df.b == "foo"
tm.assert_frame_equal(df[mask_b], df.loc[0:0, :])
tm.assert_frame_equal(df[-mask_b], df.loc[1:1, :])
class TestFrameFlexComparisons:
# TODO: test_bool_flex_frame needs a better name
def test_bool_flex_frame(self):
data = np.random.randn(5, 3)
other_data = np.random.randn(5, 3)
df = pd.DataFrame(data)
other = pd.DataFrame(other_data)
ndim_5 = np.ones(df.shape + (1, 3))
# Unaligned
def _check_unaligned_frame(meth, op, df, other):
part_o = other.loc[3:, 1:].copy()
rs = meth(part_o)
xp = op(df, part_o.reindex(index=df.index, columns=df.columns))
tm.assert_frame_equal(rs, xp)
# DataFrame
assert df.eq(df).values.all()
assert not df.ne(df).values.any()
for op in ["eq", "ne", "gt", "lt", "ge", "le"]:
f = getattr(df, op)
o = getattr(operator, op)
# No NAs
tm.assert_frame_equal(f(other), o(df, other))
_check_unaligned_frame(f, o, df, other)
# ndarray
tm.assert_frame_equal(f(other.values), o(df, other.values))
# scalar
tm.assert_frame_equal(f(0), o(df, 0))
# NAs
msg = "Unable to coerce to Series/DataFrame"
tm.assert_frame_equal(f(np.nan), o(df, np.nan))
with pytest.raises(ValueError, match=msg):
f(ndim_5)
# Series
def _test_seq(df, idx_ser, col_ser):
idx_eq = df.eq(idx_ser, axis=0)
col_eq = df.eq(col_ser)
idx_ne = df.ne(idx_ser, axis=0)
col_ne = df.ne(col_ser)
tm.assert_frame_equal(col_eq, df == pd.Series(col_ser))
tm.assert_frame_equal(col_eq, -col_ne)
tm.assert_frame_equal(idx_eq, -idx_ne)
tm.assert_frame_equal(idx_eq, df.T.eq(idx_ser).T)
tm.assert_frame_equal(col_eq, df.eq(list(col_ser)))
tm.assert_frame_equal(idx_eq, df.eq(pd.Series(idx_ser), axis=0))
tm.assert_frame_equal(idx_eq, df.eq(list(idx_ser), axis=0))
idx_gt = df.gt(idx_ser, axis=0)
col_gt = df.gt(col_ser)
idx_le = df.le(idx_ser, axis=0)
col_le = df.le(col_ser)
tm.assert_frame_equal(col_gt, df > pd.Series(col_ser))
tm.assert_frame_equal(col_gt, -col_le)
tm.assert_frame_equal(idx_gt, -idx_le)
tm.assert_frame_equal(idx_gt, df.T.gt(idx_ser).T)
idx_ge = df.ge(idx_ser, axis=0)
col_ge = df.ge(col_ser)
idx_lt = df.lt(idx_ser, axis=0)
col_lt = df.lt(col_ser)
tm.assert_frame_equal(col_ge, df >= pd.Series(col_ser))
tm.assert_frame_equal(col_ge, -col_lt)
tm.assert_frame_equal(idx_ge, -idx_lt)
tm.assert_frame_equal(idx_ge, df.T.ge(idx_ser).T)
idx_ser = pd.Series(np.random.randn(5))
col_ser = pd.Series(np.random.randn(3))
_test_seq(df, idx_ser, col_ser)
# list/tuple
_test_seq(df, idx_ser.values, col_ser.values)
# NA
df.loc[0, 0] = np.nan
rs = df.eq(df)
assert not rs.loc[0, 0]
rs = df.ne(df)
assert rs.loc[0, 0]
rs = df.gt(df)
assert not rs.loc[0, 0]
rs = df.lt(df)
assert not rs.loc[0, 0]
rs = df.ge(df)
assert not rs.loc[0, 0]
rs = df.le(df)
assert not rs.loc[0, 0]
def test_bool_flex_frame_complex_dtype(self):
# complex
arr = np.array([np.nan, 1, 6, np.nan])
arr2 = np.array([2j, np.nan, 7, None])
df = pd.DataFrame({"a": arr})
df2 = pd.DataFrame({"a": arr2})
msg = "|".join(
[
"'>' not supported between instances of '.*' and 'complex'",
r"unorderable types: .*complex\(\)", # PY35
]
)
with pytest.raises(TypeError, match=msg):
# inequalities are not well-defined for complex numbers
df.gt(df2)
with pytest.raises(TypeError, match=msg):
# regression test that we get the same behavior for Series
df["a"].gt(df2["a"])
with pytest.raises(TypeError, match=msg):
# Check that we match numpy behavior here
df.values > df2.values
rs = df.ne(df2)
assert rs.values.all()
arr3 = np.array([2j, np.nan, None])
df3 = pd.DataFrame({"a": arr3})
with pytest.raises(TypeError, match=msg):
# inequalities are not well-defined for complex numbers
df3.gt(2j)
with pytest.raises(TypeError, match=msg):
# regression test that we get the same behavior for Series
df3["a"].gt(2j)
with pytest.raises(TypeError, match=msg):
# Check that we match numpy behavior here
df3.values > 2j
def test_bool_flex_frame_object_dtype(self):
# corner, dtype=object
df1 = pd.DataFrame({"col": ["foo", np.nan, "bar"]})
df2 = pd.DataFrame({"col": ["foo", datetime.now(), "bar"]})
result = df1.ne(df2)
exp = pd.DataFrame({"col": [False, True, False]})
tm.assert_frame_equal(result, exp)
def test_flex_comparison_nat(self):
# GH 15697, GH 22163 df.eq(pd.NaT) should behave like df == pd.NaT,
# and _definitely_ not be NaN
df = pd.DataFrame([pd.NaT])
result = df == pd.NaT
# result.iloc[0, 0] is a np.bool_ object
assert result.iloc[0, 0].item() is False
result = df.eq(pd.NaT)
assert result.iloc[0, 0].item() is False
result = df != pd.NaT
assert result.iloc[0, 0].item() is True
result = df.ne(pd.NaT)
assert result.iloc[0, 0].item() is True
@pytest.mark.parametrize("opname", ["eq", "ne", "gt", "lt", "ge", "le"])
def test_df_flex_cmp_constant_return_types(self, opname):
# GH 15077, non-empty DataFrame
df = pd.DataFrame({"x": [1, 2, 3], "y": [1.0, 2.0, 3.0]})
const = 2
result = getattr(df, opname)(const).dtypes.value_counts()
tm.assert_series_equal(result, pd.Series([2], index=[np.dtype(bool)]))
@pytest.mark.parametrize("opname", ["eq", "ne", "gt", "lt", "ge", "le"])
def test_df_flex_cmp_constant_return_types_empty(self, opname):
# GH 15077 empty DataFrame
df = pd.DataFrame({"x": [1, 2, 3], "y": [1.0, 2.0, 3.0]})
const = 2
empty = df.iloc[:0]
result = getattr(empty, opname)(const).dtypes.value_counts()
tm.assert_series_equal(result, pd.Series([2], index=[np.dtype(bool)]))
# -------------------------------------------------------------------
# Arithmetic
class TestFrameFlexArithmetic:
def test_floordiv_axis0(self):
# make sure we df.floordiv(ser, axis=0) matches column-wise result
arr = np.arange(3)
ser = pd.Series(arr)
df = pd.DataFrame({"A": ser, "B": ser})
result = df.floordiv(ser, axis=0)
expected = pd.DataFrame({col: df[col] // ser for col in df.columns})
tm.assert_frame_equal(result, expected)
result2 = df.floordiv(ser.values, axis=0)
tm.assert_frame_equal(result2, expected)
@pytest.mark.slow
@pytest.mark.parametrize("opname", ["floordiv", "pow"])
def test_floordiv_axis0_numexpr_path(self, opname):
# case that goes through numexpr and has to fall back to masked_arith_op
op = getattr(operator, opname)
arr = np.arange(10 ** 6).reshape(100, -1)
df = pd.DataFrame(arr)
df["C"] = 1.0
ser = df[0]
result = getattr(df, opname)(ser, axis=0)
expected = pd.DataFrame({col: op(df[col], ser) for col in df.columns})
tm.assert_frame_equal(result, expected)
result2 = getattr(df, opname)(ser.values, axis=0)
tm.assert_frame_equal(result2, expected)
def test_df_add_td64_columnwise(self):
# GH 22534 Check that column-wise addition broadcasts correctly
dti = pd.date_range("2016-01-01", periods=10)
tdi = pd.timedelta_range("1", periods=10)
tser = pd.Series(tdi)
df = pd.DataFrame({0: dti, 1: tdi})
result = df.add(tser, axis=0)
expected = pd.DataFrame({0: dti + tdi, 1: tdi + tdi})
tm.assert_frame_equal(result, expected)
def test_df_add_flex_filled_mixed_dtypes(self):
# GH 19611
dti = pd.date_range("2016-01-01", periods=3)
ser = pd.Series(["1 Day", "NaT", "2 Days"], dtype="timedelta64[ns]")
df = pd.DataFrame({"A": dti, "B": ser})
other = pd.DataFrame({"A": ser, "B": ser})
fill = pd.Timedelta(days=1).to_timedelta64()
result = df.add(other, fill_value=fill)
expected = pd.DataFrame(
{
"A": pd.Series(
["2016-01-02", "2016-01-03", "2016-01-05"], dtype="datetime64[ns]"
),
"B": ser * 2,
}
)
tm.assert_frame_equal(result, expected)
def test_arith_flex_frame(
self, all_arithmetic_operators, float_frame, mixed_float_frame
):
# one instance of parametrized fixture
op = all_arithmetic_operators
def f(x, y):
# r-versions not in operator-stdlib; get op without "r" and invert
if op.startswith("__r"):
return getattr(operator, op.replace("__r", "__"))(y, x)
return getattr(operator, op)(x, y)
result = getattr(float_frame, op)(2 * float_frame)
expected = f(float_frame, 2 * float_frame)
tm.assert_frame_equal(result, expected)
# vs mix float
result = getattr(mixed_float_frame, op)(2 * mixed_float_frame)
expected = f(mixed_float_frame, 2 * mixed_float_frame)
tm.assert_frame_equal(result, expected)
_check_mixed_float(result, dtype=dict(C=None))
@pytest.mark.parametrize("op", ["__add__", "__sub__", "__mul__"])
def test_arith_flex_frame_mixed(
self, op, int_frame, mixed_int_frame, mixed_float_frame
):
f = getattr(operator, op)
# vs mix int
result = getattr(mixed_int_frame, op)(2 + mixed_int_frame)
expected = f(mixed_int_frame, 2 + mixed_int_frame)
# no overflow in the uint
dtype = None
if op in ["__sub__"]:
dtype = dict(B="uint64", C=None)
elif op in ["__add__", "__mul__"]:
dtype = dict(C=None)
tm.assert_frame_equal(result, expected)
_check_mixed_int(result, dtype=dtype)
# vs mix float
result = getattr(mixed_float_frame, op)(2 * mixed_float_frame)
expected = f(mixed_float_frame, 2 * mixed_float_frame)
tm.assert_frame_equal(result, expected)
_check_mixed_float(result, dtype=dict(C=None))
# vs plain int
result = getattr(int_frame, op)(2 * int_frame)
expected = f(int_frame, 2 * int_frame)
tm.assert_frame_equal(result, expected)
def test_arith_flex_frame_raise(self, all_arithmetic_operators, float_frame):
# one instance of parametrized fixture
op = all_arithmetic_operators
# Check that arrays with dim >= 3 raise
for dim in range(3, 6):
arr = np.ones((1,) * dim)
msg = "Unable to coerce to Series/DataFrame"
with pytest.raises(ValueError, match=msg):
getattr(float_frame, op)(arr)
def test_arith_flex_frame_corner(self, float_frame):
const_add = float_frame.add(1)
tm.assert_frame_equal(const_add, float_frame + 1)
# corner cases
result = float_frame.add(float_frame[:0])
tm.assert_frame_equal(result, float_frame * np.nan)
result = float_frame[:0].add(float_frame)
tm.assert_frame_equal(result, float_frame * np.nan)
with pytest.raises(NotImplementedError, match="fill_value"):
float_frame.add(float_frame.iloc[0], fill_value=3)
with pytest.raises(NotImplementedError, match="fill_value"):
float_frame.add(float_frame.iloc[0], axis="index", fill_value=3)
def test_arith_flex_series(self, simple_frame):
df = simple_frame
row = df.xs("a")
col = df["two"]
# after arithmetic refactor, add truediv here
ops = ["add", "sub", "mul", "mod"]
for op in ops:
f = getattr(df, op)
op = getattr(operator, op)
tm.assert_frame_equal(f(row), op(df, row))
tm.assert_frame_equal(f(col, axis=0), op(df.T, col).T)
# special case for some reason
tm.assert_frame_equal(df.add(row, axis=None), df + row)
# cases which will be refactored after big arithmetic refactor
tm.assert_frame_equal(df.div(row), df / row)
tm.assert_frame_equal(df.div(col, axis=0), (df.T / col).T)
# broadcasting issue in GH 7325
df = pd.DataFrame(np.arange(3 * 2).reshape((3, 2)), dtype="int64")
expected = pd.DataFrame([[np.nan, np.inf], [1.0, 1.5], [1.0, 1.25]])
result = df.div(df[0], axis="index")
tm.assert_frame_equal(result, expected)
df = pd.DataFrame(np.arange(3 * 2).reshape((3, 2)), dtype="float64")
expected = pd.DataFrame([[np.nan, np.inf], [1.0, 1.5], [1.0, 1.25]])
result = df.div(df[0], axis="index")
tm.assert_frame_equal(result, expected)
def test_arith_flex_zero_len_raises(self):
# GH 19522 passing fill_value to frame flex arith methods should
# raise even in the zero-length special cases
ser_len0 = pd.Series([], dtype=object)
df_len0 = pd.DataFrame(columns=["A", "B"])
df = pd.DataFrame([[1, 2], [3, 4]], columns=["A", "B"])
with pytest.raises(NotImplementedError, match="fill_value"):
df.add(ser_len0, fill_value="E")
with pytest.raises(NotImplementedError, match="fill_value"):
df_len0.sub(df["A"], axis=None, fill_value=3)
class TestFrameArithmetic:
def test_td64_op_nat_casting(self):
# Make sure we don't accidentally treat timedelta64(NaT) as datetime64
# when calling dispatch_to_series in DataFrame arithmetic
ser = pd.Series(["NaT", "NaT"], dtype="timedelta64[ns]")
df = pd.DataFrame([[1, 2], [3, 4]])
result = df * ser
expected = pd.DataFrame({0: ser, 1: ser})
tm.assert_frame_equal(result, expected)
def test_df_add_2d_array_rowlike_broadcasts(self):
# GH#23000
arr = np.arange(6).reshape(3, 2)
df = pd.DataFrame(arr, columns=[True, False], index=["A", "B", "C"])
rowlike = arr[[1], :] # shape --> (1, ncols)
assert rowlike.shape == (1, df.shape[1])
expected = pd.DataFrame(
[[2, 4], [4, 6], [6, 8]],
columns=df.columns,
index=df.index,
# specify dtype explicitly to avoid failing
# on 32bit builds
dtype=arr.dtype,
)
result = df + rowlike
tm.assert_frame_equal(result, expected)
result = rowlike + df
tm.assert_frame_equal(result, expected)
def test_df_add_2d_array_collike_broadcasts(self):
# GH#23000
arr = np.arange(6).reshape(3, 2)
df = pd.DataFrame(arr, columns=[True, False], index=["A", "B", "C"])
collike = arr[:, [1]] # shape --> (nrows, 1)
assert collike.shape == (df.shape[0], 1)
expected = pd.DataFrame(
[[1, 2], [5, 6], [9, 10]],
columns=df.columns,
index=df.index,
# specify dtype explicitly to avoid failing
# on 32bit builds
dtype=arr.dtype,
)
result = df + collike
tm.assert_frame_equal(result, expected)
result = collike + df
tm.assert_frame_equal(result, expected)
def test_df_arith_2d_array_rowlike_broadcasts(self, all_arithmetic_operators):
# GH#23000
opname = all_arithmetic_operators
arr = np.arange(6).reshape(3, 2)
df = pd.DataFrame(arr, columns=[True, False], index=["A", "B", "C"])
rowlike = arr[[1], :] # shape --> (1, ncols)
assert rowlike.shape == (1, df.shape[1])
exvals = [
getattr(df.loc["A"], opname)(rowlike.squeeze()),
getattr(df.loc["B"], opname)(rowlike.squeeze()),
getattr(df.loc["C"], opname)(rowlike.squeeze()),
]
expected = pd.DataFrame(exvals, columns=df.columns, index=df.index)
if opname in ["__rmod__", "__rfloordiv__"]:
# exvals will have dtypes [f8, i8, i8] so expected will be
# all-f8, but the DataFrame operation will return mixed dtypes
# use exvals[-1].dtype instead of "i8" for compat with 32-bit
# systems/pythons
expected[False] = expected[False].astype(exvals[-1].dtype)
result = getattr(df, opname)(rowlike)
tm.assert_frame_equal(result, expected)
def test_df_arith_2d_array_collike_broadcasts(self, all_arithmetic_operators):
# GH#23000
opname = all_arithmetic_operators
arr = np.arange(6).reshape(3, 2)
df = pd.DataFrame(arr, columns=[True, False], index=["A", "B", "C"])
collike = arr[:, [1]] # shape --> (nrows, 1)
assert collike.shape == (df.shape[0], 1)
exvals = {
True: getattr(df[True], opname)(collike.squeeze()),
False: getattr(df[False], opname)(collike.squeeze()),
}
dtype = None
if opname in ["__rmod__", "__rfloordiv__"]:
# Series ops may return mixed int/float dtypes in cases where
# DataFrame op will return all-float. So we upcast `expected`
dtype = np.common_type(*[x.values for x in exvals.values()])
expected = pd.DataFrame(exvals, columns=df.columns, index=df.index, dtype=dtype)
result = getattr(df, opname)(collike)
tm.assert_frame_equal(result, expected)
def test_df_bool_mul_int(self):
# GH 22047, GH 22163 multiplication by 1 should result in int dtype,
# not object dtype
df = pd.DataFrame([[False, True], [False, False]])
result = df * 1
# On appveyor this comes back as np.int32 instead of np.int64,
# so we check dtype.kind instead of just dtype
kinds = result.dtypes.apply(lambda x: x.kind)
assert (kinds == "i").all()
result = 1 * df
kinds = result.dtypes.apply(lambda x: x.kind)
assert (kinds == "i").all()
def test_arith_mixed(self):
left = pd.DataFrame({"A": ["a", "b", "c"], "B": [1, 2, 3]})
result = left + left
expected = pd.DataFrame({"A": ["aa", "bb", "cc"], "B": [2, 4, 6]})
tm.assert_frame_equal(result, expected)
def test_arith_getitem_commute(self):
df = pd.DataFrame({"A": [1.1, 3.3], "B": [2.5, -3.9]})
def _test_op(df, op):
result = op(df, 1)
if not df.columns.is_unique:
raise ValueError("Only unique columns supported by this test")
for col in result.columns:
tm.assert_series_equal(result[col], op(df[col], 1))
_test_op(df, operator.add)
_test_op(df, operator.sub)
_test_op(df, operator.mul)
_test_op(df, operator.truediv)
_test_op(df, operator.floordiv)
_test_op(df, operator.pow)
_test_op(df, lambda x, y: y + x)
_test_op(df, lambda x, y: y - x)
_test_op(df, lambda x, y: y * x)
_test_op(df, lambda x, y: y / x)
_test_op(df, lambda x, y: y ** x)
_test_op(df, lambda x, y: x + y)
_test_op(df, lambda x, y: x - y)
_test_op(df, lambda x, y: x * y)
_test_op(df, lambda x, y: x / y)
_test_op(df, lambda x, y: x ** y)
@pytest.mark.parametrize(
"values", [[1, 2], (1, 2), np.array([1, 2]), range(1, 3), deque([1, 2])]
)
def test_arith_alignment_non_pandas_object(self, values):
# GH#17901
df = pd.DataFrame({"A": [1, 1], "B": [1, 1]})
expected = pd.DataFrame({"A": [2, 2], "B": [3, 3]})
result = df + values
tm.assert_frame_equal(result, expected)
def test_arith_non_pandas_object(self):
df = pd.DataFrame(
np.arange(1, 10, dtype="f8").reshape(3, 3),
columns=["one", "two", "three"],
index=["a", "b", "c"],
)
val1 = df.xs("a").values
added = pd.DataFrame(df.values + val1, index=df.index, columns=df.columns)
tm.assert_frame_equal(df + val1, added)
added = pd.DataFrame((df.values.T + val1).T, index=df.index, columns=df.columns)
tm.assert_frame_equal(df.add(val1, axis=0), added)
val2 = list(df["two"])
added = pd.DataFrame(df.values + val2, index=df.index, columns=df.columns)
tm.assert_frame_equal(df + val2, added)
added = pd.DataFrame((df.values.T + val2).T, index=df.index, columns=df.columns)
tm.assert_frame_equal(df.add(val2, axis="index"), added)
val3 = np.random.rand(*df.shape)
added = pd.DataFrame(df.values + val3, index=df.index, columns=df.columns)
tm.assert_frame_equal(df.add(val3), added)
def test_operations_with_interval_categories_index(self, all_arithmetic_operators):
# GH#27415
op = all_arithmetic_operators
ind = pd.CategoricalIndex(pd.interval_range(start=0.0, end=2.0))
data = [1, 2]
df = pd.DataFrame([data], columns=ind)
num = 10
result = getattr(df, op)(num)
expected = pd.DataFrame([[getattr(n, op)(num) for n in data]], columns=ind)
tm.assert_frame_equal(result, expected)
def test_frame_with_frame_reindex(self):
# GH#31623
df = pd.DataFrame(
{
"foo": [pd.Timestamp("2019"), pd.Timestamp("2020")],
"bar": [pd.Timestamp("2018"), pd.Timestamp("2021")],
},
columns=["foo", "bar"],
)
df2 = df[["foo"]]
result = df - df2
expected = pd.DataFrame(
{"foo": [pd.Timedelta(0), pd.Timedelta(0)], "bar": [np.nan, np.nan]},
columns=["bar", "foo"],
)
tm.assert_frame_equal(result, expected)
def test_frame_with_zero_len_series_corner_cases():
# GH#28600
# easy all-float case
df = pd.DataFrame(np.random.randn(6).reshape(3, 2), columns=["A", "B"])
ser = pd.Series(dtype=np.float64)
result = df + ser
expected = pd.DataFrame(df.values * np.nan, columns=df.columns)
tm.assert_frame_equal(result, expected)
result = df == ser
expected = pd.DataFrame(False, index=df.index, columns=df.columns)
import numpy as np
import jinja2
import bokeh
import pandas as pd
from astropy.table import Table
import bokeh.plotting as bk
import bokeh.palettes as bp
from bokeh.models import TapTool, HelpTool, OpenURL
from bokeh.models import ColumnDataSource
from ..plots.fiber import plot_fibers_focalplane, plot_fibernums
# from ..plots.core import get_colors
def plot_camfib_focalplane(cds, attribute, cameras, percentiles={},
zmaxs={}, zmins={}, titles={},
tools='pan,box_zoom,reset'):
'''
ARGS:
cds : ColumnDataSource of data
attribute : string corresponding to column name in DATA
cameras : list of string representing unique camera values
Options:
percentiles : dictionary of cameras corresponding to (min,max)
to clip data for histogram
zmaxs : dictionary of cameras corresponding to hardcoded max values
to clip data for histogram
zmins : dictionary of cameras corresponding to hardcoded min values
to clip data for histogram
titles : dictionary of titles per camera for a group of camfiber plots
where key-value pairs represent a camera-attribute plot title
tools : supported plot interactivity features (hover tooltips are built internally)
'''
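# Example call (illustrative only; the attribute and camera names below are hypothetical):
#   figs, hfigs = plot_camfib_focalplane(cds, 'INTEG_AVG', cameras=['B', 'R', 'Z'])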
if attribute not in list(cds.data.keys()):
raise ValueError('{} not in cds.data.keys'.format(attribute))
metric = np.array(cds.data.get(attribute), copy=True)
#- for hover tool
attr_formatted_str = "@" + attribute + '{(0.00 a)}'
tooltips = [("FIBER", "@FIBER"), ("(X, Y)", "(@X, @Y)"),
(attribute, attr_formatted_str)]
figs_list = []
hfigs_list = []
for i in range(len(cameras)):
c = cameras[i]
first_x_range = bokeh.models.Range1d(-420, 420)
first_y_range = bokeh.models.Range1d(-420, 420)
#- shared ranges to support linked features
if not figs_list:
fig_x_range = first_x_range
fig_y_range = first_y_range
else:
fig_x_range = figs_list[0].x_range
fig_y_range = figs_list[0].y_range
colorbar = True
# adjusts for outliers on the scale of the camera
in_cam = np.char.upper(np.array(cds.data['CAM']).astype(str)) == c.upper()
cam_metric = metric[in_cam]
pmin, pmax = np.percentile(cam_metric, (2.5, 97.5))
hist_x_range = (pmin * 0.99, pmax * 1.01)
fig, hfig = plot_fibers_focalplane(cds, attribute, cam=c,
percentile=percentiles.get(c),
zmin=zmins.get(c), zmax=zmaxs.get(c),
title=titles.get(c, {}).get(attribute),
tools=tools, hist_x_range=hist_x_range,
fig_x_range=fig_x_range, fig_y_range=fig_y_range,
colorbar=colorbar)
# Add HelpTool redirection to the DESI wiki.
fig.add_tools(HelpTool(help_tooltip='See the DESI wiki for details\non Focalplane QA',
redirect='https://desi.lbl.gov/trac/wiki/DESIOperations/NightWatch/NightWatchDescription#Focalplane'))
figs_list.append(fig)
hfigs_list.append(hfig)
return figs_list, hfigs_list
def plot_camfib_fot(cds, attribute, cameras, percentiles={},
zmaxs={}, zmins={}, titles={},
tools='pan,box_zoom,reset'):
'''
ARGS:
cds : ColumnDataSource of data
attribute : string corresponding to column name in DATA
cameras : list of string representing unique camera values
Options:
percentiles : dictionary of cameras corresponding to (min,max)
to clip data for histogram
zmaxs : dictionary of cameras corresponding to hardcoded max values
to clip data for histogram
zmins : dictionary of cameras corresponding to hardcoded min values
to clip data for histogram
titles : dictionary of titles per camera for a group of camfiber plots
where key-value pairs represent a camera-attribute plot title
tools : supported plot interactivity features (hover tooltips are built internally)
'''
if attribute not in list(cds.data.keys()):
raise ValueError('{} not in cds.data.keys'.format(attribute))
metric = np.array(cds.data.get(attribute), copy=True)
#- for hover tool
attr_formatted_str = "@" + attribute + '{(0.00 a)}'
tooltips = [("FIBER", "@FIBER"), ("(X, Y)", "(@X, @Y)"),
(attribute, attr_formatted_str)]
figs_list = []
for i in range(len(cameras)):
c = cameras[i]
first_x_range = bokeh.models.Range1d(-420, 420)
first_y_range = bokeh.models.Range1d(-420, 420)
#- shared ranges to support linked features
if not figs_list:
fig_x_range = first_x_range
fig_y_range = first_y_range
else:
fig_x_range = figs_list[0].x_range
fig_y_range = figs_list[0].y_range
# adjusts for outliers on the scale of the camera
in_cam = np.char.upper(np.array(cds.data['CAM']).astype(str)) == c.upper()
cam_metric = metric[in_cam]
fig, hfig = plot_fibers_focalplane(cds, attribute, cam=c,
percentile=percentiles.get(c),
zmin=zmins.get(c), zmax=zmaxs.get(c),
title=titles.get(c, {}).get(attribute),
palette = list(np.flip(bp.YlGnBu[5])),
tools=tools,
fig_x_range=fig_x_range, fig_y_range=fig_y_range,
colorbar=False, on_target=True)
# Add HelpTool redirection to the DESI wiki.
fig.add_tools(HelpTool(help_tooltip='See the DESI wiki for details\non Fiber positioning',
redirect='https://desi.lbl.gov/trac/wiki/DESIOperations/NightWatch/NightWatchDescription#Positioning'))
figs_list.append(fig)
return figs_list
def plot_camfib_posacc(pcd,attribute,percentiles={},
tools='pan,box_zoom,reset'):
figs_list = []
hfigs_list = []
metric = pd.DataFrame(pcd.data.get(attribute))
pmin, pmax = np.percentile(metric, (2.5, 97.5))
if attribute == 'BLIND':
disabled_data = np.array(pd.DataFrame(pcd.data.get('DISABLED_0')))
disabled = disabled_data[disabled_data == True]
metric = np.array(metric)[disabled_data == False]
metric = metric[(metric>0) & (metric<200)]
pcd_data = pd.DataFrame(pcd.data)
pcd_data = pcd_data[pcd_data['BLIND']>0]
pcd = ColumnDataSource(data=pcd_data)
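# keep only positioners that actually made a blind move before plotting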
title = "Blind Move: Max {:.2f}um; RMS {:.2f}um; Disabled {}".format(np.max(list(metric)),np.sqrt(np.square(list(metric)).mean()),len(disabled))
zmax = 200
elif attribute == 'FINAL_MOVE':
disabled_data = np.array(pd.DataFrame(pcd.data.get('DISABLED_1')))
disabled = disabled_data[disabled_data == True]
metric = np.array(metric)[disabled_data == False]
metric = metric[(metric>0) & (metric<50)]
pcd_data = pd.DataFrame(pcd.data)
from __future__ import division
import pytest
import numpy as np
from datetime import timedelta
from pandas import (
Interval, IntervalIndex, Index, isna, notna, interval_range, Timestamp,
Timedelta, compat, date_range, timedelta_range, DateOffset)
from pandas.compat import lzip
from pandas.tseries.offsets import Day
from pandas._libs.interval import IntervalTree
from pandas.tests.indexes.common import Base
import pandas.util.testing as tm
import pandas as pd
@pytest.fixture(scope='class', params=['left', 'right', 'both', 'neither'])
def closed(request):
return request.param
@pytest.fixture(scope='class', params=[None, 'foo'])
def name(request):
return request.param
class TestIntervalIndex(Base):
_holder = IntervalIndex
def setup_method(self, method):
self.index = IntervalIndex.from_arrays([0, 1], [1, 2])
self.index_with_nan = IntervalIndex.from_tuples(
[(0, 1), np.nan, (1, 2)])
self.indices = dict(intervalIndex=tm.makeIntervalIndex(10))
def create_index(self, closed='right'):
return IntervalIndex.from_breaks(range(11), closed=closed)
def create_index_with_nan(self, closed='right'):
mask = [True, False] + [True] * 8
return IntervalIndex.from_arrays(
np.where(mask, np.arange(10), np.nan),
np.where(mask, np.arange(1, 11), np.nan), closed=closed)
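# the single False in the mask produces NaN on both left and right, i.e. one missing interval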
def test_constructors(self, closed, name):
left, right = Index([0, 1, 2, 3]), Index([1, 2, 3, 4])
ivs = [Interval(l, r, closed=closed) for l, r in lzip(left, right)]
expected = IntervalIndex._simple_new(
left=left, right=right, closed=closed, name=name)
result = IntervalIndex(ivs, name=name)
tm.assert_index_equal(result, expected)
import numpy as np
import pytest
from pandas import (
DataFrame,
Index,
MultiIndex,
Series,
Timestamp,
date_range,
to_datetime,
)
import pandas._testing as tm
import pandas.tseries.offsets as offsets
class TestRollingTS:
# rolling time-series friendly
# xref GH13327
def setup_method(self, method):
self.regular = DataFrame(
{"A": date_range("20130101", periods=5, freq="s"), "B": range(5)}
).set_index("A")
self.ragged = DataFrame({"B": range(5)})
self.ragged.index = [
Timestamp("20130101 09:00:00"),
Timestamp("20130101 09:00:02"),
Timestamp("20130101 09:00:03"),
Timestamp("20130101 09:00:05"),
Timestamp("20130101 09:00:06"),
]
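# the ragged index is irregularly spaced (0, 2, 3, 5, 6 seconds), which is what makes
# time-based ("2s") windows behave differently from fixed integer windows in these tests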
def test_doc_string(self):
df = DataFrame(
{"B": [0, 1, 2, np.nan, 4]},
index=[
Timestamp("20130101 09:00:00"),
Timestamp("20130101 09:00:02"),
Timestamp("20130101 09:00:03"),
Timestamp("20130101 09:00:05"),
Timestamp("20130101 09:00:06"),
],
)
df
df.rolling("2s").sum()
def test_invalid_window_non_int(self):
# not a valid freq
msg = "passed window foobar is not compatible with a datetimelike index"
with pytest.raises(ValueError, match=msg):
self.regular.rolling(window="foobar")
# not a datetimelike index
msg = "window must be an integer"
with pytest.raises(ValueError, match=msg):
self.regular.reset_index().rolling(window="foobar")
@pytest.mark.parametrize("freq", ["2MS", offsets.MonthBegin(2)])
def test_invalid_window_nonfixed(self, freq):
# non-fixed freqs
msg = "\\<2 \\* MonthBegins\\> is a non-fixed frequency"
with pytest.raises(ValueError, match=msg):
self.regular.rolling(window=freq)
@pytest.mark.parametrize("freq", ["1D", offsets.Day(2), "2ms"])
def test_valid_window(self, freq):
self.regular.rolling(window=freq)
@pytest.mark.parametrize("minp", [1.0, "foo", np.array([1, 2, 3])])
def test_invalid_minp(self, minp):
# non-integer min_periods
msg = (
r"local variable 'minp' referenced before assignment|"
"min_periods must be an integer"
)
with pytest.raises(ValueError, match=msg):
self.regular.rolling(window="1D", min_periods=minp)
def test_invalid_center_datetimelike(self):
# center is not implemented
msg = "center is not implemented for datetimelike and offset based windows"
with pytest.raises(NotImplementedError, match=msg):
self.regular.rolling(window="1D", center=True)
def test_on(self):
df = self.regular
# not a valid column
msg = (
r"invalid on specified as foobar, must be a column "
"\\(of DataFrame\\), an Index or None"
)
with pytest.raises(ValueError, match=msg):
df.rolling(window="2s", on="foobar")
# column is valid
df = df.copy()
df["C"] = date_range("20130101", periods=len(df))
df.rolling(window="2d", on="C").sum()
# invalid columns
msg = "window must be an integer"
with pytest.raises(ValueError, match=msg):
df.rolling(window="2d", on="B")
# ok even though on non-selected
df.rolling(window="2d", on="C").B.sum()
def test_monotonic_on(self):
# on/index must be monotonic
df = DataFrame(
{"A": date_range("20130101", periods=5, freq="s"), "B": range(5)}
)
assert df.A.is_monotonic
df.rolling("2s", on="A").sum()
df = df.set_index("A")
assert df.index.is_monotonic
df.rolling("2s").sum()
def test_non_monotonic_on(self):
# GH 19248
df = DataFrame(
{"A": date_range("20130101", periods=5, freq="s"), "B": range(5)}
)
df = df.set_index("A")
non_monotonic_index = df.index.to_list()
non_monotonic_index[0] = non_monotonic_index[3]
df.index = non_monotonic_index
assert not df.index.is_monotonic
msg = "index must be monotonic"
with pytest.raises(ValueError, match=msg):
df.rolling("2s").sum()
df = df.reset_index()
msg = (
r"invalid on specified as A, must be a column "
"\\(of DataFrame\\), an Index or None"
)
with pytest.raises(ValueError, match=msg):
df.rolling("2s", on="A").sum()
def test_frame_on(self):
df = DataFrame(
{"B": range(5), "C": date_range("20130101 09:00:00", periods=5, freq="3s")}
)
df["A"] = [
Timestamp("20130101 09:00:00"),
Timestamp("20130101 09:00:02"),
Timestamp("20130101 09:00:03"),
Timestamp("20130101 09:00:05"),
Timestamp("20130101 09:00:06"),
]
# we are doing simulating using 'on'
expected = df.set_index("A").rolling("2s").B.sum().reset_index(drop=True)
result = df.rolling("2s", on="A").B.sum()
tm.assert_series_equal(result, expected)
# test as a frame
# we should be ignoring the 'on' as an aggregation column
# note that the expected is setting, computing, and resetting
# so the columns need to be switched compared
# to the actual result where they are ordered as in the
# original
expected = (
df.set_index("A").rolling("2s")[["B"]].sum().reset_index()[["B", "A"]]
)
result = df.rolling("2s", on="A")[["B"]].sum()
tm.assert_frame_equal(result, expected)
def test_frame_on2(self):
# using multiple aggregation columns
df = DataFrame(
{
"A": [0, 1, 2, 3, 4],
"B": [0, 1, 2, np.nan, 4],
"C": Index(
[
Timestamp("20130101 09:00:00"),
Timestamp("20130101 09:00:02"),
Timestamp("20130101 09:00:03"),
Timestamp("20130101 09:00:05"),
Timestamp("20130101 09:00:06"),
]
),
},
columns=["A", "C", "B"],
)
expected1 = DataFrame(
{"A": [0.0, 1, 3, 3, 7], "B": [0, 1, 3, np.nan, 4], "C": df["C"]},
columns=["A", "C", "B"],
)
result = df.rolling("2s", on="C").sum()
expected = expected1
tm.assert_frame_equal(result, expected)
expected = Series([0, 1, 3, np.nan, 4], name="B")
result = df.rolling("2s", on="C").B.sum()
tm.assert_series_equal(result, expected)
expected = expected1[["A", "B", "C"]]
result = df.rolling("2s", on="C")[["A", "B", "C"]].sum()
tm.assert_frame_equal(result, expected)
def test_basic_regular(self):
df = self.regular.copy()
df.index = date_range("20130101", periods=5, freq="D")
expected = df.rolling(window=1, min_periods=1).sum()
result = df.rolling(window="1D").sum()
tm.assert_frame_equal(result, expected)
df.index = date_range("20130101", periods=5, freq="2D")
expected = df.rolling(window=1, min_periods=1).sum()
result = df.rolling(window="2D", min_periods=1).sum()
tm.assert_frame_equal(result, expected)
expected = df.rolling(window=1, min_periods=1).sum()
result = df.rolling(window="2D", min_periods=1).sum()
tm.assert_frame_equal(result, expected)
expected = df.rolling(window=1).sum()
result = df.rolling(window="2D").sum()
tm.assert_frame_equal(result, expected)
def test_min_periods(self):
# compare for min_periods
df = self.regular
# these slightly different
expected = df.rolling(2, min_periods=1).sum()
result = df.rolling("2s").sum()
tm.assert_frame_equal(result, expected)
expected = df.rolling(2, min_periods=1).sum()
result = df.rolling("2s", min_periods=1).sum()
tm.assert_frame_equal(result, expected)
def test_closed(self):
# xref GH13965
df = DataFrame(
{"A": [1] * 5},
index=[
Timestamp("20130101 09:00:01"),
Timestamp("20130101 09:00:02"),
Timestamp("20130101 09:00:03"),
Timestamp("20130101 09:00:04"),
Timestamp("20130101 09:00:06"),
],
)
# closed must be 'right', 'left', 'both', 'neither'
msg = "closed must be 'right', 'left', 'both' or 'neither'"
with pytest.raises(ValueError, match=msg):
self.regular.rolling(window="2s", closed="blabla")
expected = df.copy()
expected["A"] = [1.0, 2, 2, 2, 1]
result = df.rolling("2s", closed="right").sum()
tm.assert_frame_equal(result, expected)
# default should be 'right'
result = df.rolling("2s").sum()
tm.assert_frame_equal(result, expected)
expected = df.copy()
expected["A"] = [1.0, 2, 3, 3, 2]
result = df.rolling("2s", closed="both").sum()
tm.assert_frame_equal(result, expected)
expected = df.copy()
expected["A"] = [np.nan, 1.0, 2, 2, 1]
result = df.rolling("2s", closed="left").sum()
tm.assert_frame_equal(result, expected)
expected = df.copy()
expected["A"] = [np.nan, 1.0, 1, 1, np.nan]
result = df.rolling("2s", closed="neither").sum()
tm.assert_frame_equal(result, expected)
def test_ragged_sum(self):
df = self.ragged
result = df.rolling(window="1s", min_periods=1).sum()
expected = df.copy()
expected["B"] = [0.0, 1, 2, 3, 4]
tm.assert_frame_equal(result, expected)
result = df.rolling(window="2s", min_periods=1).sum()
expected = df.copy()
expected["B"] = [0.0, 1, 3, 3, 7]
tm.assert_frame_equal(result, expected)
result = df.rolling(window="2s", min_periods=2).sum()
expected = df.copy()
expected["B"] = [np.nan, np.nan, 3, np.nan, 7]
tm.assert_frame_equal(result, expected)
result = df.rolling(window="3s", min_periods=1).sum()
expected = df.copy()
expected["B"] = [0.0, 1, 3, 5, 7]
tm.assert_frame_equal(result, expected)
result = df.rolling(window="3s").sum()
expected = df.copy()
expected["B"] = [0.0, 1, 3, 5, 7]
tm.assert_frame_equal(result, expected)
result = df.rolling(window="4s", min_periods=1).sum()
expected = df.copy()
expected["B"] = [0.0, 1, 3, 6, 9]
tm.assert_frame_equal(result, expected)
result = df.rolling(window="4s", min_periods=3).sum()
expected = df.copy()
expected["B"] = [np.nan, np.nan, 3, 6, 9]
tm.assert_frame_equal(result, expected)
result = df.rolling(window="5s", min_periods=1).sum()
expected = df.copy()
expected["B"] = [0.0, 1, 3, 6, 10]
tm.assert_frame_equal(result, expected)
def test_ragged_mean(self):
df = self.ragged
result = df.rolling(window="1s", min_periods=1).mean()
expected = df.copy()
expected["B"] = [0.0, 1, 2, 3, 4]
tm.assert_frame_equal(result, expected)
result = df.rolling(window="2s", min_periods=1).mean()
expected = df.copy()
expected["B"] = [0.0, 1, 1.5, 3.0, 3.5]
tm.assert_frame_equal(result, expected)
def test_ragged_median(self):
df = self.ragged
result = df.rolling(window="1s", min_periods=1).median()
expected = df.copy()
expected["B"] = [0.0, 1, 2, 3, 4]
tm.assert_frame_equal(result, expected)
result = df.rolling(window="2s", min_periods=1).median()
expected = df.copy()
expected["B"] = [0.0, 1, 1.5, 3.0, 3.5]
tm.assert_frame_equal(result, expected)
def test_ragged_quantile(self):
df = self.ragged
result = df.rolling(window="1s", min_periods=1).quantile(0.5)
expected = df.copy()
expected["B"] = [0.0, 1, 2, 3, 4]
tm.assert_frame_equal(result, expected)
result = df.rolling(window="2s", min_periods=1).quantile(0.5)
expected = df.copy()
expected["B"] = [0.0, 1, 1.5, 3.0, 3.5]
tm.assert_frame_equal(result, expected)
def test_ragged_std(self):
df = self.ragged
result = df.rolling(window="1s", min_periods=1).std(ddof=0)
expected = df.copy()
expected["B"] = [0.0] * 5
tm.assert_frame_equal(result, expected)
result = df.rolling(window="1s", min_periods=1).std(ddof=1)
expected = df.copy()
expected["B"] = [np.nan] * 5
tm.assert_frame_equal(result, expected)
result = df.rolling(window="3s", min_periods=1).std(ddof=0)
expected = df.copy()
expected["B"] = [0.0] + [0.5] * 4
tm.assert_frame_equal(result, expected)
result = df.rolling(window="5s", min_periods=1).std(ddof=1)
expected = df.copy()
expected["B"] = [np.nan, 0.707107, 1.0, 1.0, 1.290994]
tm.assert_frame_equal(result, expected)
def test_ragged_var(self):
df = self.ragged
result = df.rolling(window="1s", min_periods=1).var(ddof=0)
expected = df.copy()
expected["B"] = [0.0] * 5
tm.assert_frame_equal(result, expected)
result = df.rolling(window="1s", min_periods=1).var(ddof=1)
expected = df.copy()
expected["B"] = [np.nan] * 5
tm.assert_frame_equal(result, expected)
result = df.rolling(window="3s", min_periods=1).var(ddof=0)
expected = df.copy()
expected["B"] = [0.0] + [0.25] * 4
tm.assert_frame_equal(result, expected)
result = df.rolling(window="5s", min_periods=1).var(ddof=1)
expected = df.copy()
expected["B"] = [np.nan, 0.5, 1.0, 1.0, 1 + 2 / 3.0]
tm.assert_frame_equal(result, expected)
def test_ragged_skew(self):
df = self.ragged
result = df.rolling(window="3s", min_periods=1).skew()
expected = df.copy()
expected["B"] = [np.nan] * 5
tm.assert_frame_equal(result, expected)
result = df.rolling(window="5s", min_periods=1).skew()
expected = df.copy()
expected["B"] = [np.nan] * 2 + [0.0, 0.0, 0.0]
tm.assert_frame_equal(result, expected)
def test_ragged_kurt(self):
df = self.ragged
result = df.rolling(window="3s", min_periods=1).kurt()
expected = df.copy()
expected["B"] = [np.nan] * 5
tm.assert_frame_equal(result, expected)
result = df.rolling(window="5s", min_periods=1).kurt()
expected = df.copy()
expected["B"] = [np.nan] * 4 + [-1.2]
tm.assert_frame_equal(result, expected)
def test_ragged_count(self):
df = self.ragged
result = df.rolling(window="1s", min_periods=1).count()
expected = df.copy()
expected["B"] = [1.0, 1, 1, 1, 1]
tm.assert_frame_equal(result, expected)
df = self.ragged
result = df.rolling(window="1s").count()
tm.assert_frame_equal(result, expected)
result = df.rolling(window="2s", min_periods=1).count()
expected = df.copy()
expected["B"] = [1.0, 1, 2, 1, 2]
tm.assert_frame_equal(result, expected)
result = df.rolling(window="2s", min_periods=2).count()
expected = df.copy()
expected["B"] = [np.nan, np.nan, 2, np.nan, 2]
tm.assert_frame_equal(result, expected)
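# Back to a regular one-second index: a "1s" window covers a single row, so min() returns the column unchanged.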
def test_regular_min(self):
df = DataFrame(
{"A": date_range("20130101", periods=5, freq="s"), "B": [0.0, 1, 2, 3, 4]}
).set_index("A")
result = df.rolling("1s").min()
expected = df.copy()
expected["B"] = [0.0, 1, 2, 3, 4]
tm.assert_frame_equal(result, expected)
df = DataFrame(
{"A": | date_range("20130101", periods=5, freq="s") | pandas.date_range |
import os
import copy
import pytest
import numpy as np
import pandas as pd
import pyarrow as pa
from pyarrow import feather as pf
from pyarrow import parquet as pq
from time_series_transform.io.base import io_base
from time_series_transform.io.numpy import (
from_numpy,
to_numpy
)
from time_series_transform.io.pandas import (
from_pandas,
to_pandas
)
from time_series_transform.io.arrow import (
from_arrow_record_batch,
from_arrow_table,
to_arrow_record_batch,
to_arrow_table
)
from time_series_transform.transform_core_api.base import (
Time_Series_Data,
Time_Series_Data_Collection
)
from time_series_transform.io.parquet import (
from_parquet,
to_parquet
)
from time_series_transform.io.feather import (
from_feather,
to_feather
)
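# Fixtures: small single-series and categorized datasets plus the expected frames for every expand/pad/remove combination, shared by the IO test classes below.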
@pytest.fixture(scope = 'class')
def dictList_single():
return {
'time': [1, 2],
'data': [1, 2]
}
@pytest.fixture(scope = 'class')
def dictList_collection():
return {
'time': [1,2,1,3],
'data':[1,2,1,2],
'category':[1,1,2,2]
}
@pytest.fixture(scope = 'class')
def expect_single_expandTime():
return {
'data_1':[1],
'data_2':[2]
}
@pytest.fixture(scope = 'class')
def expect_single_seperateLabel():
return [{
'time': [1, 2],
'data': [1, 2]
},
{
'data_label': [1, 2]
}]
@pytest.fixture(scope = 'class')
def expect_collection_seperateLabel():
return [{
'time': [1,2,1,3],
'data':[1,2,1,2],
'category':[1,1,2,2]
},
{
'data_label':[1,2,1,2]
}
]
@pytest.fixture(scope = 'class')
def expect_collection_expandTime():
return {
'pad': {
'data_1':[1,1],
'data_2':[2,np.nan],
'data_3':[np.nan,2],
'category':[1,2]
},
'remove': {
'data_1':[1,1],
'category':[1,2]
}
}
@pytest.fixture(scope = 'class')
def expect_collection_expandCategory():
return {
'pad': {
'time':[1,2,3],
'data_1':[1,2,np.nan],
'data_2':[1,np.nan,2]
},
'remove': {
'time':[1],
'data_1':[1],
'data_2':[1]
}
}
@pytest.fixture(scope = 'class')
def expect_collection_expandFull():
return {
'pad': {
'data_1_1':[1],
'data_2_1':[1],
'data_1_2':[2],
'data_2_2':[np.nan],
'data_1_3':[np.nan],
'data_2_3':[2]
},
'remove': {
'data_1_1':[1],
'data_2_1':[1],
}
}
@pytest.fixture(scope = 'class')
def expect_collection_noExpand():
return {
'ignore':{
'time': [1,2,1,3],
'data':[1,2,1,2],
'category':[1,1,2,2]
},
'pad': {
'time': [1,2,3,1,2,3],
'data':[1,2,np.nan,1,np.nan,2],
'category':[1,1,1,2,2,2]
},
'remove': {
'time': [1,1],
'data':[1,1],
'category':[1,2]
}
}
@pytest.fixture(scope = 'class')
def seq_single():
return {
'time':[1,2,3],
'data':[[1,2,3],[11,12,13],[21,22,23]]
}
@pytest.fixture(scope = 'class')
def seq_collection():
return {
'time':[1,2,1,2],
'data':[[1,2],[1,2],[2,2],[2,2]],
'category':[1,1,2,2]
}
@pytest.fixture(scope = 'class')
def expect_seq_collection():
return {
'data_1_1':[[1,2]],
'data_2_1':[[2,2]],
'data_1_2':[[1,2]],
'data_2_2':[[2,2]]
}
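# io_base round-trips: plain dictionaries to Time_Series_Data / Time_Series_Data_Collection and back, covering each expand mode.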
class Test_base_io:
def test_base_io_from_single(self, dictList_single,expect_single_expandTime):
ExpandTimeAns = expect_single_expandTime
data = dictList_single
ts = Time_Series_Data()
ts = ts.set_time_index(data['time'], 'time')
ts = ts.set_data(data['data'], 'data')
io = io_base(ts, 'time', None)
timeSeries = io.from_single(False)
for i in timeSeries:
assert timeSeries[i].tolist() == data[i]
timeSeries = io.from_single(True)
for i in timeSeries:
assert timeSeries[i] == ExpandTimeAns[i]
def test_base_io_to_single(self, dictList_single):
data = dictList_single
ts = Time_Series_Data()
ts = ts.set_time_index(data['time'], 'time')
ts = ts.set_data(data['data'], 'data')
io = io_base(data, 'time', None)
assert io.to_single() == ts
def test_base_io_from_collection_expandTime(self, dictList_collection,expect_collection_expandTime):
noChange = dictList_collection
expand = expect_collection_expandTime
fullExpand = []
data = dictList_collection
ts = Time_Series_Data()
ts = ts.set_time_index(data['time'], 'time')
ts = ts.set_data(data['data'], 'data')
ts = ts.set_data(data['category'],'category')
tsc = Time_Series_Data_Collection(ts,'time','category')
io = io_base(tsc, 'time', 'category')
with pytest.raises(ValueError):
timeSeries = io.from_collection(False,True,'ignore')
timeSeries = io.from_collection(False,True,'pad')
for i in timeSeries:
np.testing.assert_equal(timeSeries[i],expand['pad'][i])
timeSeries = io.from_collection(False,True,'remove')
for i in timeSeries:
np.testing.assert_equal(timeSeries[i],expand['remove'][i])
def test_base_io_from_collection_expandCategory(self, dictList_collection,expect_collection_expandCategory):
noChange = dictList_collection
expand = expect_collection_expandCategory
fullExpand = []
data = dictList_collection
ts = Time_Series_Data()
ts = ts.set_time_index(data['time'], 'time')
ts = ts.set_data(data['data'], 'data')
ts = ts.set_data(data['category'],'category')
tsc = Time_Series_Data_Collection(ts,'time','category')
io = io_base(tsc, 'time', 'category')
with pytest.raises(ValueError):
timeSeries = io.from_collection(True,False,'ignore')
timeSeries = io.from_collection(True,False,'pad')
for i in timeSeries:
np.testing.assert_equal(timeSeries[i],expand['pad'][i])
timeSeries = io.from_collection(True,False,'remove')
for i in timeSeries:
np.testing.assert_equal(timeSeries[i],expand['remove'][i])
def test_base_io_from_collection_expandFull(self, dictList_collection,expect_collection_expandFull):
noChange = dictList_collection
expand = expect_collection_expandFull
fullExpand = []
data = dictList_collection
ts = Time_Series_Data()
ts = ts.set_time_index(data['time'], 'time')
ts = ts.set_data(data['data'], 'data')
ts = ts.set_data(data['category'],'category')
tsc = Time_Series_Data_Collection(ts,'time','category')
io = io_base(tsc, 'time', 'category')
timeSeries = io.from_collection(True,True,'pad')
for i in timeSeries:
np.testing.assert_equal(timeSeries[i],expand['pad'][i])
timeSeries = io.from_collection(True,True,'remove')
for i in timeSeries:
np.testing.assert_equal(timeSeries[i],expand['remove'][i])
def test_base_io_to_collection(self, dictList_collection):
dataList = dictList_collection
io = io_base(dataList, 'time', 'category')
testData = io.to_collection()
tsd = Time_Series_Data(dataList,'time')
tsc = Time_Series_Data_Collection(tsd,'time','category')
assert testData == tsc
def test_base_io_from_collection_no_expand(self,dictList_collection,expect_collection_noExpand):
noChange = dictList_collection
expand = expect_collection_noExpand
data = dictList_collection
ts = Time_Series_Data()
ts = ts.set_time_index(data['time'], 'time')
ts = ts.set_data(data['data'], 'data')
ts = ts.set_data(data['category'],'category')
tsc = Time_Series_Data_Collection(ts,'time','category')
io = io_base(tsc, 'time', 'category')
timeSeries = io.from_collection(False,False,'ignore')
for i in timeSeries:
np.testing.assert_array_equal(timeSeries[i],expand['ignore'][i])
timeSeries = io.from_collection(False,False,'pad')
for i in timeSeries:
np.testing.assert_equal(timeSeries[i],expand['pad'][i])
timeSeries = io.from_collection(False,False,'remove')
for i in timeSeries:
np.testing.assert_equal(timeSeries[i],expand['remove'][i])
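# Pandas IO: from_pandas/to_pandas should mirror the io_base conversions, including label separation and list-valued (sequence) columns.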
class Test_Pandas_IO:
def test_from_pandas_single(self,dictList_single):
data = dictList_single
df = pd.DataFrame(dictList_single)
tsd = Time_Series_Data(data,'time')
testData = from_pandas(df,'time',None)
assert tsd == testData
def test_from_pandas_collection(self,dictList_collection):
data = dictList_collection
df = pd.DataFrame(dictList_collection)
tsd = Time_Series_Data(data,'time')
tsc = Time_Series_Data_Collection(tsd,'time','category')
testData = from_pandas(df,'time','category')
assert tsc == testData
def test_to_pandas_single(self,dictList_single,expect_single_expandTime):
data = dictList_single
df = pd.DataFrame(data)
expandTime = pd.DataFrame(expect_single_expandTime)
tsd = Time_Series_Data(data,'time')
testData = to_pandas(
tsd,
expandCategory= None,
expandTime=False,
preprocessType= None
)
pd.testing.assert_frame_equal(testData,df,check_dtype=False)
testData = to_pandas(
tsd,
expandCategory= None,
expandTime=True,
preprocessType= None
)
pd.testing.assert_frame_equal(testData,expandTime,check_dtype=False)
def test_to_pandas_collection_expandTime(self,dictList_collection,expect_collection_expandTime):
data = dictList_collection
expandTime_pad = pd.DataFrame(expect_collection_expandTime['pad'])
expandTime_remove = pd.DataFrame(expect_collection_expandTime['remove'])
tsd = Time_Series_Data(data,'time')
tsc = Time_Series_Data_Collection(tsd,'time','category')
testData = to_pandas(
tsc,
expandCategory= False,
expandTime=True,
preprocessType= 'pad'
)
pd.testing.assert_frame_equal(testData,expandTime_pad,check_dtype=False)
testData = to_pandas(
tsc,
expandCategory= False,
expandTime=True,
preprocessType= 'remove'
)
pd.testing.assert_frame_equal(testData,expandTime_remove,check_dtype=False)
with pytest.raises(ValueError):
timeSeries = to_pandas(tsc,False,True,'ignore')
def test_to_pandas_collection_expandCategory(self,dictList_collection,expect_collection_expandCategory):
data = dictList_collection
expandTime_pad = pd.DataFrame(expect_collection_expandCategory['pad'])
expandTime_remove = pd.DataFrame(expect_collection_expandCategory['remove'])
tsd = Time_Series_Data(data,'time')
tsc = Time_Series_Data_Collection(tsd,'time','category')
testData = to_pandas(
tsc,
expandCategory= True,
expandTime=False,
preprocessType= 'pad'
)
pd.testing.assert_frame_equal(testData,expandTime_pad,check_dtype=False)
testData = to_pandas(
tsc,
expandCategory= True,
expandTime=False,
preprocessType= 'remove'
)
pd.testing.assert_frame_equal(testData,expandTime_remove,check_dtype=False)
with pytest.raises(ValueError):
timeSeries = to_pandas(tsc,True,False,'ignore')
def test_to_pandas_collection_expandFull(self,dictList_collection,expect_collection_expandFull):
data = dictList_collection
expandTime_pad = pd.DataFrame(expect_collection_expandFull['pad'])
expandTime_remove = pd.DataFrame(expect_collection_expandFull['remove'])
tsd = Time_Series_Data(data,'time')
tsc = Time_Series_Data_Collection(tsd,'time','category')
testData = to_pandas(
tsc,
expandCategory= True,
expandTime=True,
preprocessType= 'pad'
)
pd.testing.assert_frame_equal(testData,expandTime_pad,check_dtype=False)
testData = to_pandas(
tsc,
expandCategory= True,
expandTime=True,
preprocessType= 'remove'
)
pd.testing.assert_frame_equal(testData,expandTime_remove,check_dtype=False)
def test_to_pandas_collection_noExpand(self,dictList_collection,expect_collection_noExpand):
data = dictList_collection
expandTime_ignore = pd.DataFrame(expect_collection_noExpand['ignore'])
expandTime_pad = pd.DataFrame(expect_collection_noExpand['pad'])
expandTime_remove = pd.DataFrame(expect_collection_noExpand['remove'])
tsd = Time_Series_Data(data,'time')
tsc = Time_Series_Data_Collection(tsd,'time','category')
testData = to_pandas(
tsc,
expandCategory= False,
expandTime=False,
preprocessType= 'pad'
)
pd.testing.assert_frame_equal(testData,expandTime_pad,check_dtype=False)
testData = to_pandas(
tsc,
expandCategory= False,
expandTime=False,
preprocessType= 'remove'
)
pd.testing.assert_frame_equal(testData,expandTime_remove,check_dtype=False)
testData = to_pandas(
tsc,
expandCategory= False,
expandTime=False,
preprocessType= 'ignore'
)
pd.testing.assert_frame_equal(testData,expandTime_ignore,check_dtype=False)
def test_to_pandas_seperateLabels_single(self,dictList_single,expect_single_seperateLabel):
data = dictList_single
expectedX, expectedY = expect_single_seperateLabel
expectedX = pd.DataFrame(expectedX)
expectedY = pd.DataFrame(expectedY)
tsd = Time_Series_Data(data,'time')
tsd.set_labels([1,2],'data_label')
x, y = to_pandas(tsd,False,False,'ignore',True)
print(x)
print(y)
pd.testing.assert_frame_equal(x,expectedX,check_dtype=False)
pd.testing.assert_frame_equal(y,expectedY,check_dtype=False)
def test_to_pandas_seperateLabels_collection(self,dictList_collection,expect_collection_seperateLabel):
data = dictList_collection
expectedX, expectedY = expect_collection_seperateLabel
expectedX = pd.DataFrame(expectedX)
expectedY = pd.DataFrame(expectedY)
tsd = Time_Series_Data(data,'time')
tsd = tsd.set_labels([1,2,1,2],'data_label')
tsc = Time_Series_Data_Collection(tsd,'time','category')
x, y = to_pandas(tsc,False,False,'ignore',True)
print(y)
pd.testing.assert_frame_equal(x,expectedX,check_dtype=False)
pd.testing.assert_frame_equal(y,expectedY,check_dtype=False)
def test_to_pandas_single_sequence(self,seq_single):
data = seq_single
df= pd.DataFrame(data)
tsd = Time_Series_Data(data,'time')
test = to_pandas(tsd,False,False,'ignore',False)
pd.testing.assert_frame_equal(test, df, check_dtype=False)
def test_to_pandas_collection_sequence(self,seq_collection,expect_seq_collection):
data = seq_collection
df = pd.DataFrame(data)
tsd = Time_Series_Data(data,'time')
tsc = Time_Series_Data_Collection(tsd,'time','category')
test = to_pandas(tsc,False,False,'ignore')
pd.testing.assert_frame_equal(df, test, check_dtype=False)
test = to_pandas(tsc,True,True,'ignore')
full = pd.DataFrame(expect_seq_collection)
print(test)
print(full)
# align both frames on the expected columns before comparing
test = test.reindex(sorted(full.columns), axis=1)
full = full.reindex(sorted(full.columns), axis=1)
pd.testing.assert_frame_equal(test, full, check_dtype=False)
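# NumPy IO: the same conversions expressed as 2-D arrays, checked with np.testing against pandas-built expectations.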
class Test_Numpy_IO:
def test_from_numpy_single(self,dictList_single):
data = dictList_single
tsd = Time_Series_Data()
tsd.set_time_index(data['time'],0)
tsd.set_data(data['data'],1)
numpydata = pd.DataFrame(dictList_single).values
testData = from_numpy(numpydata,0,None)
assert tsd == testData
def test_from_numpy_collection(self,dictList_collection):
data = dictList_collection
numpyData = pd.DataFrame(data).values
numpyDataDict = pd.DataFrame(pd.DataFrame(data).values).to_dict('list')
testData = from_numpy(numpyData,0,2)
tsd = Time_Series_Data(numpyDataDict,0)
assert testData == Time_Series_Data_Collection(tsd,0,2)
def test_to_numpy_single(self,dictList_single,expect_single_expandTime):
data = dictList_single
numpyData = pd.DataFrame(data).values
expandTime = pd.DataFrame(expect_single_expandTime).values
tsd = Time_Series_Data()
tsd.set_time_index(data['time'],0)
tsd.set_data(data['data'],1)
testData = to_numpy(
tsd,
expandCategory= None,
expandTime=False,
preprocessType= None
)
np.testing.assert_equal(testData,numpyData)
testData = to_numpy(
tsd,
expandCategory= None,
expandTime=True,
preprocessType= None
)
np.testing.assert_equal(testData,expandTime)
def test_to_numpy_collection_expandTime(self,dictList_collection,expect_collection_expandTime):
data = dictList_collection
results = expect_collection_expandTime
numpyData = pd.DataFrame(data).values
tsd = Time_Series_Data(data,'time')
tsc = Time_Series_Data_Collection(tsd,'time','category')
pad_numpy = to_numpy(
tsc,
expandCategory = False,
expandTime = True,
preprocessType='pad'
)
remove_numpy = to_numpy(
tsc,
expandCategory = False,
expandTime = True,
preprocessType='remove'
)
np.testing.assert_equal(pad_numpy,pd.DataFrame(results['pad']).values)
np.testing.assert_equal(remove_numpy,pd.DataFrame(results['remove']).values)
with pytest.raises(ValueError):
timeSeries = to_numpy(tsc,False,True,'ignore')
def test_to_numpy_collection_expandCategory(self,dictList_collection,expect_collection_expandCategory):
data = dictList_collection
results = expect_collection_expandCategory
numpyData = pd.DataFrame(data).values
tsd = Time_Series_Data(data,'time')
tsc = Time_Series_Data_Collection(tsd,'time','category')
pad_numpy = to_numpy(
tsc,
expandCategory = True,
expandTime = False,
preprocessType='pad'
)
remove_numpy = to_numpy(
tsc,
expandCategory = True,
expandTime = False,
preprocessType='remove'
)
np.testing.assert_equal(pad_numpy,pd.DataFrame(results['pad']).values)
np.testing.assert_equal(remove_numpy,pd.DataFrame(results['remove']).values)
with pytest.raises(ValueError):
timeSeries = to_numpy(tsc,True,False,'ignore')
def test_to_numpy_collection_expandFull(self,dictList_collection,expect_collection_expandFull):
data = dictList_collection
results = expect_collection_expandFull
numpyData = pd.DataFrame(data).values
tsd = Time_Series_Data(data,'time')
tsc = Time_Series_Data_Collection(tsd,'time','category')
pad_numpy = to_numpy(
tsc,
expandCategory = True,
expandTime = True,
preprocessType='pad'
)
np.testing.assert_equal(pad_numpy,pd.DataFrame(results['pad']).values)
remove_numpy = to_numpy(
tsc,
expandCategory = True,
expandTime = True,
preprocessType='remove'
)
np.testing.assert_equal(remove_numpy,pd.DataFrame(results['remove']).values)
def test_to_numpy_collection_noExpand(self,dictList_collection,expect_collection_noExpand):
data = dictList_collection
results = expect_collection_noExpand
numpyData = pd.DataFrame(data).values
tsd = Time_Series_Data(data,'time')
tsc = Time_Series_Data_Collection(tsd,'time','category')
pad_numpy = to_numpy(
tsc,
expandCategory = False,
expandTime = False,
preprocessType='pad'
)
np.testing.assert_equal(pad_numpy,pd.DataFrame(results['pad']).values)
remove_numpy = to_numpy(
tsc,
expandCategory = False,
expandTime = False,
preprocessType='remove'
)
np.testing.assert_equal(remove_numpy,pd.DataFrame(results['remove']).values)
ignore_numpy = to_numpy(
tsc,
expandCategory = False,
expandTime = False,
preprocessType='ignore'
)
np.testing.assert_equal(ignore_numpy,pd.DataFrame(results['ignore']).values)
def test_to_numpy_seperateLabel_single(self,dictList_single,expect_single_seperateLabel):
data = dictList_single
expectedX, expectedY = expect_single_seperateLabel
expectedX = pd.DataFrame(expectedX).values
expectedY = pd.DataFrame(expectedY).values
tsd = Time_Series_Data(data,'time')
tsd.set_labels([1,2],'data_label')
x, y = to_numpy(tsd,False,False,'ignore',True)
print(x)
print(y)
np.testing.assert_equal(x,expectedX)
np.testing.assert_equal(y,expectedY)
def test_to_numpy_seperateLabel_collection(self,dictList_collection,expect_collection_seperateLabel):
data = dictList_collection
expectedX, expectedY = expect_collection_seperateLabel
expectedX = pd.DataFrame(expectedX).values
expectedY = pd.DataFrame(expectedY).values
tsd = Time_Series_Data(data,'time')
tsd = tsd.set_labels([1,2,1,2],'data_label')
tsc = Time_Series_Data_Collection(tsd,'time','category')
x, y = to_numpy(tsc,False,False,'ignore',True)
np.testing.assert_equal(x,expectedX)
np.testing.assert_equal(y,expectedY)
def test_to_numpy_single_sequence(self,seq_single):
data = seq_single
df= pd.DataFrame(data).values
tsd = Time_Series_Data(data,'time')
test = to_numpy(tsd,False,False,'ignore',False)
np.testing.assert_equal(df,test)
def test_to_numpy_collection_sequence(self,seq_collection,expect_seq_collection):
data = seq_collection
df = pd.DataFrame(data).values
tsd = Time_Series_Data(data,'time')
tsc = Time_Series_Data_Collection(tsd,'time','category')
test = to_numpy(tsc,False,False,'ignore')
for i in range(len(test)):
if isinstance(test[i][1],np.ndarray):
test[i][1] = test[i][1].tolist()
np.testing.assert_equal(df,test)
test = to_numpy(tsc,True,True,'ignore')
full = pd.DataFrame(expect_seq_collection).values
for i in range(len(test[0])):
if isinstance(test[0][i],np.ndarray):
test[0][i] = test[0][i].tolist()
np.testing.assert_equal(full,test)
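# Arrow IO: tables and record batches built via pandas should round-trip through from_arrow_* and convert back with to_arrow_*.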
class Test_Arrow_IO:
def test_from_arrow_table_single(self,dictList_single):
data = dictList_single
df = pd.DataFrame(dictList_single)
table = pa.Table.from_pandas(df)
testData = from_arrow_table(table,'time',None)
tsd = Time_Series_Data(data,'time')
assert tsd == testData
def test_from_arrow_table_collection(self,dictList_collection):
data = dictList_collection
df = pd.DataFrame(dictList_collection)
tsd = Time_Series_Data(data,'time')
tsc = Time_Series_Data_Collection(tsd,'time','category')
table = pa.Table.from_pandas(df)
testData = from_arrow_table(table,'time','category')
assert tsc == testData
def test_from_arrow_batch_single(self,dictList_single):
data = dictList_single
df = pd.DataFrame(dictList_single)
table = pa.RecordBatch.from_pandas(df,preserve_index = False)
testData = from_arrow_record_batch(table,'time',None)
tsd = Time_Series_Data(data,'time')
assert tsd == testData
def test_from_arrow_batch_collection(self,dictList_collection):
data = dictList_collection
df = pd.DataFrame(dictList_collection)
tsd = Time_Series_Data(data,'time')
tsc = Time_Series_Data_Collection(tsd,'time','category')
table = pa.RecordBatch.from_pandas(df,preserve_index = False)
testData = from_arrow_record_batch(table,'time','category')
assert tsc == testData
def test_to_arrow_table_single(self,dictList_single,expect_single_expandTime):
data = dictList_single
df = pd.DataFrame(data)
expandTime = pd.DataFrame(expect_single_expandTime)
tsd = Time_Series_Data(data,'time')
testData = to_arrow_table(
tsd,
expandCategory= None,
expandTime=False,
preprocessType= None
).to_pandas()
pd.testing.assert_frame_equal(testData,df,check_dtype=False)
testData = to_arrow_table(
tsd,
expandCategory= None,
expandTime=True,
preprocessType= None
).to_pandas()
pd.testing.assert_frame_equal(testData,expandTime,check_dtype=False)
def test_to_arrow_table_collection_expandTime(self,dictList_collection,expect_collection_expandTime):
data = dictList_collection
expandTime_pad = pd.DataFrame(expect_collection_expandTime['pad'])  # api: pandas.DataFrame
import datetime
import streamlit as st
import pandas as pd
import plotly.express as px
import numpy as np
from plotly.subplots import make_subplots
import plotly.graph_objects as go
import requests as rs
import json
import random
import webbrowser
import yfinance as yf
from sys_var import api_list,indicator_symbol_list,graph_type_list
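# Small custom exception used to signal an empty symbol-search result later on.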
class MyError(Exception) :
# Constructor or Initializer
def __init__(self, value) :
self.value = value
# __str__ is to print() the value
def __str__(self) :
return (repr(self.value))
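# Page layout, sidebar title, and the asset-class selector that drives the sections below.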
st.set_page_config(layout='wide')
st.sidebar.title('Financial Analysis Dashboard')
radio_select = st.sidebar.radio('Select from the options below', ['Indian Stocks', 'Crypto', 'US Stocks', 'Forex',
"Global stocks and more(Alpha Vantage)",
"Global stocks and more(Yahoo Finance)"])
if radio_select == 'Crypto' :
st.title("CRYPTOCURRENCIES")
col1, col2 = st.columns(2)
with col1 :
digital_data = pd.read_csv("digital_currency_list.csv")
dictio = digital_data.set_index('currency name').T.to_dict('list')
digital_list = digital_data['currency name'].dropna().unique().tolist()
crypto_select1 = st.selectbox("Select a Cryptocurrency", digital_list)
input_value = dictio[crypto_select1][0]
with col2 :
currency_data = pd.read_csv("physical_currency_list.csv")
dictio2 = currency_data.set_index('currency name').T.to_dict('list')
currency_list = currency_data['currency name'].dropna().unique().tolist()
currency_select = st.selectbox("Select Currency Pair", currency_list)
currency_select = dictio2[currency_select][0]
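# Chart options: interval and graph type; technical indicators are offered only for daily/weekly/monthly intervals.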
with st.expander('Show Options'):
col3, col4 = st.columns(2)
col5, col6 = st.columns(2)
with col3 :
interval_list = ["1 Minute", "5 Minutes", "15 Minutes", "30 Minutes", "60 Minutes", "1 Day", "1 Week",
"1 Month"]
interval_list1 = ["1 Minute", "5 Minutes", "15 Minutes", "30 Minutes", "60 Minutes"]
interval_list2 = ["1 Day", "1 Week", "1 Month"]
interval_list1_dict = {"1 Minute" : "1min", "5 Minutes" : "5min", "15 Minutes" : "15min",
"30 Minutes" : "30min",
"60 Minutes" : "60min"}
interval_list2_dict = {"1 Day" : "DAILY", "1 Week" : "WEEKLY", "1 Month" : "MONTHLY"}
interval_list21_dict = {"1 Day" : "Daily", "1 Week" : "Weekly", "1 Month" : "Monthly"}
indicator_dict = {"1 Minute" : "1min", "5 Minutes" : "5min", "15 Minutes" : "15min", "30 Minutes" : "30min",
"60 Minutes" : "60min", "1 Day" : "daily", "1 Week" : "weekly", "1 Month" : "monthly"}
interval_select = st.selectbox("Select Interval", interval_list)
with col4 :
graph_type = st.selectbox('Select Graph type', graph_type_list)
flag = 0
if interval_select in interval_list1 :
flag = 1
try :
y_arr = ['Rate']
data = None
if flag == 1 :
data = rs.get("https://www.alphavantage.co/query?function=CRYPTO_INTRADAY&symbol=" + str(
input_value) + "&market=" + str(currency_select) + "&interval=" + interval_list1_dict[
interval_select] + "&apikey=" + random.choice(api_list))
print("jello")
data = data.json()
data = json.dumps(data["Time Series Crypto (" + str(interval_list1_dict[interval_select]) + ")"])
data = pd.read_json(data)
data = data.T.reset_index()
data.rename(columns={'1. open' : 'Open'}, inplace=True)
data.rename(columns={'2. high' : 'High'}, inplace=True)
data.rename(columns={'3. low' : 'Low'}, inplace=True)
data.rename(columns={'4. close' : 'Rate'}, inplace=True)
st.markdown(
"<h1 style='text-align: center; color: red;'>Chart of " + crypto_select1 + " <sub style='font-size: 25px;'>" + input_value + "/" + currency_select + "</sub></h1>",
unsafe_allow_html=True)
if graph_type == 'Line' :
# fig = px.line(data, x="index", y=y_arr, template="ggplot2", labels={"index" : "Date"})
fig = make_subplots(specs=[[{"secondary_y" : True}]])
fig.add_trace(go.Scatter(x=data['index'], y=data['Rate'], name='Rate'),
secondary_y=True)
fig.add_trace(go.Bar(x=data['index'], y=data['5. volume'], name='Volume', opacity=0.5),
secondary_y=False)
fig.update_layout(autosize=False, width=1600, height=800, legend_title="Indicators", font=dict(
family="Courier New, monospace",
size=18,
color="RebeccaPurple"
))
fig.layout.yaxis2.showgrid = False
st.plotly_chart(fig)
if graph_type == 'Candlesticks' or graph_type == 'OHLC' :
data.rename(columns={'Rate' : 'Close'}, inplace=True)
fig = make_subplots(specs=[[{"secondary_y" : True}]])
# include candlestick with rangeselector
if graph_type == 'Candlesticks':
fig.add_trace(go.Candlestick(x=data['index'],
open=data['Open'], high=data['High'],
low=data['Low'], close=data['Close'], name='Rate'),
secondary_y=True)
if graph_type == 'OHLC':
fig.add_trace(go.Ohlc(x=data['index'],
open=data['Open'], high=data['High'],
low=data['Low'], close=data['Close'], name='Rate'),
secondary_y=True)
# include a go.Bar trace for volumes
fig.add_trace(go.Bar(x=data['index'], y=data['5. volume'], name='Volume', opacity=0.5),
secondary_y=False)
fig.update_layout(autosize=False, width=1600, height=800, legend_title="Indicators", font=dict(
family="Courier New, monospace",
size=18,
color="RebeccaPurple"
))
fig.layout.yaxis2.showgrid = False
st.plotly_chart(fig)
if graph_type == 'Filled Area' :
fig = px.area(data, x='index', y='Rate', template="ggplot2", labels={"index" : "Date"})
fig.update_layout(autosize=False, width=1600, height=800, legend_title="Indicators", font=dict(
family="Courier New, monospace",
size=18,
color="RebeccaPurple"
))
st.plotly_chart(fig)
if flag == 0 :
data = rs.get("https://www.alphavantage.co/query?function=DIGITAL_CURRENCY_" + interval_list2_dict[
interval_select] + "&symbol=" + str(
input_value) + "&market=" + str(currency_select) + "&apikey=" + random.choice(api_list))
data = data.json()
data = json.dumps(data["Time Series (Digital Currency " + str(interval_list21_dict[interval_select]) + ")"])
data = pd.read_json(data)
data = data.T.reset_index()
data.rename(columns={'4a. close (' + str(currency_select) + ')' : 'Rate'}, inplace=True)
data.rename(columns={'1a. open (' + str(currency_select) + ')' : 'Open'}, inplace=True)
data.rename(columns={'2a. high (' + str(currency_select) + ')' : 'High'}, inplace=True)
data.rename(columns={'3a. low (' + str(currency_select) + ')' : 'Low'}, inplace=True)
if graph_type != 'Filled Area' :
with col5 :
indicate_select = st.multiselect('Add Indicators', indicator_symbol_list)
interval_sel = indicate_select
with col6 :
time_select = st.number_input('Select indicator time period', max_value=30, min_value=5, step=1)
for i in range(len(interval_sel)) :
data2 = rs.get("https://www.alphavantage.co/query?function=" + interval_sel[i] + "&symbol=" + str(
input_value) + str(currency_select) + "&interval=" + indicator_dict[
interval_select] + "&time_period=" + str(
time_select) + "&series_type=open&apikey=" + random.choice(api_list))
data2 = data2.json()
data2 = json.dumps(data2["Technical Analysis: " + interval_sel[i]])
data2 = pd.read_json(data2)
data2 = data2.T.reset_index()
data = pd.merge(data, data2, on="index", how="left")
y_arr = y_arr + interval_sel
st.markdown(
"<h1 style='text-align: center; color: red;'>Chart of " + crypto_select1 + " <sub style='font-size: 25px;'>" + input_value + "/" + currency_select + "</sub></h1>",
unsafe_allow_html=True)
# fig = px.line(data, x="index", y=y_arr, template="ggplot2", labels={"index" : "Date"})
if graph_type == 'Line' :
fig = make_subplots(specs=[[{"secondary_y" : True}]])
fig.add_trace(go.Scatter(x=data['index'], y=data['Rate'], name='Rate'),
secondary_y=True)
for i in range(len(interval_sel)) :
fig.add_trace(go.Scatter(x=data['index'], y=data[interval_sel[i]], name=interval_sel[i]),
secondary_y=True)
fig.add_trace(go.Bar(x=data['index'], y=data['5. volume'], name='Volume', opacity=0.5),
secondary_y=False)
fig.update_layout(autosize=False, width=1600, height=800, legend_title="Indicators", font=dict(
family="Courier New, monospace",
size=18,
color="RebeccaPurple"
))
fig.layout.yaxis2.showgrid = False
st.plotly_chart(fig)
if graph_type == 'Candlesticks' or graph_type == 'OHLC' :
data.rename(columns={'Rate' : 'Close'}, inplace=True)
fig = make_subplots(specs=[[{"secondary_y" : True}]])
# include candlestick with rangeselector
if graph_type == 'Candlesticks' :
fig.add_trace(go.Candlestick(x=data['index'],
open=data['Open'], high=data['High'],
low=data['Low'], close=data['Close'], name='Rate'),
secondary_y=True)
if graph_type == 'OHLC' :
fig.add_trace(go.Ohlc(x=data['index'],
open=data['Open'], high=data['High'],
low=data['Low'], close=data['Close'], name='Rate'),
secondary_y=True)
# include a go.Bar trace for volumes
fig.add_trace(go.Bar(x=data['index'], y=data['5. volume'], name='Volume', opacity=0.5),
secondary_y=False)
fig.update_layout(autosize=False, width=1600, height=800, legend_title="Indicators", font=dict(
family="Courier New, monospace",
size=18,
color="RebeccaPurple"
))
fig.layout.yaxis2.showgrid = False
st.plotly_chart(fig)
if graph_type == 'Filled Area' :
fig = px.area(data, x='index', y='Rate', template="ggplot2", labels={"index" : "Date"})
fig.update_layout(autosize=False, width=1600, height=500, legend_title="Indicators", font=dict(
family="Courier New, monospace",
size=18,
color="RebeccaPurple"
))
st.plotly_chart(fig)
except Exception as e :
st.info(
"The selected cryptocurrency data is currently unavailable. Please check your connection or choose another cryptocurrency (like Bitcoin).")
if radio_select == 'Forex' :
st.title("FOREX")
size_select = st.sidebar.radio('Select output size', ['compact', 'full(uses more data)'])
size_select = size_select.split('(')[0]
col1, col2 = st.columns(2)
with col1 :
digital_data = pd.read_csv("physical_currency_list1.csv")
dictio = digital_data.set_index('currency name').T.to_dict('list')
digital_list = digital_data['currency name'].dropna().unique().tolist()
crypto_select1 = st.selectbox("Select the Currency", digital_list)
input_value = dictio[crypto_select1][0]
with col2 :
currency_data = pd.read_csv("physical_currency_list.csv")
dictio2 = currency_data.set_index('currency name').T.to_dict('list')
currency_list = currency_data['currency name'].dropna().unique().tolist()
currency_select = st.selectbox("Select currency pair", currency_list)
currency_select = dictio2[currency_select][0]
with st.expander('Show Options') :
col3, col4 = st.columns(2)
col5, col6 = st.columns(2)
with col3 :
interval_list = ["1 Day", "1 Week", "1 Month"]
interval_list2_dict = {"1 Day" : "DAILY", "1 Week" : "WEEKLY", "1 Month" : "MONTHLY"}
interval_list21_dict = {"1 Day" : "Daily", "1 Week" : "Weekly", "1 Month" : "Monthly"}
indicator_dict = {"1 Minute" : "1min", "5 Minutes" : "5min", "15 Minutes" : "15min", "30 Minutes" : "30min",
"60 Minutes" : "60min", "1 Day" : "daily", "1 Week" : "weekly", "1 Month" : "monthly"}
interval_select = st.selectbox("Select Interval", interval_list)
with col4 :
graph_type = st.selectbox('Select Graph type', graph_type_list)
flag = 0
try :
y_arr = ['Rate']
data = None
if flag == 0 :
print("https://www.alphavantage.co/query?function=FX_" + interval_list2_dict[
interval_select] + "&from_symbol=" + str(
input_value) + "&to_symbol=" + str(currency_select) + "&apikey=" + random.choice(api_list))
data = rs.get("https://www.alphavantage.co/query?function=FX_" + interval_list2_dict[
interval_select] + "&from_symbol=" + str(
input_value) + "&to_symbol=" + str(
currency_select) + "&outputsize=" + size_select + "&apikey=" + random.choice(api_list))
data = data.json()
print(data)
data = json.dumps(data["Time Series FX (" + str(interval_list21_dict[interval_select]) + ")"])
data = pd.read_json(data)
data = data.T.reset_index()
data.rename(columns={'4. close' : 'Rate'}, inplace=True)
data.rename(columns={'1. open' : 'Open'}, inplace=True)
data.rename(columns={'2. high' : 'High'}, inplace=True)
data.rename(columns={'3. low' : 'Low'}, inplace=True)
if graph_type != 'Filled Area' :
with col5 :
indicate_select = st.multiselect('Add Indicators', indicator_symbol_list)
interval_sel = indicate_select
with col6 :
time_select = st.number_input('Select indicator time period', max_value=30, min_value=5, step=1)
for i in range(len(interval_sel)) :
data2 = rs.get("https://www.alphavantage.co/query?function=" + interval_sel[i] + "&symbol=" + str(
input_value) + str(currency_select) + "&interval=" + indicator_dict[
interval_select] + "&time_period=" + str(
time_select) + "&series_type=open&outputsize=" + size_select + "&apikey=" + random.choice(
api_list))
data2 = data2.json()
data2 = json.dumps(data2["Technical Analysis: " + interval_sel[i]])
data2 = pd.read_json(data2)
data2 = data2.T.reset_index()
data = pd.merge(data, data2, on="index", how="left")
y_arr = y_arr + interval_sel
st.markdown(
"<h1 style='text-align: center; color: red;'>Chart of " + crypto_select1 + " <sub style='font-size: 25px;'>" + input_value + "/" + currency_select + "</sub></h1>",
unsafe_allow_html=True)
# fig = px.line(data, x="index", y=y_arr, template="ggplot2", labels={"index" : "Date"})
if graph_type == 'Line' :
fig = make_subplots(specs=[[{"secondary_y" : True}]])
fig.add_trace(go.Scatter(x=data['index'], y=data['Rate'], name='Rate'),
secondary_y=True)
for i in range(len(interval_sel)) :
fig.add_trace(go.Scatter(x=data['index'], y=data[interval_sel[i]], name=interval_sel[i]),
secondary_y=True)
fig.update_layout(autosize=False, width=1600, height=800, legend_title="Indicators", font=dict(
family="Courier New, monospace",
size=18,
color="RebeccaPurple"
))
fig.layout.yaxis2.showgrid = False
st.plotly_chart(fig)
if graph_type == 'Candlesticks' or graph_type == 'OHLC' :
data.rename(columns={'Rate' : 'Close'}, inplace=True)
fig = make_subplots(specs=[[{"secondary_y" : True}]])
# include candlestick with rangeselector
if graph_type == 'Candlesticks' :
fig.add_trace(go.Candlestick(x=data['index'],
open=data['Open'], high=data['High'],
low=data['Low'], close=data['Close'], name='Rate'),
secondary_y=True)
if graph_type == 'OHLC' :
fig.add_trace(go.Ohlc(x=data['index'],
open=data['Open'], high=data['High'],
low=data['Low'], close=data['Close'], name='Rate'),
secondary_y=True)
for i in range(len(interval_sel)) :
fig.add_trace(go.Scatter(x=data['index'], y=data[interval_sel[i]], name=interval_sel[i]),
secondary_y=True)
# include a go.Bar trace for volumes
fig.update_layout(autosize=False, width=1600, height=800, legend_title="Indicators", font=dict(
family="Courier New, monospace",
size=18,
color="RebeccaPurple"
))
fig.layout.yaxis2.showgrid = False
st.plotly_chart(fig)
if graph_type == 'Filled Area' :
fig = px.area(data, x='index', y='Rate', template="ggplot2", labels={"index" : "Date"})
fig.update_layout(autosize=False, width=1600, height=500, legend_title="Indicators", font=dict(
family="Courier New, monospace",
size=18,
color="RebeccaPurple"
))
st.plotly_chart(fig)
except Exception as e :
st.info(
"The selected forex pair data is currently unavailable. Please check your connection or choose another pair.")
if radio_select == "Global stocks and more(Alpha Vantage)" :
st.title(radio_select)
size_select = st.sidebar.radio('Select output size', ['compact', 'full(uses more data)'])
size_select = size_select.split('(')[0]
keyword = st.text_input("Search by symbol,name or keyword")
if keyword != '' :
print(keyword)
print('https://www.alphavantage.co/query?function=SYMBOL_SEARCH&keywords=' + str(
keyword) + '&apikey=' + random.choice(api_list))
data = rs.get('https://www.alphavantage.co/query?function=SYMBOL_SEARCH&keywords=' + str(
keyword) + '&apikey=' + random.choice(api_list))
data = data.json()
# data = pd.read_json(data)
try :
if data["bestMatches"] == [] :
raise (MyError('No financial entity with this name found in our system'))
data = json.dumps(data["bestMatches"])
data = pd.read_json(data)
data.rename(columns={'1. symbol' : 'Symbol'}, inplace=True)
data.rename(columns={'2. name' : 'Name'}, inplace=True)
data.rename(columns={'3. type' : 'Type'}, inplace=True)
data.rename(columns={'4. region' : 'Region'}, inplace=True)
data.rename(columns={'5. marketOpen' : 'Market Open'}, inplace=True)
data.rename(columns={'6. marketClose' : 'Market Close'}, inplace=True)
data.rename(columns={'7. timezone' : 'Timezone'}, inplace=True)
data.rename(columns={'8. currency' : 'Currency'}, inplace=True)
data_ticker = data['Symbol'].tolist()
data_name = data['Name'].tolist()
data_type = data['Type'].tolist()
data_region = data['Region'].tolist()
new_list = []
for i in range(len(data_ticker)) :
s = data_name[i] + "----" + data_ticker[i] + "----" + data_type[i] + "----" + data_region[i]
new_list.append(s)
new_list.insert(0, '--Select from options--')
col1, col2 = st.columns(2)
with col1 :
new_box = st.selectbox("Select from the options below", new_list)
if (new_box != '--Select from options--') :
input_value = new_box.split("----")[1]
crypto_select1 = new_box.split("----")[0]
currency_select = data[data['Symbol'] == input_value]['Currency'].tolist()
currency_select1 = currency_select[0]
print(currency_select)
currency_data = pd.read_csv("physical_currency_list.csv")  # api: pandas.read_csv
import re
from inspect import isclass
import numpy as np
import pandas as pd
import pytest
from mock import patch
import woodwork as ww
from woodwork.accessor_utils import (
_is_dask_dataframe,
_is_dask_series,
_is_koalas_dataframe,
_is_koalas_series,
init_series,
)
from woodwork.exceptions import (
ColumnNotPresentError,
IndexTagRemovedWarning,
ParametersIgnoredWarning,
TypeConversionError,
TypingInfoMismatchWarning,
WoodworkNotInitError,
)
from woodwork.logical_types import (
URL,
Address,
Age,
AgeFractional,
AgeNullable,
Boolean,
BooleanNullable,
Categorical,
CountryCode,
Datetime,
Double,
EmailAddress,
Filepath,
Integer,
IntegerNullable,
IPAddress,
LatLong,
NaturalLanguage,
Ordinal,
PersonFullName,
PhoneNumber,
PostalCode,
SubRegionCode,
Unknown,
)
from woodwork.table_accessor import (
WoodworkTableAccessor,
_check_index,
_check_logical_types,
_check_partial_schema,
_check_time_index,
_check_unique_column_names,
_check_use_standard_tags,
_infer_missing_logical_types,
)
from woodwork.table_schema import TableSchema
from woodwork.tests.testing_utils import (
is_property,
is_public_method,
to_pandas,
validate_subset_schema,
)
from woodwork.tests.testing_utils.table_utils import assert_schema_equal
from woodwork.utils import import_or_none
dd = import_or_none("dask.dataframe")
ks = import_or_none("databricks.koalas")
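# Accessor-level tests: argument validation helpers, schema initialization and equality, and dtype inference on init.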
def test_check_index_errors(sample_df):
error_message = "Specified index column `foo` not found in dataframe"
with pytest.raises(ColumnNotPresentError, match=error_message):
_check_index(dataframe=sample_df, index="foo")
if isinstance(sample_df, pd.DataFrame):
# Does not check for index uniqueness with Dask
error_message = "Index column must be unique"
with pytest.raises(LookupError, match=error_message):
_check_index(sample_df, index="age")
def test_check_logical_types_errors(sample_df):
error_message = "logical_types must be a dictionary"
with pytest.raises(TypeError, match=error_message):
_check_logical_types(sample_df, logical_types="type")
bad_logical_types_keys = {
"full_name": None,
"age": None,
"birthday": None,
"occupation": None,
}
error_message = re.escape(
"logical_types contains columns that are not present in dataframe: ['birthday', 'occupation']"
)
with pytest.raises(ColumnNotPresentError, match=error_message):
_check_logical_types(sample_df, bad_logical_types_keys)
def test_check_time_index_errors(sample_df):
error_message = "Specified time index column `foo` not found in dataframe"
with pytest.raises(ColumnNotPresentError, match=error_message):
_check_time_index(dataframe=sample_df, time_index="foo")
def test_check_unique_column_names_errors(sample_df):
if _is_koalas_dataframe(sample_df):
pytest.skip("Koalas enforces unique column names")
duplicate_cols_df = sample_df.copy()
if _is_dask_dataframe(sample_df):
duplicate_cols_df = dd.concat(
[duplicate_cols_df, duplicate_cols_df["age"]], axis=1
)
else:
duplicate_cols_df.insert(0, "age", [18, 21, 65, 43], allow_duplicates=True)
with pytest.raises(
IndexError, match="Dataframe cannot contain duplicate columns names"
):
_check_unique_column_names(duplicate_cols_df)
def test_check_use_standard_tags_errors():
error_message = "use_standard_tags must be a dictionary or a boolean"
with pytest.raises(TypeError, match=error_message):
_check_use_standard_tags(1)
def test_accessor_init(sample_df):
assert sample_df.ww.schema is None
sample_df.ww.init()
assert isinstance(sample_df.ww.schema, TableSchema)
def test_accessor_schema_property(sample_df):
sample_df.ww.init()
assert sample_df.ww._schema is not sample_df.ww.schema
assert sample_df.ww._schema == sample_df.ww.schema
def test_set_accessor_name(sample_df):
df = sample_df.copy()
error = re.escape(
"Woodwork not initialized for this DataFrame. Initialize by calling DataFrame.ww.init"
)
with pytest.raises(WoodworkNotInitError, match=error):
df.ww.name
with pytest.raises(WoodworkNotInitError, match=error):
df.ww.name = "name"
df.ww.init()
assert df.ww.name is None
df.ww.name = "name"
assert df.ww.schema.name == "name"
assert df.ww.name == "name"
def test_rename_init_with_name(sample_df):
df = sample_df.copy()
df.ww.init(name="name")
assert df.ww.name == "name"
df.ww.name = "new_name"
assert df.ww.schema.name == "new_name"
assert df.ww.name == "new_name"
def test_name_error_on_init(sample_df):
err_msg = "Table name must be a string"
with pytest.raises(TypeError, match=err_msg):
sample_df.ww.init(name=123)
def test_name_error_on_update(sample_df):
sample_df.ww.init()
err_msg = "Table name must be a string"
with pytest.raises(TypeError, match=err_msg):
sample_df.ww.name = 123
def test_name_persists_after_drop(sample_df):
df = sample_df.copy()
df.ww.init()
df.ww.name = "name"
assert df.ww.name == "name"
dropped_df = df.ww.drop(["id"])
assert dropped_df.ww.name == "name"
assert dropped_df.ww.schema.name == "name"
def test_set_accessor_metadata(sample_df):
df = sample_df.copy()
error = re.escape(
"Woodwork not initialized for this DataFrame. Initialize by calling DataFrame.ww.init"
)
with pytest.raises(WoodworkNotInitError, match=error):
df.ww.metadata
with pytest.raises(WoodworkNotInitError, match=error):
df.ww.metadata = {"new": "metadata"}
df.ww.init()
assert df.ww.metadata == {}
df.ww.metadata = {"new": "metadata"}
assert df.ww.schema.metadata == {"new": "metadata"}
assert df.ww.metadata == {"new": "metadata"}
def test_set_metadata_after_init_with_metadata(sample_df):
df = sample_df.copy()
df.ww.init(table_metadata={"new": "metadata"})
assert df.ww.metadata == {"new": "metadata"}
df.ww.metadata = {"new": "new_metadata"}
assert df.ww.schema.metadata == {"new": "new_metadata"}
assert df.ww.metadata == {"new": "new_metadata"}
def test_metadata_persists_after_drop(sample_df):
df = sample_df.copy()
df.ww.init()
df.ww.metadata = {"new": "metadata"}
assert df.ww.metadata == {"new": "metadata"}
dropped_df = df.ww.drop(["id"])
assert dropped_df.ww.metadata == {"new": "metadata"}
assert dropped_df.ww.schema.metadata == {"new": "metadata"}
def test_metadata_error_on_init(sample_df):
err_msg = "Table metadata must be a dictionary."
with pytest.raises(TypeError, match=err_msg):
sample_df.ww.init(table_metadata=123)
def test_metadata_error_on_update(sample_df):
sample_df.ww.init()
err_msg = "Table metadata must be a dictionary."
with pytest.raises(TypeError, match=err_msg):
sample_df.ww.metadata = 123
def test_accessor_physical_types_property(sample_df):
sample_df.ww.init(logical_types={"age": "Categorical"})
assert isinstance(sample_df.ww.physical_types, dict)
assert set(sample_df.ww.physical_types.keys()) == set(sample_df.columns)
for k, v in sample_df.ww.physical_types.items():
logical_type = sample_df.ww.columns[k].logical_type
if _is_koalas_dataframe(sample_df) and logical_type.backup_dtype is not None:
assert v == logical_type.backup_dtype
else:
assert v == logical_type.primary_dtype
def test_accessor_separation_of_params(sample_df):
# mix up order of accessor and schema params
schema_df = sample_df.copy()
schema_df.ww.init(
name="test_name",
index="id",
semantic_tags={"id": "test_tag"},
time_index="signup_date",
)
assert schema_df.ww.semantic_tags["id"] == {"index", "test_tag"}
assert schema_df.ww.index == "id"
assert schema_df.ww.time_index == "signup_date"
assert schema_df.ww.name == "test_name"
def test_init_with_full_schema(sample_df):
schema_df = sample_df.copy()
schema_df.ww.init(name="test_schema", semantic_tags={"id": "test_tag"}, index="id")
schema = schema_df.ww._schema
head_df = schema_df.head(2)
assert head_df.ww.schema is None
head_df.ww.init_with_full_schema(schema=schema)
assert head_df.ww._schema is schema
assert head_df.ww.name == "test_schema"
assert head_df.ww.semantic_tags["id"] == {"index", "test_tag"}
iloc_df = schema_df.loc[[2, 3]]
assert iloc_df.ww.schema is None
iloc_df.ww.init_with_full_schema(schema=schema)
assert iloc_df.ww._schema is schema
assert iloc_df.ww.name == "test_schema"
assert iloc_df.ww.semantic_tags["id"] == {"index", "test_tag"}
# Extra parameters do not take effect
assert isinstance(iloc_df.ww.logical_types["id"], Integer)
def test_accessor_init_errors_methods(sample_df):
methods_to_exclude = ["init", "init_with_full_schema", "init_with_partial_schema"]
public_methods = [
method
for method in dir(sample_df.ww)
if is_public_method(WoodworkTableAccessor, method)
]
public_methods = [
method for method in public_methods if method not in methods_to_exclude
]
method_args_dict = {
"add_semantic_tags": [{"id": "new_tag"}],
"describe": None,
"pop": ["id"],
"describe": None,
"describe_dict": None,
"drop": ["id"],
"get_valid_mi_columns": None,
"mutual_information": None,
"mutual_information_dict": None,
"remove_semantic_tags": [{"id": "new_tag"}],
"rename": [{"id": "new_id"}],
"reset_semantic_tags": None,
"select": [["Double"]],
"set_index": ["id"],
"set_time_index": ["signup_date"],
"set_types": [{"id": "Integer"}],
"to_disk": ["dir"],
"to_dictionary": None,
"value_counts": None,
"infer_temporal_frequencies": None,
}
error = re.escape(
"Woodwork not initialized for this DataFrame. Initialize by calling DataFrame.ww.init"
)
for method in public_methods:
func = getattr(sample_df.ww, method)
method_args = method_args_dict[method]
with pytest.raises(WoodworkNotInitError, match=error):
if method_args:
func(*method_args)
else:
func()
def test_accessor_init_errors_properties(sample_df):
props_to_exclude = ["iloc", "loc", "schema", "_dataframe"]
props = [
prop
for prop in dir(sample_df.ww)
if is_property(WoodworkTableAccessor, prop) and prop not in props_to_exclude
]
error = re.escape(
"Woodwork not initialized for this DataFrame. Initialize by calling DataFrame.ww.init"
)
for prop in props:
with pytest.raises(WoodworkNotInitError, match=error):
getattr(sample_df.ww, prop)
def test_init_accessor_with_schema_errors(sample_df):
schema_df = sample_df.copy()
schema_df.ww.init()
schema = schema_df.ww.schema
iloc_df = schema_df.iloc[:, :-1]
assert iloc_df.ww.schema is None
error = "Provided schema must be a Woodwork.TableSchema object."
with pytest.raises(TypeError, match=error):
iloc_df.ww.init_with_full_schema(schema=int)
error = (
"Woodwork typing information is not valid for this DataFrame: "
"The following columns in the typing information were missing from the DataFrame: {'ip_address'}"
)
with pytest.raises(ValueError, match=error):
iloc_df.ww.init_with_full_schema(schema=schema)
def test_accessor_with_schema_parameter_warning(sample_df):
schema_df = sample_df.copy()
schema_df.ww.init(name="test_schema", semantic_tags={"id": "test_tag"}, index="id")
schema = schema_df.ww.schema
head_df = schema_df.head(2)
warning = (
"A schema was provided and the following parameters were ignored: index, "
"time_index, logical_types, already_sorted, semantic_tags, use_standard_tags"
)
with pytest.warns(ParametersIgnoredWarning, match=warning):
head_df.ww.init_with_full_schema(
index="ignored_id",
time_index="ignored_time_index",
logical_types={"ignored": "ltypes"},
already_sorted=True,
semantic_tags={"ignored_id": "ignored_test_tag"},
use_standard_tags={"id": True, "age": False},
schema=schema,
)
assert head_df.ww.name == "test_schema"
assert head_df.ww.semantic_tags["id"] == {"index", "test_tag"}
def test_accessor_getattr(sample_df):
schema_df = sample_df.copy()
# We can access attributes on the Accessor class before the schema is initialized
assert schema_df.ww.schema is None
error = re.escape(
"Woodwork not initialized for this DataFrame. Initialize by calling DataFrame.ww.init"
)
with pytest.raises(WoodworkNotInitError, match=error):
schema_df.ww.index
schema_df.ww.init()
assert schema_df.ww.name is None
assert schema_df.ww.index is None
assert schema_df.ww.time_index is None
assert set(schema_df.ww.columns.keys()) == set(sample_df.columns)
error = re.escape("Woodwork has no attribute 'not_present'")
with pytest.raises(AttributeError, match=error):
sample_df.ww.init()
sample_df.ww.not_present
def test_getitem(sample_df):
df = sample_df
df.ww.init(
time_index="signup_date",
index="id",
name="df_name",
logical_types={"age": "Double"},
semantic_tags={"age": {"custom_tag"}},
)
assert list(df.columns) == list(df.ww.schema.columns)
subset = ["id", "signup_date"]
df_subset = df.ww[subset]
pd.testing.assert_frame_equal(to_pandas(df[subset]), to_pandas(df_subset))
assert subset == list(df_subset.ww._schema.columns)
assert df_subset.ww.index == "id"
assert df_subset.ww.time_index == "signup_date"
subset = ["age", "email"]
df_subset = df.ww[subset]
pd.testing.assert_frame_equal(to_pandas(df[subset]), to_pandas(df_subset))
assert subset == list(df_subset.ww._schema.columns)
assert df_subset.ww.index is None
assert df_subset.ww.time_index is None
assert isinstance(df_subset.ww.logical_types["age"], Double)
assert df_subset.ww.semantic_tags["age"] == {"custom_tag", "numeric"}
subset = df.ww[[]]
assert len(subset.ww.columns) == 0
assert subset.ww.index is None
assert subset.ww.time_index is None
series = df.ww["age"]
pd.testing.assert_series_equal(to_pandas(series), to_pandas(df["age"]))
assert isinstance(series.ww.logical_type, Double)
assert series.ww.semantic_tags == {"custom_tag", "numeric"}
series = df.ww["id"]
pd.testing.assert_series_equal(to_pandas(series), to_pandas(df["id"]))
assert isinstance(series.ww.logical_type, Integer)
assert series.ww.semantic_tags == {"index"}
def test_getitem_init_error(sample_df):
error = re.escape(
"Woodwork not initialized for this DataFrame. Initialize by calling DataFrame.ww.init"
)
with pytest.raises(WoodworkNotInitError, match=error):
sample_df.ww["age"]
def test_getitem_invalid_input(sample_df):
df = sample_df
df.ww.init()
error_msg = r"Column\(s\) '\[1, 2\]' not found in DataFrame"
with pytest.raises(ColumnNotPresentError, match=error_msg):
df.ww[["email", 2, 1]]
error_msg = "Column with name 'invalid_column' not found in DataFrame"
with pytest.raises(ColumnNotPresentError, match=error_msg):
df.ww["invalid_column"]
def test_accessor_equality(sample_df):
# Confirm equality with same schema and same data
schema_df = sample_df.copy()
schema_df.ww.init()
copy_df = schema_df.ww.copy()
assert schema_df.ww == copy_df.ww
# Confirm not equal with different schema but same data
copy_df.ww.set_time_index("signup_date")
assert schema_df.ww != copy_df.ww
# Confirm not equal with same schema but different data - only pandas
loc_df = schema_df.ww.loc[:2, :]
if isinstance(sample_df, pd.DataFrame):
assert schema_df.ww != loc_df
else:
assert schema_df.ww == loc_df
def test_accessor_shallow_equality(sample_df):
metadata_table = sample_df.copy()
metadata_table.ww.init(table_metadata={"user": "user0"})
diff_metadata_table = sample_df.copy()
diff_metadata_table.ww.init(table_metadata={"user": "user2"})
assert diff_metadata_table.ww.__eq__(metadata_table, deep=False)
assert not diff_metadata_table.ww.__eq__(metadata_table, deep=True)
schema = metadata_table.ww.schema
diff_data_table = metadata_table.ww.loc[:2, :]
same_data_table = metadata_table.ww.copy()
assert diff_data_table.ww.schema.__eq__(schema, deep=True)
assert same_data_table.ww.schema.__eq__(schema, deep=True)
assert same_data_table.ww.__eq__(metadata_table.ww, deep=False)
assert same_data_table.ww.__eq__(metadata_table.ww, deep=True)
assert diff_data_table.ww.__eq__(metadata_table.ww, deep=False)
if isinstance(sample_df, pd.DataFrame):
assert not diff_data_table.ww.__eq__(metadata_table.ww, deep=True)
def test_accessor_init_with_valid_string_time_index(time_index_df):
time_index_df.ww.init(name="schema", index="id", time_index="times")
assert time_index_df.ww.name == "schema"
assert time_index_df.ww.index == "id"
assert time_index_df.ww.time_index == "times"
assert isinstance(
time_index_df.ww.columns[time_index_df.ww.time_index].logical_type, Datetime
)
def test_accessor_init_with_numeric_datetime_time_index(time_index_df):
schema_df = time_index_df.copy()
schema_df.ww.init(time_index="ints", logical_types={"ints": Datetime})
error_msg = "Time index column must contain datetime or numeric values"
with pytest.raises(TypeError, match=error_msg):
time_index_df.ww.init(
name="schema", time_index="strs", logical_types={"strs": Datetime}
)
assert schema_df.ww.time_index == "ints"
assert schema_df["ints"].dtype == "datetime64[ns]"
def test_accessor_with_numeric_time_index(time_index_df):
# Set a numeric time index on init
schema_df = time_index_df.copy()
schema_df.ww.init(time_index="ints")
date_col = schema_df.ww.columns["ints"]
assert schema_df.ww.time_index == "ints"
assert isinstance(date_col.logical_type, Integer)
assert date_col.semantic_tags == {"time_index", "numeric"}
# Specify logical type for time index on init
schema_df = time_index_df.copy()
schema_df.ww.init(time_index="ints", logical_types={"ints": "Double"})
date_col = schema_df.ww.columns["ints"]
assert schema_df.ww.time_index == "ints"
assert isinstance(date_col.logical_type, Double)
assert date_col.semantic_tags == {"time_index", "numeric"}
schema_df = time_index_df.copy()
schema_df.ww.init(time_index="strs", logical_types={"strs": "Double"})
date_col = schema_df.ww.columns["strs"]
assert schema_df.ww.time_index == "strs"
assert isinstance(date_col.logical_type, Double)
assert date_col.semantic_tags == {"time_index", "numeric"}
error_msg = "Time index column must contain datetime or numeric values"
with pytest.raises(TypeError, match=error_msg):
time_index_df.ww.init(time_index="ints", logical_types={"ints": "Categorical"})
error_msg = "Time index column must contain datetime or numeric values"
with pytest.raises(TypeError, match=error_msg):
time_index_df.ww.init(time_index="letters", logical_types={"strs": "Integer"})
# Set numeric time index after init
schema_df = time_index_df.copy()
schema_df.ww.init(logical_types={"ints": "Double"})
assert schema_df.ww.time_index is None
schema_df.ww.set_time_index("ints")
date_col = schema_df.ww.columns["ints"]
assert schema_df.ww.time_index == "ints"
assert isinstance(date_col.logical_type, Double)
assert date_col.semantic_tags == {"numeric", "time_index"}
def test_numeric_time_index_dtypes(numeric_time_index_df):
numeric_time_index_df.ww.init(time_index="ints")
assert numeric_time_index_df.ww.time_index == "ints"
assert isinstance(numeric_time_index_df.ww.logical_types["ints"], Integer)
assert numeric_time_index_df.ww.semantic_tags["ints"] == {"time_index", "numeric"}
numeric_time_index_df.ww.set_time_index("floats")
assert numeric_time_index_df.ww.time_index == "floats"
assert isinstance(numeric_time_index_df.ww.logical_types["floats"], Double)
assert numeric_time_index_df.ww.semantic_tags["floats"] == {"time_index", "numeric"}
numeric_time_index_df.ww.set_time_index("with_null")
assert numeric_time_index_df.ww.time_index == "with_null"
assert isinstance(
numeric_time_index_df.ww.logical_types["with_null"], IntegerNullable
)
assert numeric_time_index_df.ww.semantic_tags["with_null"] == {
"time_index",
"numeric",
}
def test_accessor_init_with_invalid_string_time_index(sample_df):
error_msg = "Time index column must contain datetime or numeric values"
with pytest.raises(TypeError, match=error_msg):
sample_df.ww.init(name="schema", time_index="full_name")
def test_accessor_init_with_string_logical_types(sample_df):
logical_types = {"full_name": "natural_language", "age": "Double"}
schema_df = sample_df.copy()
schema_df.ww.init(name="schema", logical_types=logical_types)
assert isinstance(schema_df.ww.columns["full_name"].logical_type, NaturalLanguage)
assert isinstance(schema_df.ww.columns["age"].logical_type, Double)
logical_types = {
"full_name": "NaturalLanguage",
"age": "IntegerNullable",
"signup_date": "Datetime",
}
schema_df = sample_df.copy()
schema_df.ww.init(
name="schema", logical_types=logical_types, time_index="signup_date"
)
assert isinstance(schema_df.ww.columns["full_name"].logical_type, NaturalLanguage)
assert isinstance(schema_df.ww.columns["age"].logical_type, IntegerNullable)
assert schema_df.ww.time_index == "signup_date"
def test_int_dtype_inference_on_init():
df = pd.DataFrame(
{
"ints_no_nans": pd.Series([1, 2]),
"ints_nan": pd.Series([1, np.nan]),
"ints_NA": pd.Series([1, pd.NA]),
"ints_NA_specified": pd.Series([1, pd.NA], dtype="Int64"),
}
)
df = df.loc[df.index.repeat(5)].reset_index(drop=True)
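    # rows are repeated so the frame is large enough for dtype inference
    # (presumably to clear Woodwork's minimum-row heuristics for inferring categoricals)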
df.ww.init()
assert df["ints_no_nans"].dtype == "int64"
assert df["ints_nan"].dtype == "float64"
assert df["ints_NA"].dtype == "category"
assert df["ints_NA_specified"].dtype == "Int64"
def test_bool_dtype_inference_on_init():
df = pd.DataFrame(
{
"bools_no_nans": pd.Series([True, False]),
"bool_nan": pd.Series([True, np.nan]),
"bool_NA": pd.Series([True, pd.NA]),
"bool_NA_specified": pd.Series([True, pd.NA], dtype="boolean"),
}
)
df = df.loc[df.index.repeat(5)].reset_index(drop=True)
df.ww.init()
assert df["bools_no_nans"].dtype == "bool"
assert df["bool_nan"].dtype == "category"
assert df["bool_NA"].dtype == "category"
assert df["bool_NA_specified"].dtype == "boolean"
def test_str_dtype_inference_on_init():
df = pd.DataFrame(
{
"str_no_nans": pd.Series(["a", "b"]),
"str_nan": pd.Series(["a", np.nan]),
"str_NA": pd.Series(["a", pd.NA]),
"str_NA_specified": pd.Series([1, pd.NA], dtype="string"),
}
)
df = df.loc[df.index.repeat(5)].reset_index(drop=True)
df.ww.init()
assert df["str_no_nans"].dtype == "category"
assert df["str_nan"].dtype == "category"
assert df["str_NA"].dtype == "category"
assert df["str_NA_specified"].dtype == "category"
def test_float_dtype_inference_on_init():
df = pd.DataFrame(
{
"floats_no_nans": pd.Series([1.1, 2.2]),
"floats_nan": pd.Series([1.1, np.nan]),
"floats_NA": pd.Series([1.1, pd.NA]),
"floats_nan_specified": pd.Series([1.1, np.nan], dtype="float"),
}
)
df = df.loc[df.index.repeat(5)].reset_index(drop=True)
df.ww.init()
assert df["floats_no_nans"].dtype == "float64"
assert df["floats_nan"].dtype == "float64"
assert df["floats_NA"].dtype == "category"
assert df["floats_nan_specified"].dtype == "float64"
def test_datetime_dtype_inference_on_init():
df = pd.DataFrame(
{
"date_no_nans": pd.Series([pd.to_datetime("2020-09-01")] * 2),
"date_nan": pd.Series([pd.to_datetime("2020-09-01"), np.nan]),
"date_NA": pd.Series([pd.to_datetime("2020-09-01"), pd.NA]),
"date_NaT": pd.Series([pd.to_datetime("2020-09-01"), pd.NaT]),
"date_NA_specified": pd.Series(
[pd.to_datetime("2020-09-01"), pd.NA], dtype="datetime64[ns]"
),
}
)
df.ww.init()
assert df["date_no_nans"].dtype == "datetime64[ns]"
assert df["date_nan"].dtype == "datetime64[ns]"
assert df["date_NA"].dtype == "datetime64[ns]"
assert df["date_NaT"].dtype == "datetime64[ns]"
assert df["date_NA_specified"].dtype == "datetime64[ns]"
def test_datetime_inference_with_format_param():
df = pd.DataFrame(
{
"index": [0, 1, 2],
"dates": ["2019/01/01", "2019/01/02", "2019/01/03"],
"ymd_special": ["2019~01~01", "2019~01~02", "2019~01~03"],
"mdy_special": pd.Series(
["3~11~2000", "3~12~2000", "3~13~2000"], dtype="string"
),
}
)
df.ww.init(
name="df_name",
logical_types={
"ymd_special": Datetime(datetime_format="%Y~%m~%d"),
"mdy_special": Datetime(datetime_format="%m~%d~%Y"),
"dates": Datetime,
},
time_index="ymd_special",
)
assert df["dates"].dtype == "datetime64[ns]"
assert df["ymd_special"].dtype == "datetime64[ns]"
assert df["mdy_special"].dtype == "datetime64[ns]"
assert df.ww.time_index == "ymd_special"
assert isinstance(df.ww["dates"].ww.logical_type, Datetime)
assert isinstance(df.ww["ymd_special"].ww.logical_type, Datetime)
assert isinstance(df.ww["mdy_special"].ww.logical_type, Datetime)
df.ww.set_time_index("mdy_special")
assert df.ww.time_index == "mdy_special"
df = pd.DataFrame(
{
"mdy_special": pd.Series(
["3&11&2000", "3&12&2000", "3&13&2000"], dtype="string"
),
}
)
df = df.loc[df.index.repeat(5)].reset_index(drop=True)
df.ww.init()
assert df["mdy_special"].dtype == "category"
df.ww.set_types(logical_types={"mdy_special": Datetime(datetime_format="%m&%d&%Y")})
assert df["mdy_special"].dtype == "datetime64[ns]"
df.ww.set_time_index("mdy_special")
assert isinstance(df.ww["mdy_special"].ww.logical_type, Datetime)
assert df.ww.time_index == "mdy_special"
def test_timedelta_dtype_inference_on_init():
df = pd.DataFrame(
{
"delta_no_nans": (
pd.Series([pd.to_datetime("2020-09-01")] * 2)
- pd.to_datetime("2020-07-01")
),
"delta_nan": (
pd.Series([pd.to_datetime("2020-09-01"), np.nan])
- pd.to_datetime("2020-07-01")
),
"delta_NaT": (
pd.Series([pd.to_datetime("2020-09-01"), pd.NaT])
- pd.to_datetime("2020-07-01")
),
"delta_NA_specified": (
pd.Series([pd.to_datetime("2020-09-01"), pd.NA], dtype="datetime64[ns]")
- pd.to_datetime("2020-07-01")
),
}
)
df.ww.init()
assert df["delta_no_nans"].dtype == "timedelta64[ns]"
assert df["delta_nan"].dtype == "timedelta64[ns]"
assert df["delta_NaT"].dtype == "timedelta64[ns]"
assert df["delta_NA_specified"].dtype == "timedelta64[ns]"
def test_sets_category_dtype_on_init():
column_name = "test_series"
series_list = [
pd.Series(["a", "b", "c"], name=column_name),
pd.Series(["a", None, "c"], name=column_name),
| pd.Series(["a", np.nan, "c"], name=column_name) | pandas.Series |
"""A script for getting institution details."""
import pandas as pd
from . import linkParse
from urllib.parse import urljoin, urlparse
from lxml import etree
import re
from selenium import webdriver
from lxml import html
import time
import os
from .utilityfunc import Xpath, Image
class InstituteData:
"""docstring for InstituteData."""
def __init__(self, weblink=None):
super(InstituteData, self).__init__()
self.weblink = weblink
self._tree = linkParse.get_html_as_tree(self.weblink)
self._xpaths = {
"atags": (
f"//a[contains(@href, '{'.'.join(urlparse(weblink).netloc.split('.')[-2:])}') "
+ "or (not(starts-with(@href,'http')) "
+ "and not(starts-with(@href, 'javascript')) "
+ "and not(starts-with(@href, '#')))]"
),
"fb": "//a[contains(@href, 'facebook.com/')]",
"fb_first": "//div[contains(@class, 'fb-page') and contains(@data-href,'facebook.com')]/@data-href",
"logo_else": "(//img[attribute::*[contains(., 'logo')]])[position()=1]/@src",
"logo_png": "(//img[attribute::*[contains(., 'logo')] and "
# below substring xpath function checks if the src ends with .png extension
"substring(attribute::*, string-length(attribute::*) - string-length('png') +1) = 'png'])"
"[position()=1]/@src"
}
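        # XPath 1.0 has no ends-with(), so the "logo_png" expression emulates it; in illustrative form:
        # substring(@src, string-length(@src) - string-length('png') + 1) = 'png'
        # evaluates to true only when the attribute value ends with "png".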
self.atags_df = self.__a_data__().drop_duplicates()
def __a_data__(self, link=None):
"""Gets a tags and corresponding text."""
structure = pd.DataFrame(columns=["Text", "Hyperlink"])
if not link:
link = self.weblink
tree = self._tree
else:
try:
tree = linkParse.get_html_as_tree(link)
except ConnectionRefusedError:
return None
if tree is None:
return structure
elements = tree.xpath(self._xpaths['atags'])
if elements:
atags = [x.xpath("./@href")[0] if x.xpath("./@href") else "" for x in elements]
atags = [urljoin(link, url=x) for x in atags]
atags_text = [x.text for x in elements]
atags_text = [x.strip() if x else '' for x in atags_text]
atags_df = pd.DataFrame({
"Text": atags_text,
"Hyperlink": atags
})
            atags_df = atags_df.loc[atags_df["Text"].apply(bool)]
return atags_df
else:
return structure
def edu_web_filter(self):
if self.atags_df.empty:
return False
keywords = self.atags_df.Text.str.contains(
(r"Gallery|Contact US|Home|About[.\s]?Us|History"
r"|Vision|Mission|Scholarship"
r"|Anti[\-\s]?Ragging|Ragging"
r"|Syllabus|Contact[.\s]?Us|Admission|Placements"
r"|Facilities|Library|Hostel|Sports|Wi[-\s]?Fi|Education"
r"|Institution|Academic|Exam|Campus|Graduate|Under[\s\-]?graduate"
r"|Alumni|Research|career|Fees|Engineering|University"),
case=False)
if sum(keywords) >= 4:
return True
else:
print(f"The link {self.weblink} does not seem to be an education institution website.")
return False
def fb_link(self):
"""Gets you the facebook link."""
fb_page_links = self._tree.xpath(self._xpaths["fb_first"])
if not fb_page_links:
fb_page_links = self._tree.xpath(self._xpaths['fb'] + '/@href')
        if len(fb_page_links) == 1:  # if exactly one link is found, verify it points to a live Facebook page
fb_page_link = fb_page_links[0]
tree = linkParse.get_html_as_tree(fb_page_link)
if tree is not None:
if len(tree.xpath("//a[contains(@href, 'ref=page_internal')]")) > 3:
return fb_page_link
else:
print(f"facebook link {fb_page_link} seems to be broken.")
return None
elif len(fb_page_links) > 1: # if more than one page is found
if len(fb_page_links) > 3:
                fb_page_links = fb_page_links[0:3]  # check only the first three pages
for fb_page_link in fb_page_links:
tree = linkParse.get_html_as_tree(fb_page_link)
if tree is not None:
if len(tree.xpath("//a[contains(@href, 'ref=page_internal')]")) > 3:
return fb_page_link
else:
continue
else:
continue
else:
print("No facebook link found.")
return None
def fb_logo_cover(self):
fb_link = self.fb_link()
if fb_link:
with webdriver.Chrome() as driver:
driver.get(fb_link)
time.sleep(1)
tree = driver.page_source
tree = html.fromstring(tree)
fb_logo_link = tree.xpath("//a[@aria-label='Profile picture']/img/@src")
fb_cover_link = tree.xpath(
'(//*[contains(text(), "Drag to Reposition")]/following::*/img)[position()=1]/@src')
locals()["fb_logo_link"] = fb_logo_link
locals()["fb_cover_link"] = fb_cover_link
if locals()['fb_logo_link'] and locals()['fb_cover_link']:
return ", ".join([fb_logo_link[0], fb_cover_link[0]])
elif locals()['fb_logo_link']:
return locals()['fb_logo_link']
elif locals()['fb_cover_link']:
return locals()['fb_cover_link']
else:
print("No cover or profile found.")
return None
else:
return None
def gallery_link(self):
gallery_link = self.atags_df.loc[
self.atags_df["Text"].str.contains("gallery|photos", case=False)
| self.atags_df["Hyperlink"].str.contains("gallery|photos", case=False)
]
if not gallery_link.empty:
return gallery_link.iloc[0, 1]
else:
print("No gallery link found.")
return None
def gallery_images(self):
gallery_link = self.gallery_link()
if gallery_link:
tree = linkParse.get_html_as_tree(gallery_link)
if tree is not None:
elements = tree.xpath("//img[not(ancestor::footer) and attribute::*[contains(name(), 'src')]]")
if len(elements) >= 3:
xpaths = [etree.ElementTree(tree).getpath(x) for x in elements]
xpath_gallery = Xpath.get_group(pd.Series(xpaths))
if xpath_gallery is None:
return None
xpath_gallery["firstimgXpath"] = xpath_gallery[1].apply(lambda x: '(' + x + ')[1]')
attribute_extract = lambda x: tree.xpath(x)[0].attrib if tree.xpath(x) else None
xpath_gallery["attribs"] = xpath_gallery["firstimgXpath"].apply(attribute_extract)
xpath_gallery = xpath_gallery.loc[~xpath_gallery.attribs.isna()]
xpath_gallery = xpath_gallery.loc[xpath_gallery[0] >= 3]
xpath_gallery["size"] = xpath_gallery["attribs"].apply(
lambda x: Image.get_image_size(x, gallery_link))
xpath_gallery = xpath_gallery.sort_values("size", ascending=False)
if not xpath_gallery.empty:
                        xpath_gallery = xpath_gallery.iloc[0, 1]  # the generalized xpath string (column 1), not the image count
else:
print(f"The gallery section of {gallery_link} does not seem to contain at least 3 images.")
return None
elements = tree.xpath(xpath_gallery)
images = [x.xpath('./@*[contains(name(), "src")]')[0] for x in elements]
images = [x.replace(' ', "%20") for x in images]
images = [urljoin(self.weblink, x) if x else '' for x in images]
return ", ".join(images)
else:
print(f"The page {gallery_link} does not seem to contain at least 3 images.")
else:
print(f"The gallery link {gallery_link} cannot be parsed.")
return None
else:
return None
def logo_link(self):
"""Gets you the logo link"""
logo_link = self._tree.xpath(self._xpaths["logo_png"])
if logo_link:
return urljoin(self.weblink, logo_link[0])
logo_link = self._tree.xpath(self._xpaths["logo_else"])
if logo_link:
return urljoin(self.weblink, logo_link[0])
print("No logo found.")
return None
def contact_link(self):
"""gets you the contact link mentioned on the page."""
contact_links = self.atags_df
if contact_links.empty:
return None
contact_links = contact_links.loc[contact_links['Text'].str.contains(
r'contact', case=False)]
contact_links = contact_links['Hyperlink'].tolist()
if contact_links:
return contact_links[0]
else:
print("No contact link found.")
return None
def blog(self):
"""Gets you the blog/news/article present in the link."""
blog_links = self.atags_df.loc[
self.atags_df['Text'].str.contains(r'blog|news|article', case=False)
]
blog_links = [urlparse(x) for x in blog_links["Hyperlink"]]
blog_links = ["{0}://{1}{2}".format(x.scheme, x.netloc, x.path) for x in blog_links]
blog_links = ", ".join(blog_links)
if not blog_links:
print("No blogs or newsletters found.")
return None
else:
return blog_links
def __filter__(self, contact_text_list):
s = pd.Series(contact_text_list)
# rules
if not s.empty:
less_than_80_char = (s.str.count(r'\w') < 80)
contains_email_pattern = s.str.contains(r"(?:\b[A-z0-9\._%+\-]+@[A-z0-9\.\-]+\.[A-z]{2,4}\b)"
r"|(?:\b[A-z0-9\._%+\-]+\[at\][A-z0-9\.\-]+\.[A-z]{2,4}\b)",
case=False)
contains_tel_pattern = s.str.contains(
r'(?:\+?\s?(?:\d{0,3})?\-?\(?(?:\d{3})\)?[\s\-\.]?)?(?:\d{3})[\s\-\.]?(?:\d{4})[\s\-]?')
does_not_contain_session = ~s.str.contains(r'(?:19|20)\d{2}\-(?:19|20)\d{2}')
contains_keywords = s.str.contains(r'tel\s?\:|phone\s?\:|telephone|mobile', case=False)
contains_pair_of_digits = s.str.count(r'\d{2}') > 2
contains_fax_keyword = s.str.contains(r'fax', case=False)
s = s.loc[
(less_than_80_char
& (contains_email_pattern
| ((contains_tel_pattern & does_not_contain_session)
| (contains_keywords & contains_pair_of_digits))
& ((contains_keywords & contains_fax_keyword)
| (~contains_fax_keyword))
))
].tolist()
return s
else:
return []
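    # Rough illustration with hypothetical strings: "Tel: 041-111-222-333" and
    # "info@example.edu" would be kept, while long paragraphs or bare session ranges
    # such as "2019-2020" would be dropped.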
def contact_extract(self):
"""Extracts the contact detail on the contact page."""
structure = pd.DataFrame(columns=['Category', 'Details', 'ParentText'])
contact_link = self.contact_link()
if not contact_link:
return structure
else:
xpath_to_target = '//*[not(name()="script")' \
'and not(name()="meta")' \
'and not(name()="head")' \
'and not(name()="html")]'
tree = linkParse.get_html_as_tree(contact_link)
if tree is None:
print(f"Contact link {contact_link} does not seem to be valid.")
return structure
elements = tree.xpath(xpath_to_target)
xpath_address = [etree.ElementTree(tree).getpath(x) for x in elements]
text = [x.xpath('./text()') for x in elements]
text = [[x.strip().replace("\xa0", "") for x in y if y] for y in text]
text_xpath_df = pd.DataFrame({
"Xpath": xpath_address,
"Text": text
})
email_pattern = r"\b[A-z0-9\._%+\-]+@[A-z0-9\.\-]+\.[A-z]{2,4}\b"
text_xpath_df['Text'] = text_xpath_df['Text'].apply(self.__filter__)
text_xpath_df = text_xpath_df.loc[text_xpath_df["Text"].apply(lambda x: bool(x))]
if text_xpath_df.empty:
tel_nodes_elements = tree.xpath('//a[starts-with(@href, "tel:")]')
tel_nodes_xpath = [etree.ElementTree(tree).getpath(x) for x in tel_nodes_elements]
tel_nodes_text = [x.attrib.get("href") for x in tel_nodes_elements]
tel_nodes_text = [x.replace('tel:', "") for x in tel_nodes_text if re.search(r"\d", x)]
if not tel_nodes_text:
return structure
                tel_nodes = pd.DataFrame({
                    "Xpath": tel_nodes_xpath,
                    # wrap each number in a list so the "sep_here".join below treats it as a single item
                    "Text": [[x] for x in tel_nodes_text]
                })
                text_xpath_df = pd.concat([text_xpath_df, tel_nodes], ignore_index=True)
if text_xpath_df.empty:
return structure
text_xpath_df = pd.concat([text_xpath_df["Xpath"],
text_xpath_df["Text"].apply("sep_here".join).str.split("sep_here",
expand=True)],
axis=1)
text_xpath_df = text_xpath_df.melt(['Xpath'], var_name='Category', value_name="Details").dropna()
text_xpath_df["Details"] = text_xpath_df["Details"].apply(
lambda x: re.findall(email_pattern, x)[0] if re.findall(email_pattern,
x) else x)
text_xpath_df["Category"] = text_xpath_df["Details"].str.contains(email_pattern).apply(
lambda x: "Email" if x else "Phone")
parent_text_all_elements = []
for i in text_xpath_df["Xpath"].tolist():
if tree.xpath(i):
parent_node = tree.xpath(i)[0]
parent_text = parent_node.xpath(".//text()")
parent_text = [x.strip() for x in parent_text]
parent_text = "\n".join(parent_text)
parent_text_all_elements.append(parent_text)
else:
parent_text_all_elements.append("")
text_xpath_df["ParentText"] = parent_text_all_elements
return text_xpath_df.iloc[:, 1:]
def get_institution_data(weblink):
institute_data = InstituteData(weblink)
if institute_data.edu_web_filter():
scraped_data = dict(
Website=institute_data.weblink,
Logo=institute_data.logo_link(),
FacebookLink=institute_data.fb_link(),
FacebookImages=institute_data.fb_logo_cover(),
GalleryLink=institute_data.gallery_link(),
GalleryImages=institute_data.gallery_images(),
Blog=institute_data.blog()
)
        scraped_data = pd.DataFrame(scraped_data, index=[0])
        return scraped_data
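# Example usage (hypothetical URL; the site must pass edu_web_filter to be scraped):
#   details_df = get_institution_data("https://www.example-college.edu")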
import os
import pandas as pd
import numpy as np
SETS = ['ADP-morph_tuning_VGG16', 'ADP-morph_tuning_X1.7', 'ADP-morph_segtest_VGG16', 'ADP-morph_segtest_X1.7',
'ADP-func_tuning_VGG16', 'ADP-func_tuning_X1.7', 'ADP-func_segtest_VGG16', 'ADP-func_segtest_X1.7',
'VOC2012_VGG16', 'VOC2012_M7',
'DeepGlobe_VGG16', 'DeepGlobe_M7',
'DeepGlobe_balanced_VGG16', 'DeepGlobe_balanced_M7']
# SEC/DSRG
def to_underscore(x):
return x.replace('-VGG16', '_VGG16').replace('-X1.7', '_X1.7').replace('-M7', '_M7')
def to_dash(x):
return x.replace('_VGG16', '-VGG16').replace('_X1.7', '-X1.7').replace('_M7', '-M7')
DIR = '../03a_sec-dsrg/eval'
eval_sec_dsrg = {'SEC': {}, 'DSRG': {}}
def get_miou(fpath):
if not os.path.exists(fpath):
return np.nan
else:
df = pd.read_excel(fpath)
return df['IoU'][df['Class'] == 'Mean'].values[0]
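# get_miou returns NaN when a metrics file is missing, so absent runs simply appear as
# empty cells in the final comparison table, e.g. (hypothetical path):
#   get_miou('eval/SEC/some_run/metrics_some_set.xlsx')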
for method in ['SEC', 'DSRG']:
folders = os.listdir(os.path.join(DIR, method))
for folder in folders:
if 'ADP' in folder:
for s in [to_dash(folder.replace('train', 'tuning')), to_dash(folder.replace('train', 'segtest'))]:
fpath = os.path.join(DIR, method, folder, 'metrics_%s.xlsx' % s)
key = to_underscore(s)
eval_sec_dsrg[method][key] = get_miou(fpath)
elif 'DeepGlobe' in folder:
s = to_dash(folder.replace('train_', 'test_'))
fpath = os.path.join(DIR, method, folder, 'metrics_%s.xlsx' % s)
key = folder.replace('_train_', '_')
eval_sec_dsrg[method][key] = get_miou(fpath)
else:
s = to_dash(folder.replace('train_', 'val_'))
fpath = os.path.join(DIR, method, folder, 'metrics_%s.xlsx' % s)
key = to_underscore(s).replace('val_', '')
eval_sec_dsrg[method][key] = get_miou(fpath)
# Grad-CAM/IRNet
DIR = '../03b_irn/eval'
folders = os.listdir(DIR)
eval_cam = {}
eval_irn = {}
def irn_folder_to_key(folder):
if folder.startswith('adp_morph'):
key = 'ADP-morph'
elif folder.startswith('adp_func'):
key = 'ADP-func'
elif folder.startswith('voc12'):
key = 'VOC2012'
elif folder.startswith('deepglobe_balanced'):
key = 'DeepGlobe_balanced'
elif folder.startswith('deepglobe'):
key = 'DeepGlobe'
if folder.endswith('tuning'):
key += '_tuning'
elif folder.endswith('evaluation'):
key += '_segtest'
if 'vgg16' in folder:
key += '_VGG16'
elif 'x1.7' in folder:
key += '_X1.7'
elif 'm7' in folder:
key += '_M7'
return key
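# Illustrative mapping (hypothetical folder name): 'voc12_vgg16_cam' -> 'VOC2012_VGG16'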
for folder in folders:
key = irn_folder_to_key(folder)
if 'cam' in folder:
fname = folder + '_cam_iou.csv'
df = pd.read_csv(os.path.join(DIR, folder, fname))
eval_cam[key] = df[df['Unnamed: 0'] == 'mean']['iou'].values[0]
else:
fname = folder + '_iou.csv'
df = pd.read_csv(os.path.join(DIR, folder, fname))
eval_irn[key] = df[df['Unnamed: 0'] == 'miou']['iou'].values[0]
# HistoSegNet
DIR = '../03c_hsn/eval'
folders = os.listdir(DIR)
eval_hsn = {}
for folder in folders:
assert folder in SETS
fnames = [x for x in os.listdir(os.path.join(DIR, folder)) if x.endswith('.xlsx') and not x.startswith('~')]
assert len(fnames) == 1
fname = fnames[0]
df = pd.read_excel(os.path.join(DIR, folder, fname))
eval_hsn[folder] = df['IoU'][df['Class'] == 'Mean'].values[0]
df_eval = pd.DataFrame({'Grad-CAM': eval_cam, 'SEC': eval_sec_dsrg['SEC'], 'DSRG': eval_sec_dsrg['DSRG'],
'IRNet': eval_irn, 'HistoSegNet': eval_hsn})
pd.set_option('display.max_columns', None)
# Predicting Heart Disease
from sklearn.tree import DecisionTreeClassifier, export_graphviz
from sklearn.metrics import accuracy_score, confusion_matrix, classification_report, make_scorer
from sklearn.model_selection import train_test_split, GridSearchCV, cross_val_score
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd
df = pd.read_csv('data/mlbootcamp5_train.csv',
index_col='id', sep=';')
df.head()
# Convert `age` to be in years
df['age'] = round(df['age'] / 365)
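# e.g. (illustrative) an age of 18393 days becomes 18393 / 365 = about 50.4, which rounds to 50 years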
# One-Hot encoding for `cholesterol` and `gluc`
## Unique values for each
df.gluc.unique()
df.cholesterol.unique()
## Using get_dummies from pandas (drop_first=True could be used to avoid a redundant dummy column)
pd.get_dummies(df.cholesterol, prefix='chol', prefix_sep='_')
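# Assuming the dataset's usual 1/2/3 coding, this yields indicator columns chol_1, chol_2 and chol_3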
import streamlit as st
import pandas as pd
from plotly import graph_objs as go
from plotly.subplots import make_subplots
import plotly.express as px
from PIL import Image
# Use the full page instead of a narrow central column
st.set_page_config(
page_title = 'Pakistan Textile Exports',
page_icon = '✅',
layout = 'wide'
)
# ---- HIDE STREAMLIT STYLE ----
hide_st_style = """
<style>
#MainMenu {visibility: hidden;}
footer {visibility: hidden;}
header {visibility: hidden;}
</style>
"""
st.markdown(hide_st_style, unsafe_allow_html=True)
#from pages import garments, knitwear, textile # import your app modules here
st.sidebar.header('Pakistan Textile Exports')
page_names = ['Dashboard', #index =0
'Total Textile Exports', #index=1
'Raw Cotton Exports',
#'Carded/Combed Cotton Exports',
'Cotton Yarn Exports',
'Non-Cotton Yarn Exports',
'Cotton Cloth Exports',
'Knitwear Exports',
'Bedwear Exports',
'Readymade Garments Exports',
'Towel Exports',
'Tents & Tarpaulines Exports',
'Made-ups excluding Bedwear & Towels',
'Artifical Silk & Synthetics Exports',
'Other Textiles Exports',
'Overall Pakistan Trade',
'Cotton Statistics',
]
page = st.sidebar.radio('Navigation', page_names, index=0)
#st.write("**The variable 'page' returns:**", page)
########################################
########################################
if page == 'Cotton Statistics':
#importing csv file as dataframe
df = pd.read_csv('cotton_districts.csv')
#shorten name of a district for better display on chart
#df['District'] = df['District'].replace('Shaheed Benazirabad', 'Benazirabad')
########################################
########################################
#yearly trend chart
########################################
    #creating new column 'Bales_sum' with the yearly sum of bales
df_yearly = df.groupby(['Year']).agg(Bales_sum=('Bales', 'sum')).reset_index()
##############################
##############################
fig_cd = go.Figure()
# Add traces
fig_cd.add_trace(go.Bar(x=df_yearly['Year'], y=df_yearly['Bales_sum'],
name='<NAME>',
text=df_yearly['Bales_sum'], #text on bars
textfont_size=24, #text on bars
textfont_family='roboto',
textposition='auto',
texttemplate='%{text:.3s}', # to text shorten into 3 digits, use '%{text:.3s}'
marker_color='#006BA2', #bar colors
hovertemplate='%{x} <br><NAME>: %{y}'
))
from PIL import Image
image = Image.open('logo.png')
#st.image(logo.png)
fig_cd.add_layout_image(
dict(
source=image,
xref="paper", yref="paper",
            x=1, y=1, #image position on chart
sizex=0.15, sizey=0.15, #image size on chart
xanchor="right", yanchor="bottom"
))
fig_cd.update_layout(
autosize=False, height=650, width=1050,
legend_traceorder="reversed",
margin=dict(t=90, b=40, l=40, r=40),
#title="Cotton Production in Pakistan",
#title_font=dict(size=30, color='#111111', family="fjalla one, sans-serif"),
xaxis_title='', yaxis_title="No. of Bales",
plot_bgcolor='#ffffff',
paper_bgcolor='#ffffff',
)
fig_cd.update_xaxes(showline=True, linewidth=2, linecolor='black')
fig_cd.update_yaxes(showline=True, linewidth=2, linecolor='black')
fig_cd.update_xaxes(tickangle=0, tickfont=dict(family='Roboto', color='black', size=24))
fig_cd.update_yaxes(tickangle=0, tickfont=dict(family='Roboto', color='black', size=24))
fig_cd.update_yaxes(title_font=dict(family='Roboto', color='black', size=24))
#fig_cd.update_xaxes(font=dict(color='#111111', size=24, family="roboto, sans-serif"))
fig_cd.update_yaxes(showgrid=True, gridwidth=1, gridcolor='#758D99')
#title
fig_cd.add_annotation(
text="Cotton Production",
font=dict(family='Fjalla one', color='#006BA2', size=36),
xref="x domain", yref="y domain",
x=0, y=1.15,
showarrow=False,
arrowhead=1)
#subtitle
fig_cd.add_annotation(
text="in Pakistan over the last few years of the closing season",
font=dict(family='roboto', color='black', size=24),
xref="x domain", yref="y domain",
x=0, y=1.08,
showarrow=False,
arrowhead=1)
#datasource
fig_cd.add_annotation(
text="Source: Pakistan Cotton Ginners Association/National Textile University, Pakistan",
font=dict(family='Roboto', color='#758D99', size=20),
xref="x domain", yref="y domain",
x=0, y=-0.13,
showarrow=False,
arrowhead=1)
st.plotly_chart(fig_cd, use_container_width=True) # to show Figure; container width true makes fig. size responsive
###
#######################################
###############################
#cotton exports and imports
##############################
df_e = pd.read_csv('yearly_cotton_imports_pbs.csv')
df_e['year'] = df_e['year'].astype(str)
##############################
fig = go.Figure()
# Add traces
fig.add_trace(go.Bar(
x=df_e["year"],
y=df_e["imp_bales"],
text=df_e["imp_bales"],
marker_color='#ff6b6c',
name='Imports' #name on legend
))
fig.add_trace(go.Bar(
x=df_e["year"],
y=df_e["exp_bales"],
text=df_e["exp_bales"],
marker_color='#3ebcd2',
name='Exports'
))
fig.add_layout_image(
dict(
source=image,
xref="paper", yref="paper",
            x=1, y=1, #image position on chart
sizex=0.15, sizey=0.15, #image size on chart
xanchor="right", yanchor="bottom"
))
fig.update_traces(texttemplate='%{text:.2s}', textposition='auto', textfont_size=24, textfont_family='roboto', textfont_color="#111111")
fig.update_layout(
autosize=False, height=650, width=1050,
legend_traceorder="reversed",
margin=dict(t=90, b=40, l=40, r=40),
xaxis_title='', yaxis_title="No. of Bales",
plot_bgcolor='#ffffff',
paper_bgcolor='#ffffff',
)
fig.update_xaxes(showline=True, linewidth=2, linecolor='black')
fig.update_yaxes(showline=True, linewidth=2, linecolor='black')
fig.update_xaxes(tickangle=0, tickfont=dict(family='Roboto', color='black', size=24))
fig.update_yaxes(tickangle=0, tickfont=dict(family='Roboto', color='black', size=24))
fig.update_yaxes(title_font=dict(family='Roboto', color='black', size=24))
#fig_cd.update_xaxes(font=dict(color='#111111', size=24, family="roboto, sans-serif"))
fig.update_yaxes(showgrid=True, gridwidth=1, gridcolor='#758D99')
#title
fig.add_annotation(
text="Cotton Exports & Imports",
font=dict(family='Fjalla one', color='#006BA2', size=36),
xref="x domain", yref="y domain",
x=0, y=1.15,
showarrow=False,
arrowhead=1)
#subtitle
fig.add_annotation(
text="in Pakistan over the last few years of the closing season",
font=dict(family='roboto', color='black', size=24),
xref="x domain", yref="y domain",
x=0, y=1.08,
showarrow=False,
arrowhead=1)
#datasource
fig.add_annotation(
text="Source: Pakistan Bureau of Statistics/National Textile University, Pakistan",
font=dict(family='Roboto', color='#758D99', size=20),
xref="x domain", yref="y domain",
x=0, y=-0.13,
showarrow=False,
arrowhead=1)
fig.update_layout(legend=dict(
orientation="h",
font=dict(family='Roboto', color='#758D99', size=16),
yanchor="bottom",
y=1.02,
xanchor="right",
x=0.8))
st.plotly_chart(fig, use_container_width=True) # to show Figure; container width true makes fig. size responsive
###############################
################################
#monthly cotton imports
#importing csv file as dataframe
df_m = pd.read_csv('monthly_cotton_imports_pbs.csv')
#calculating year-to-date YTD bales and adding new column for the same
df_m['bales_ytd'] = df_m['bales'].cumsum()
df_m['usd_ytd'] = df_m['USD'].cumsum()
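    # cumsum converts the monthly figures into running year-to-date totals,
    # e.g. monthly bales of [10, 20, 30] (hypothetical values) become bales_ytd [10, 30, 60]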
##############
fig = go.Figure()
###############
# Add traces
fig.add_trace(go.Bar(x=df_m['month'], y=df_m['bales_ytd'],
name='<NAME>',
text=df_m['bales_ytd'], #text on bars
textfont_size=24, #text on bars
textfont_family='roboto',
textposition='auto',
texttemplate='%{text:.3s}', # to text shorten into 3 digits, use '%{text:.3s}'
marker_color='#006BA2', #bar colors
))
fig.add_layout_image(
dict(
source=image,
xref="paper", yref="paper",
            x=1, y=1, #image position on chart
sizex=0.15, sizey=0.15, #image size on chart
xanchor="right", yanchor="bottom"
))
###############
#layout
fig.update_layout(
autosize=False, height=650, width=1050,
legend_traceorder="reversed",
margin=dict(t=90, b=90, l=90, r=40),
plot_bgcolor='#ffffff',
paper_bgcolor='#ffffff',
)
###############
#updates axes
fig.update_xaxes(showline=True, linewidth=2, linecolor='black')
fig.update_yaxes(showline=True, linewidth=2, linecolor='black')
fig.update_xaxes(tickangle=0, tickfont=dict(family='Roboto', color='black', size=24))
fig.update_yaxes(tickangle=0, tickfont=dict(family='Roboto', color='black', size=24))
fig.update_yaxes(title='Cumulative No. of Bales', title_font=dict(family='Roboto', color='black', size=24))
fig.update_yaxes(showgrid=True, gridwidth=1, gridcolor='#758D99')
###############
#title
fig.add_annotation(
text="<NAME>",
font=dict(family='Fjalla one', color='#006BA2', size=36),
xref="x domain", yref="y domain",
x=0, y=1.18,
showarrow=False,
arrowhead=1)
#subtitle
fig.add_annotation(
text="in Pakistan in the current financial year 2021-22",
font=dict(family='roboto', color='black', size=24),
xref="x domain", yref="y domain",
x=0, y=1.10,
showarrow=False,
arrowhead=1)
#data reference
fig.add_annotation(
text="Source: Pakistan Bureau of Statistics/National Textile University, Pakistan",
font=dict(family='Roboto', color='#758D99', size=20),
xref="x domain", yref="y domain",
x=0, y=-0.18,
showarrow=False,
arrowhead=1)
######################
#show figure in streamlit web app
st.plotly_chart(fig, use_container_width=True) # to show Figure; container width true makes fig. size responsive
##############################
##############################
##############
fig = go.Figure()
###############
# Add traces
fig.add_trace(go.Bar(x=df_m['month'], y=df_m['usd_ytd'],
name='<NAME>',
text=df_m['usd_ytd'], #text on bars
textfont_size=24, #text on bars
textfont_family='roboto',
textposition='auto',
texttemplate='%{text:.3s}', # to text shorten into 3 digits, use '%{text:.3s}'
marker_color='#ff6b6c', #bar colors
))
fig.add_layout_image(
dict(
source=image,
xref="paper", yref="paper",
            x=1, y=1, #image position on chart
sizex=0.15, sizey=0.15, #image size on chart
xanchor="right", yanchor="bottom"
))
###############
#layout
fig.update_layout(
autosize=False, height=650, width=1050,
legend_traceorder="reversed",
margin=dict(t=90, b=90, l=90, r=40),
plot_bgcolor='#ffffff',
paper_bgcolor='#ffffff',
)
###############
#updates axes
fig.update_xaxes(showline=True, linewidth=2, linecolor='black')
fig.update_yaxes(showline=True, linewidth=2, linecolor='black')
fig.update_xaxes(tickangle=0, tickfont=dict(family='Roboto', color='black', size=24))
fig.update_yaxes(tickangle=0, tickfont=dict(family='Roboto', color='black', size=24))
fig.update_yaxes(title='Cumulative US$', title_font=dict(family='Roboto', color='black', size=24))
fig.update_yaxes(showgrid=True, gridwidth=1, gridcolor='#758D99')
###############
#title
fig.add_annotation(
text="Cost of Cotton Imports",
font=dict(family='Fjalla one', color='#006BA2', size=36),
xref="x domain", yref="y domain",
x=0, y=1.18,
showarrow=False,
arrowhead=1)
#subtitle
fig.add_annotation(
text="Cumulative price paid for cotton imports till recent month)",
font=dict(family='roboto', color='black', size=24),
xref="x domain", yref="y domain",
x=0, y=1.10,
showarrow=False,
arrowhead=1)
#data reference
fig.add_annotation(
text="Source: Pakistan Bureau of Statistics/National Textile University, Pakistan",
font=dict(family='Roboto', color='#758D99', size=20),
xref="x domain", yref="y domain",
x=0, y=-0.18,
showarrow=False,
arrowhead=1)
######################
#show figure in streamlit web app
st.plotly_chart(fig, use_container_width=True) # to show Figure; container width true makes fig. size responsive
##############################
##############################
##############################
#grouping by province
###############################
df_punjab = df[df['Province'] == 'Punjab']
df_punjab = df_punjab.groupby(['Year']).agg({'Bales':'sum'}).reset_index()
df_sindh = df[df['Province'] == 'Sindh']
df_sindh = df_sindh.groupby(['Year']).agg({'Bales':'sum'}).reset_index()
df_baluchistan = df[df['Province'] == 'Baluchistan']
df_baluchistan = df_baluchistan.groupby(['Year']).agg({'Bales':'sum'}).reset_index()
fig = go.Figure()
fig.add_trace(go.Bar(
x=df_punjab["Year"],
y=df_punjab["Bales"],
text=df_punjab["Bales"],
marker_color='#ff6b6c',
name='Punjab' #name on legend
))
fig.add_trace(go.Bar(
x=df_sindh["Year"],
y=df_sindh["Bales"],
text=df_sindh["Bales"],
marker_color='#3ebcd2',
name='Sindh'
))
fig.add_trace(go.Bar(
x=df_baluchistan["Year"],
y=df_baluchistan["Bales"],
text=df_baluchistan["Bales"],
marker_color='#006BA2',
name='Baluchistan'
))
fig.add_layout_image(
dict(
source=image,
xref="paper", yref="paper",
            x=1, y=1, #image position on chart
sizex=0.15, sizey=0.15, #image size on chart
xanchor="right", yanchor="bottom"
))
fig.update_traces(texttemplate='%{text:.2s}', textposition='auto', textfont_size=24, textfont_family='roboto', textfont_color="#111111")
fig.update_layout(
autosize=True, height=650, width=1050,
margin=dict(t=100, b=120, l=40, r=40),
plot_bgcolor='#ffffff',
paper_bgcolor='#ffffff',
bargap=0.2, #value can be An int or float in the interval [0, 1]
)
fig.update_xaxes(showline=True, linewidth=2, linecolor='black')
fig.update_yaxes(showline=True, linewidth=2, linecolor='black')
fig.update_xaxes(tickangle=0, tickfont=dict(family='Roboto', color='black', size=24))
fig.update_yaxes(tickangle=0, tickfont=dict(family='Roboto', color='black', size=24))
fig.update_yaxes(title_font=dict(family='Roboto', color='black', size=24))
#fig_cd.update_xaxes(font=dict(color='#111111', size=24, family="roboto, sans-serif"))
fig.update_yaxes(showgrid=True, gridwidth=1, gridcolor='#758D99')
fig.update_yaxes(title="No. of Bales",
title_font=dict(size=25, color='#111111', family="roboto"),
)
#title
fig.add_annotation(
text="Cotton Production",
font=dict(family='Fjalla one', color='#006BA2', size=36),
xref="x domain", yref="y domain",
x=0, y=1.19,
showarrow=False,
arrowhead=1)
#subtitle
fig.add_annotation(
text=f"in different Pakistani provinces over the last few years",
font=dict(family='roboto', color='black', size=24),
xref="x domain", yref="y domain",
x=0, y=1.11,
showarrow=False,
arrowhead=1)
fig.add_annotation(
text="Source: Pakistan Cotton Ginners Association/National Textile University, Pakistan",
font=dict(family='Roboto', color='#758D99', size=20),
xref="x domain", yref="y domain",
x=0, y=-0.24,
showarrow=False,
arrowhead=1)
fig.update_layout(legend=dict(
orientation="h",
font=dict(family='Roboto', color='#758D99', size=16),
yanchor="bottom",
y=1.08,
xanchor="right",
x=0.8))
st.plotly_chart(fig, use_container_width=True) # to show Figure; container width true makes fig. size responsive
############################
############################
# district-wise chart
###########################
#year = df["Year"]
#latest_year = year.max()
#filter data for latest_year
df_latest_year = df[df['Period'] == '2021-22']
fig = go.Figure()
# Add traces
fig.add_trace(go.Bar(x=df_latest_year['District'], y=df_latest_year['Bales'],
name='<NAME>',
text=df_latest_year['Bales'], #text on bars
textfont_size=24, #text on bars
textfont_family='roboto',
texttemplate='%{text:.3s}', # to text shorten into 3 digits, use '%{text:.3s}'
marker_color='#006BA2', #bar colors
))
fig.add_layout_image(
dict(
source=image,
xref="paper", yref="paper",
            x=1, y=1, #image position on chart
sizex=0.15, sizey=0.15, #image size on chart
xanchor="right", yanchor="bottom"
))
fig.update_layout(
autosize=False, height=650, width=1050,
legend_traceorder="reversed",
margin=dict(t=90, b=40, l=40, r=40),
#title="Cotton Production in Pakistan",
#title_font=dict(size=30, color='#111111', family="fjalla one, sans-serif"),
xaxis_title='', yaxis_title="No. of Bales",
plot_bgcolor='#ffffff',
paper_bgcolor='#ffffff',
)
fig.update_xaxes(showline=True, linewidth=2, linecolor='black')
fig.update_yaxes(showline=True, linewidth=2, linecolor='black')
fig.update_xaxes(tickangle=90, tickfont=dict(family='Roboto', color='black', size=24))
fig.update_yaxes(tickangle=0, tickfont=dict(family='Roboto', color='black', size=24))
fig.update_yaxes(title_font=dict(family='Roboto', color='black', size=24))
#fig_cd.update_xaxes(font=dict(color='#111111', size=24, family="roboto, sans-serif"))
fig.update_yaxes(showgrid=True, gridwidth=1, gridcolor='#758D99')
fig.update_xaxes({'categoryorder':'total descending'})
#title
fig.add_annotation(
text="Cotton Production",
font=dict(family='Fjalla one', color='#006BA2', size=36),
xref="x domain", yref="y domain",
x=0, y=1.22,
showarrow=False,
arrowhead=1)
#subtitle
fig.add_annotation(
text=f"in different Pakistani districts during 2020-21 season",
font=dict(family='roboto', color='black', size=24),
xref="x domain", yref="y domain",
x=0, y=1.11,
showarrow=False,
arrowhead=1)
fig.add_annotation(
text="Source: Pakistan Cotton Ginners Association/National Textile University, Pakistan",
font=dict(family='Roboto', color='#758D99', size=20),
xref="x domain", yref="y domain",
x=0, y=-0.75,
showarrow=False,
arrowhead=1)
st.plotly_chart(fig, use_container_width=True) # to show Figure; container width true makes fig. size responsive
########################
########################
# montlhy cotton arrivals chart
###########################
#################
df_cotton_arrivals = pd.read_csv('cotton_arrivals.csv')
fig = go.Figure()
fig.add_trace(go.Scatter(
x=df_cotton_arrivals["Date"],
y=df_cotton_arrivals["2018-19"],
name="2018-19",
text=df_cotton_arrivals['2018-19'],
texttemplate='%{text:.3s}', #text shorten into 3 digits
mode="markers+lines",
textposition="bottom right",
textfont=dict(family="roboto, sans-serif", size=18, color="#eca220"),
marker=dict(size=8, color="#eca220"),
line=dict(width=2, color="#eca220"),
))
fig.add_trace(go.Scatter(
x=df_cotton_arrivals["Date"],
y=df_cotton_arrivals["2019-20"],
name="2019-20",
text=df_cotton_arrivals['2019-20'],
texttemplate='%{text:.3s}', #text shorten into 3 digits
mode="markers+lines",
textposition="bottom right",
textfont=dict(family="roboto, sans-serif", size=18, color="#b4bb3b"),
marker=dict(size=8, color="#b4bb3b"),
line=dict(width=2, color="#b4bb3b"),
))
fig.add_trace(go.Scatter(
x=df_cotton_arrivals["Date"],
y=df_cotton_arrivals["2020-21"],
name="2020-21",
text=df_cotton_arrivals['2020-21'],
texttemplate='%{text:.3s}', #text shorten into 3 digits
mode="markers+lines",
textposition="bottom right",
textfont=dict(family="roboto, sans-serif", color="#963c4c", size=18),
marker=dict(size=8, color="#963c4c"),
line=dict(width=2, color="#963c4c"),
))
fig.add_trace(go.Scatter(
x=df_cotton_arrivals["Date"],
y=df_cotton_arrivals["2021-22"],
name="2021-22",
text=df_cotton_arrivals['2021-22'],
texttemplate='%{text:.3s}', # to text shorten into 3 digits, use '%{text:.3s}'
mode="markers+lines+text",
textposition="bottom right",
textfont=dict(family="fjalla one, sans-serif", color="#106ea0", size=20),
marker=dict(size=12, color="#106ea0"),
line=dict(width=5, color="#106ea0")
))
fig.add_layout_image(
dict(
source=image,
xref="paper", yref="paper",
            x=1, y=1, #image position on chart
sizex=0.15, sizey=0.15, #image size on chart
xanchor="right", yanchor="bottom"
))
fig.update_layout(
autosize=True, height=650, width=1050,
margin=dict(t=90, b=120, l=40, r=40),
plot_bgcolor='#ffffff',
paper_bgcolor='#ffffff',
bargap=0.2, #value can be An int or float in the interval [0, 1]
)
fig.update_xaxes(showline=True, linewidth=2, linecolor='black')
fig.update_yaxes(showline=True, linewidth=2, linecolor='black')
fig.update_xaxes(tickangle=90, tickfont=dict(family='Roboto', color='black', size=24))
fig.update_yaxes(tickangle=0, tickfont=dict(family='Roboto', color='black', size=24))
fig.update_yaxes(title_font=dict(family='Roboto', color='black', size=24))
#fig_cd.update_xaxes(font=dict(color='#111111', size=24, family="roboto, sans-serif"))
fig.update_yaxes(showgrid=True, gridwidth=1, gridcolor='#758D99')
fig.update_yaxes(title="Cumulative number of bales",
title_font=dict(size=25, color='#111111', family="roboto"),
)
#title
fig.add_annotation(
text="Monthly Cotton Arrival",
font=dict(family='Fjalla one', color='#006BA2', size=36),
xref="x domain", yref="y domain",
x=0, y=1.19,
showarrow=False,
arrowhead=1)
#subtitle
fig.add_annotation(
text=f"in Pakistani factories",
font=dict(family='roboto', color='black', size=24),
xref="x domain", yref="y domain",
x=0, y=1.11,
showarrow=False,
arrowhead=1)
fig.add_annotation(
text="Source: Pakistan Cotton Ginners Association/National Textile University, Pakistan",
font=dict(family='Roboto', color='#758D99', size=20),
xref="x domain", yref="y domain",
x=0, y=-0.24,
showarrow=False,
arrowhead=1)
fig.update_layout(legend=dict(
orientation="h",
font=dict(family='Roboto', color='#758D99', size=16),
yanchor="bottom",
y=1.02,
xanchor="right",
x=0.75))
st.plotly_chart(fig, use_container_width=True) # to show Figure; container width true makes fig. size responsive
###############################
##############################
#Historical cotton data
#########################
##############################
df_h = pd.read_csv('cotton_historical.csv')
df_a = pd.read_csv('cotton_area.csv')
fig = go.Figure()
# Add traces
fig.add_trace(go.Scatter(
x=df_h["Year"],
y=df_h["Bales"],
name="",
#text=df_cotton_arrivals['2021-22'],
#texttemplate='%{text:.3s}', # to text shorten into 3 digits, use '%{text:.3s}'
mode="markers+lines",
#textposition="bottom right",
#textfont=dict(family="fjalla one, sans-serif", color="#106ea0", size=20),
marker=dict(size=12, color="#106ea0"),
line=dict(width=5, color="#106ea0"),
showlegend=False
))
fig.add_layout_image(
dict(
source=image,
xref="paper", yref="paper",
            x=1, y=1, #image position on chart
sizex=0.15, sizey=0.15, #image size on chart
xanchor="right", yanchor="bottom"
))
fig.update_layout(
autosize=False, height=650, width=1050,
legend_traceorder="reversed",
margin=dict(t=90, b=40, l=40, r=40),
#title="Cotton Production in Pakistan",
#title_font=dict(size=30, color='#111111', family="fjalla one, sans-serif"),
xaxis_title='', yaxis_title="No. of Bales",
plot_bgcolor='#ffffff',
paper_bgcolor='#ffffff',
)
fig.update_xaxes(showline=True, linewidth=2, linecolor='black')
fig.update_yaxes(showline=True, linewidth=2, linecolor='black')
fig.update_xaxes(tickangle=0, tickfont=dict(family='Roboto', color='black', size=24))
fig.update_yaxes(tickangle=0, tickfont=dict(family='Roboto', color='black', size=24))
fig.update_yaxes(title_font=dict(family='Roboto', color='black', size=24))
#fig_cd.update_xaxes(font=dict(color='#111111', size=24, family="roboto, sans-serif"))
fig.update_yaxes(showgrid=True, gridwidth=1, gridcolor='#758D99')
#adding range buttons
fig.update_xaxes(
rangeslider_visible = False,
rangeselector = dict(
buttons = list([
dict(count = 1, label = '1Y', step = 'year', stepmode = 'backward'),
dict(count = 2, label = '2Y', step = 'year', stepmode = 'backward'),
dict(count = 5, label = '5Y', step = 'year', stepmode = 'backward'),
#dict(step = 'all')
])))
#title
fig.add_annotation(
text="Cotton Production in Pakistan",
font=dict(family='Fjalla one', color='#006BA2', size=36),
xref="x domain", yref="y domain",
x=0, y=1.15,
showarrow=False,
arrowhead=1)
#subtitle
fig.add_annotation(
text="since 1947-48 season",
font=dict(family='roboto', color='black', size=24),
xref="x domain", yref="y domain",
x=0, y=1.08,
showarrow=False,
arrowhead=1)
#datasource
fig.add_annotation(
text="Source: Pakistan Cotton Ginners Association/Karachi Cotton Association/National Textile University, Pakistan",
font=dict(family='Roboto', color='#758D99', size=20),
xref="x domain", yref="y domain",
x=0, y=-0.13,
showarrow=False,
arrowhead=1)
#Adding only the last date point value/text
fig.add_trace(go.Scatter(x=[df_h['Year'].iloc[-1]],
y=[df_h['Bales'].iloc[-1]],
text=[df_h['Bales'].iloc[-1]],
name='',
mode='markers+text',
marker=dict(color='red', size=14),
textposition='top right',
textfont=dict(family="fjalla one, sans-serif", color="#006BA2", size=18),
texttemplate='%{text:.3s}', #text shorten into 3 digits
showlegend=False))
#Adding value/text at year 2005, which is at index number 57
fig.add_trace(go.Scatter(x=[df_h['Year'].iloc[57]],
y=[df_h['Bales'].iloc[57]],
text=[df_h['Bales'].iloc[57]],
name='',
mode='markers+text',
marker=dict(color='red', size=14),
textposition='middle right',
textfont=dict(family="fjalla one, sans-serif", color="#006BA2", size=18),
texttemplate='%{text:.3s}', #text shorten into 3 digits
showlegend=False))
st.plotly_chart(fig, use_container_width=True) # to show Figure; container width true makes fig. size responsive
##########################################
############################################
#cotton area
df_a = pd.read_csv('cotton_area.csv')
fig = go.Figure()
# Add traces
fig.add_trace(go.Scatter(
x=df_a["year"],
y=df_a["total_acres"],
name="",
line=dict(width=2, color="red"),
fill='tozeroy',
showlegend=False
))
fig.add_trace(go.Scatter(
x=df_a["year"],
y=df_a["punjab_acres"],
name="",
line=dict(width=2, color="#106ea0"),
fill='tozeroy',
showlegend=False
))
fig.add_trace(go.Scatter(
x=df_a["year"],
y=df_a["sindh_acres"],
name="",
line=dict(width=2, color="green"),
fill='tozeroy',
showlegend=False
))
fig.add_trace(go.Scatter(
x=df_a["year"],
y=df_a["kpk_acres"],
name="",
line=dict(width=2, color="#106ea0"),
fill='tozeroy',
showlegend=False
))
fig.add_trace(go.Scatter(
x=df_a["year"],
y=df_a["baluchistan_acres"],
name="",
line=dict(width=2, color="yellow"),
fill='tozeroy',
showlegend=False
))
fig.add_layout_image(
dict(
source=image,
xref="paper", yref="paper",
            x=1, y=1, #image position on chart
sizex=0.15, sizey=0.15, #image size on chart
xanchor="right", yanchor="bottom"
))
fig.update_layout(
autosize=False, height=650, width=1050,
legend_traceorder="reversed",
margin=dict(t=90, b=40, l=40, r=40),
#title="Cotton Production in Pakistan",
#title_font=dict(size=30, color='#111111', family="fjalla one, sans-serif"),
xaxis_title='', yaxis_title="No. of Acres",
plot_bgcolor='#ffffff',
paper_bgcolor='#ffffff',
)
fig.update_xaxes(showline=True, linewidth=2, linecolor='black')
fig.update_yaxes(showline=True, linewidth=2, linecolor='black')
fig.update_xaxes(tickangle=0, tickfont=dict(family='Roboto', color='black', size=24))
fig.update_yaxes(tickangle=0, tickfont=dict(family='Roboto', color='black', size=24))
fig.update_yaxes(title_font=dict(family='Roboto', color='black', size=24))
#fig_cd.update_xaxes(font=dict(color='#111111', size=24, family="roboto, sans-serif"))
fig.update_yaxes(showgrid=True, gridwidth=1, gridcolor='#758D99')
#adding range buttons
#title
fig.add_annotation(
text="Cotton Production Area in Pakistan",
font=dict(family='Fjalla one', color='#006BA2', size=36),
xref="x domain", yref="y domain",
x=0, y=1.15,
showarrow=False,
arrowhead=1)
#subtitle
fig.add_annotation(
text="since 1947-48 season",
font=dict(family='roboto', color='black', size=24),
xref="x domain", yref="y domain",
x=0, y=1.08,
showarrow=False,
arrowhead=1)
#datasource
fig.add_annotation(
text="Source: Agriculture Statistics of Pakistan/Agriculture Marketing Information Service, Punjab",
font=dict(family='Roboto', color='#758D99', size=20),
xref="x domain", yref="y domain",
x=0, y=-0.13,
showarrow=False,
arrowhead=1)
#Adding only the last date point value/text
fig.add_trace(go.Scatter(x=[df_a['year'].iloc[-1]],
y=[df_a['total_acres'].iloc[-1]],
text=[df_a['total_acres'].iloc[-1]],
name='',
mode='markers+text',
marker=dict(color='red', size=14),
textposition='bottom left',
textfont=dict(family="fjalla one, sans-serif", color="white", size=18),
texttemplate='Total:%{text:.3s}', #text shorten into 3 digits
showlegend=False))
fig.add_trace(go.Scatter(x=[df_a['year'].iloc[-1]],
y=[df_a['punjab_acres'].iloc[-1]],
text=[df_a['punjab_acres'].iloc[-1]],
name='',
mode='markers+text',
marker=dict(color='red', size=14),
textposition='bottom left',
textfont=dict(family="fjalla one, sans-serif", color="white", size=18),
texttemplate='Punjab:%{text:.3s}', #text shorten into 3 digits
showlegend=False))
fig.add_trace(go.Scatter(x=[df_a['year'].iloc[-1]],
y=[df_a['sindh_acres'].iloc[-1]],
text=[df_a['sindh_acres'].iloc[-1]],
name='',
mode='markers+text',
marker=dict(color='red', size=14),
textposition='bottom left',
textfont=dict(family="fjalla one, sans-serif", color="white", size=18),
texttemplate='Sindh:%{text:.3s}', #text shorten into 3 digits
showlegend=False))
fig.add_trace(go.Scatter(x=[df_a['year'].iloc[-1]],
y=[df_a['baluchistan_acres'].iloc[-1]],
text=[df_a['baluchistan_acres'].iloc[-1]],
name='',
mode='markers+text',
marker=dict(color='red', size=14),
textposition='middle left',
textfont=dict(family="fjalla one, sans-serif", color="white", size=18),
texttemplate='Baluchistan:%{text:.3s}', #text shorten into 3 digits
showlegend=False))
fig.add_trace(go.Scatter(x=[df_a['year'].iloc[-1]],
y=[df_a['kpk_acres'].iloc[-1]],
text=[df_a['kpk_acres'].iloc[-1]],
name='',
mode='markers+text',
marker=dict(color='red', size=14),
textposition='bottom left',
textfont=dict(family="fjalla one, sans-serif", color="black", size=18),
texttemplate='kpk:%{text:.3s}', #text shorten into 3 digits
showlegend=False))
st.plotly_chart(fig, use_container_width=True) # to show Figure; container width true makes fig. size responsive
###############################
##############################
#correlation matric
df_cor = pd.read_csv('crops_area_correlation_data.csv')
corr = df_cor.corr()
trace = go.Heatmap(z=corr.values,
x=corr.index.values,
y=corr.columns.values,
xgap = 1, # Sets the horizontal gap (in pixels) between bricks
ygap = 1,
)
title = 'Crop Correlation Matrix'
layout = go.Layout(
title_text=title,
title_x=0.5,
xaxis_showgrid=False,
yaxis_showgrid=False,
yaxis_autorange='reversed',
autosize=False, height=650, width=1050,
margin=dict(t=90, b=40, l=40, r=40),
plot_bgcolor='#ffffff',
paper_bgcolor='#ffffff',
font=dict(family="fjalla one, sans-serif", color="black", size=18),
)
fig=go.Figure(data=[trace], layout=layout)
fig.update_xaxes(tickangle=90, tickfont=dict(family='Roboto', color='black', size=24))
#st.plotly_chart(fig, use_container_width=True) # to show Figure; container width true makes fig. size responsive
##################
    import seaborn as sns
import matplotlib.pyplot as plt
fig, ax = plt.subplots(figsize=(16, 8))
    sns.set(font_scale=1) # set the font scale for all seaborn graph labels
heatmap = sns.heatmap(df_cor.corr(), ax=ax, vmin=-1, vmax=1, annot=True, annot_kws={"size": 20}, cmap='BrBG')
heatmap.set_title('Correlation Heatmap', fontdict={'fontsize':24}, pad=12);
st.write(fig)
###########################
############################
fig = plt.figure(figsize=(16, 8))
heatmap = sns.heatmap(df_cor.corr(), vmin=-1, vmax=1, annot=True)
heatmap.set_title('Correlation Heatmap', fontdict={'fontsize':18}, pad=12);
# save heatmap as .png file
# dpi - sets the resolution of the saved image in dots/inches
# bbox_inches - when set to 'tight' - does not allow the labels to be cropped
plt.savefig('heatmap.png', dpi=300, bbox_inches='tight')
#st.write(fig)
#############
###########################
########################
#monthly cotton imports
##############################
#Cotton prices
#############################
import yfinance as yf
from datetime import datetime, timedelta
#data = yf.download(tickers=stock_price, period = ‘how_many_days’, interval = ‘how_long_between_each_check’, rounding= bool)
#data = yf.download(tickers='CT=F', period = '5Y', interval = '1D', rounding= True)
data = yf.download(tickers='CT=F', start = '2017-01-01', end = datetime.now().date(), rounding= True)
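    # downloads daily ICE cotton futures quotes (Yahoo ticker 'CT=F') from 2017-01-01 up to today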
#data
data= data.reset_index() # to show date as column header
#data
## getting the live ticker price
# import stock_info module from yahoo_fin
from yahoo_fin import stock_info as si
#to get live price of ticker/cotton CT=F
price = si.get_live_price('CT=F')
    prev_close = data.Close.iloc[-2] #iloc[-2] is the second-to-last row of data (previous session's close); iloc[0] is the first row
##
fig = go.Figure()
fig.add_trace(go.Scatter(x=data['Date'],
y=data['Close'],
name = '',
texttemplate='%{text:.2s}', # to shorten text into 3 digits, use '%{text:.3s}'
))
fig.update_traces(hovertemplate='Date: %{x} <br>Price: %{y} cents per pound') #<br> adds space or shifts to next line; x & y is repected axis value;
fig.add_trace(go.Indicator(
domain={"x": [0, 1], "y": [0.6, 1]},
value=price,
mode="number+delta",
number={"font":{"size":50, "color":'#111111', "family":"roboto"}},
title={"text": "Current Price in cents per pound"},
title_font=dict(size=25, color='#111111', family="roboto"),
delta={"reference": prev_close},
))
fig.add_layout_image(
dict(
source=image,
xref="paper", yref="paper",
            x=1, y=1, #image position on chart
sizex=0.15, sizey=0.15, #image size on chart
xanchor="right", yanchor="bottom"
))
fig.update_yaxes(title_text = 'Cents Per Pound', tickprefix = '')
#fig.update_xaxes(showspikes=True, spikecolor="red", spikesnap="cursor", spikemode="across", spikethickness=3) #xaxis spike on hover
#fig.update_yaxes(showspikes=True, spikecolor="red", spikesnap="cursor", spikemode="across", spikethickness=3) #yais spike on hover
fig.update_layout(
autosize=True, height=650, width=1050,
margin=dict(t=90, b=120, l=40, r=40),
plot_bgcolor='#ffffff',
paper_bgcolor='#ffffff',
font=dict(color='#111111', size=20, family="roboto, sans-serif"),
bargap=0.2, #value can be An int or float in the interval [0, 1]
)
fig.update_xaxes(showline=True, linewidth=2, linecolor='black')
fig.update_yaxes(showline=True, linewidth=2, linecolor='black')
fig.update_xaxes(tickangle=0, tickfont=dict(family='Roboto', color='black', size=24))
fig.update_yaxes(tickangle=0, tickfont=dict(family='Roboto', color='black', size=24))
fig.update_yaxes(title_font=dict(family='Roboto', color='black', size=24))
#fig_cd.update_xaxes(font=dict(color='#111111', size=24, family="roboto, sans-serif"))
fig.update_yaxes(showgrid=True, gridwidth=1, gridcolor='#758D99')
fig.update_yaxes(title="Cents Per Pound",
title_font=dict(size=25, color='#111111', family="roboto"),
)
fig.update_xaxes(
rangeslider_visible = False,
rangeselector = dict(
buttons = list([
dict(count = 1, label = '1W', step = 'day', stepmode = 'backward'),
dict(count = 1, label = '1M', step = 'month', stepmode = 'backward'),
dict(count = 6, label = '6M', step = 'month', stepmode = 'backward'),
dict(count = 1, label = 'YTD', step = 'year', stepmode = 'todate'),
dict(count = 1, label = '1Y', step = 'year', stepmode = 'backward'),
dict(count = 2, label = '2Y', step = 'year', stepmode = 'backward'),
dict(count = 5, label = '5Y', step = 'year', stepmode = 'backward'),
#dict(step = 'all')
])))
#title
fig.add_annotation(
text="Cotton Rates/ICE Futures",
font=dict(family='Fjalla one', color='#006BA2', size=36),
xref="x domain", yref="y domain",
x=0, y=1.19,
showarrow=False,
arrowhead=1)
fig.add_annotation(
text="Source: Yahoo Finance/National Textile University",
font=dict(family='Roboto', color='#111111', size=20),
xref="x domain", yref="y domain",
x=0, y=-0.24,
showarrow=False,
arrowhead=1)
st.plotly_chart(fig, use_container_width=True) # to show Figure; container width true makes fig. size responsive
###########################
###########################
#cotton map of Pakistan
###########################
import json
pak_districts = json.load(open("pakistan_districts.geojson", 'r'))
district_id_map = {}
for feature in pak_districts["features"]:
feature["id"] = feature["properties"]["objectid"]
district_id_map[feature['properties']['districts']] = feature['id']
df['id'] = df['District'].apply(lambda x: district_id_map[x])
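#Choroplethmapbox joins each row to a geojson feature by matching `locations`
#against the feature's top-level `id`, so the loop above copies `objectid` into
#`id` and builds a district-name -> id lookup (the file is assumed to carry
#'objectid' and 'districts' properties). Roughly the same join, as a sketch:
#   lookup = {f['properties']['districts']: f['properties']['objectid']
#             for f in pak_districts['features']}
#   df['id'] = df['District'].map(lookup) # like the apply() above, but unmatched districts become NaN instead of raising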
#st.title("Cotton Map of Pakistan")
fig = go.Figure(go.Choroplethmapbox(geojson=pak_districts, locations=df.id, z=df.Bales,
text= df['District'],
hoverinfo= 'text+z',
reversescale=True,
))
fig.update_layout(mapbox_style="stamen-terrain",
mapbox_zoom=5.0, mapbox_center = {"lat": 30.3753, "lon": 69.3451})
fig.update_traces(text=df['District'])
fig.update_layout(
autosize=True, height=650, width=1400,
)
fig.update_layout(margin={"r":0,"t":0,"l":0,"b":0})
#####
#st.plotly_chart(fig)
######################
#######################
st.title("Cotton Map of Pakistan")
#satellite-streets
#############
fig = px.scatter_mapbox(df,
lat="Lat",
lon="Long",
hover_name="District",
hover_data=["District", "Bales"],
color_discrete_sequence=["Red"],
size="Bales",
animation_frame="Year",
zoom=5,
height=300
)
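#px.scatter_mapbox sizes each district marker by 'Bales', and animation_frame='Year'
#adds a play button/slider that steps the map through the years present in df.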
fig.update_layout(mapbox_style="open-street-map")
fig.update_layout(
autosize=True, height=700, width=1400,
)
fig.update_layout(margin={"r":0,"t":0,"l":0,"b":0})
st.plotly_chart(fig, use_container_width=True)
########################################
elif page == 'Overall Pakistan Trade':
df = pd.read_csv('paktrade_pbs.csv')
##############
fig = make_subplots(rows=2, cols=1, shared_xaxes=True,
vertical_spacing=0.01,
row_heights=[0.65,0.35])
###############
# Add traces
fig.add_trace(go.Scatter(
x=df["year"],
y=df["export_US$B"],
name="Exports",
text=df['export_US$B'],
texttemplate='%{text:.3s}', # shorten text to 3 significant digits with '%{text:.3s}'
mode="markers+lines",
textposition="bottom right",
textfont=dict(family="fjalla one, sans-serif", color="green", size=20),
marker=dict(size=12, color="green"),
line=dict(width=5, color="green")), row=1, col=1)
# Add traces
fig.add_trace(go.Scatter(
x=df["year"],
y=df["import_US$B"],
name="Imports",
text=df['import_US$B'],
texttemplate='%{text:.3s}', # shorten text to 3 significant digits with '%{text:.3s}'
mode="markers+lines",
textposition="bottom right",
textfont=dict(family="fjalla one, sans-serif", color="red", size=20),
marker=dict(size=12, color="red"),
line=dict(width=5, color="red")), row=1, col=1)
# Plot trade balance bars on 2nd row
#val = df['balance_US$B']
#colors = ['green' if val >= 0
# else 'red' for val in df['balance_US$B']]
fig.add_trace(go.Bar(x=df['year'], y=df['balance_US$B'],
name='Trade Balance',
#text=df['balance_US$B'], #text on bars
#textfont_size=24, #text on bars
#textfont_family='roboto',
#texttemplate='%{text:.3s}', # shorten text to 3 significant digits with '%{text:.3s}'
marker_color='red', #bar colors
), row=2, col=1)
###############
image = Image.open('logo.png')
#st.image(logo.png)
fig.add_layout_image(
dict(
source=image,
xref="paper", yref="paper",
x=1, y=-0.2, #image position on chart
sizex=0.1, sizey=0.1, #image size on chart
xanchor="right", yanchor="bottom"
))
#layout
fig.update_layout(
autosize=False, height=650, width=1050,
#legend_traceorder="reversed",
margin=dict(t=60, b=120, l=40, r=40),
plot_bgcolor='#ffffff',
paper_bgcolor='#ffffff',
)
###############
#updates axes
fig.update_xaxes(showline=True, linewidth=8, linecolor='black', row=1, col=1)
fig.update_yaxes(showline=True, linewidth=2, linecolor='black', row=1, col=1)
fig.update_yaxes(showline=True, linewidth=2, linecolor='black', row=2, col=1)
fig.update_xaxes(tickangle=0, tickfont=dict(family='Roboto', color='black', size=24))
fig.update_yaxes(tickangle=0, tickfont=dict(family='Roboto', color='black', size=24))
fig.update_yaxes(side='right', title='US$ Billion', title_font=dict(family='Roboto', color='black', size=20), row=1, col=1)
fig.update_yaxes(side='right', title='Trade Balance', title_font=dict(family='Roboto', color='black', size=20), row=2, col=1)
fig.update_yaxes(showgrid=True, gridwidth=1, gridcolor='#758D99', row=1, col=1)
fig.update_yaxes(showgrid=True, gridwidth=1, gridcolor='#758D99', row=2, col=1)
###############
#title
fig.add_annotation(
text="Pakistan Exports and Imports",
font=dict(family='Fjalla one', color='#006BA2', size=36),
xref="x domain", yref="y domain",
x=0, y=1.18,
showarrow=False,
arrowhead=1)
#subtitle
fig.add_annotation(
text="1950-51 to 2020-21",
font=dict(family='roboto', color='black', size=24),
xref="x domain", yref="y domain",
x=0, y=1.06,
showarrow=False,
arrowhead=1)
#data reference
fig.add_annotation(
text="Source: Pakistan Bureau of Statistics",
font=dict(family='Fjalla one', color='#758D99', size=20),
xref="x domain", yref="y domain",
x=0, y=-0.9,
showarrow=False,
arrowhead=1)
#Adding only the last date point value/text
fig.add_trace(go.Scatter(x=[df['year'].iloc[-1]],
y=[df['export_US$B'].iloc[-1]],
text=[df['export_US$B'].iloc[-1]],
name='',
mode='markers+text',
marker=dict(color='green', size=14),
textposition='top center',
textfont=dict(family="fjalla one, sans-serif", color="green", size=20),
texttemplate='$%{text:.3s}B', #text shorten into 3 digits
showlegend=False))
#Adding only the last date point value/text
fig.add_trace(go.Scatter(x=[df['year'].iloc[-1]],
y=[df['import_US$B'].iloc[-1]],
text=[df['import_US$B'].iloc[-1]],
name='',
mode='markers+text',
marker=dict(color='red', size=14),
textposition='top center',
textfont=dict(family="fjalla one, sans-serif", color="red", size=20),
texttemplate='$%{text:.3s}B', #text shorten into 3 digits
showlegend=False))
#Adding only the last date point value/text
fig.add_trace(go.Scatter(x=[df['year'].iloc[-1]],
y=[df['balance_US$B'].iloc[-1]],
text=[df['balance_US$B'].iloc[-1]],
name='',
mode='markers+text',
marker=dict(color='red', size=14),
textposition='bottom center',
textfont=dict(family="fjalla one, sans-serif", color="red", size=20),
texttemplate='$%{text:.3s}B', #text shorten into 3 digits
showlegend=False), row=2, col=1)
#legend
fig.update_layout(legend=dict(
orientation="h",
font=dict(family='Roboto', color='#758D99', size=16),
yanchor="bottom",
y=1.02,
xanchor="right",
x=1))
######################
#show figure in streamlit web app
st.plotly_chart(fig, use_container_width=True) # to show Figure; container width true makes fig. size responsive
#config={'responsive': True}
##############################
##############################
#######################################
########################################
########################################
#data
df1 = pd.read_csv('monthly_trade.csv')
#calculating year-to-date (YTD) cumulative trade values and adding new columns for them
df1['imports_ytd_21_22'] = df1['imports_21_22B'].cumsum()
df1['imports_ytd_20_21'] = df1['imports_20_21B'].cumsum()
df1['exports_ytd_20_21'] = df1['exports_20_21B'].cumsum()
df1['exports_ytd_21_22'] = df1['exports_21_22B'].cumsum()
df1['balance_ytd_20_21'] = df1['balance_20_21B'].cumsum()
df1['balance_ytd_21_22'] = df1['balance_21_22B'].cumsum()
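#cumsum() turns each monthly column into a running fiscal-year-to-date total; the
#CSV is assumed to hold one fiscal year per column with rows ordered Jul-Jun, so
#the running totals never cross a year boundary.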
##############
#fig = go.Figure()
# add subplot properties when initializing fig variable
fig = make_subplots(rows=2, cols=1, shared_xaxes=True,
vertical_spacing=0.01,
row_heights=[0.65,0.35])
###############
# Add traces
fig.add_trace(go.Scatter(
x=df1["month"],
y=df1["imports_ytd_21_22"],
name="Imports 21-22",
text=df1['imports_ytd_21_22'],
texttemplate='%{text:.3s}B', # shorten text to 3 significant digits with '%{text:.3s}'
mode="markers+lines+text",
textposition="bottom right",
textfont=dict(family="fjalla one, sans-serif", color="red", size=20),
marker=dict(size=12, color="red"),
line=dict(width=5, color="red")), row=1, col=1)
fig.add_trace(go.Scatter(
x=df1["month"],
y=df1["imports_ytd_20_21"],
name="Imports 20-21",
text=df1['imports_ytd_20_21'],
texttemplate='%{text:.3s}B', # shorten text to 3 significant digits with '%{text:.3s}'
mode="markers+lines+text",
textposition="bottom right",
textfont=dict(family="fjalla one, sans-serif", color="brown", size=20),
marker=dict(size=12, color="brown"),
line=dict(width=5, color="brown")), row=1, col=1)
fig.add_trace(go.Scatter(
x=df1["month"],
y=df1["exports_ytd_20_21"],
name="Exports 20-21",
text=df1['exports_ytd_20_21'],
texttemplate='%{text:.3s}B', # shorten text to 3 significant digits with '%{text:.3s}'
mode="markers+lines+text",
textposition="bottom right",
textfont=dict(family="fjalla one, sans-serif", color="lightgreen", size=20),
marker=dict(size=12, color="lightgreen"),
line=dict(width=5, color="lightgreen")), row=1, col=1)
fig.add_trace(go.Scatter(
x=df1["month"],
y=df1["exports_ytd_21_22"],
name="Exports 21-22",
text=df1['exports_ytd_21_22'],
texttemplate='%{text:.3s}B', # shorten text to 3 significant digits with '%{text:.3s}'
mode="markers+lines+text",
textposition="bottom right",
textfont=dict(family="fjalla one, sans-serif", color="green", size=20),
marker=dict(size=12, color="green"),
line=dict(width=5, color="green")), row=1, col=1)
# Plot trade balance traces on 2nd row
#val = df['balance_US$B']
#colors = ['green' if val >= 0
# else 'red' for val in df['balance_US$B']]
fig.add_trace(go.Scatter(x=df1['month'], y=df1['balance_ytd_20_21'],
name='Trade Balance 20-21',
text=df1['balance_ytd_20_21'],
texttemplate='%{text:.3s}B', # shorten text to 3 significant digits with '%{text:.3s}'
mode="markers+lines+text",
textposition="top right",
textfont=dict(family="fjalla one, sans-serif", color="lightblue", size=20),
marker=dict(size=12, color="lightblue"),
line=dict(width=5, color="lightblue")), row=2, col=1)
fig.add_trace(go.Scatter(x=df1['month'], y=df1['balance_ytd_21_22'],
name='Trade Balance 21-22',
text=df1['balance_ytd_21_22'],
texttemplate='%{text:.3s}B', # shorten text to 3 significant digits with '%{text:.3s}'
mode="markers+lines+text",
textposition="top right",
textfont=dict(family="fjalla one, sans-serif", color="orange", size=20),
marker=dict(size=12, color="orange"),
line=dict(width=5, color="orange")), row=2, col=1)
###############
image = Image.open('logo.png')
#st.image(logo.png)
fig.add_layout_image(
dict(
source=image,
xref="paper", yref="paper",
x=1, y=1, #image position on chart
sizex=0.1, sizey=0.1, #image size on chart
xanchor="right", yanchor="bottom"
))
#layout
fig.update_layout(
autosize=False, height=650, width=1050,
#legend_traceorder="reversed",
margin=dict(t=80, b=110, l=40, r=40),
plot_bgcolor='#ffffff',
paper_bgcolor='#ffffff',
)
###############
#updates axes
fig.update_xaxes(showline=True, linewidth=8, linecolor='black', row=1, col=1)
fig.update_yaxes(showline=True, linewidth=2, linecolor='black', row=1, col=1)
fig.update_yaxes(showline=True, linewidth=2, linecolor='black', row=2, col=1)
fig.update_xaxes(tickangle=0, tickfont=dict(family='Roboto', color='black', size=24))
fig.update_yaxes(tickangle=0, tickfont=dict(family='Roboto', color='black', size=24))
fig.update_yaxes(side='right', title='US$ Billion', title_font=dict(family='Roboto', color='black', size=20), row=1, col=1)
fig.update_yaxes(side='right', title='Trade Balance', title_font=dict(family='Roboto', color='black', size=20), row=2, col=1)
fig.update_yaxes(showgrid=True, gridwidth=1, gridcolor='#758D99', row=1, col=1)
fig.update_yaxes(showgrid=True, gridwidth=1, gridcolor='#758D99', row=2, col=1)
###############
#title
fig.add_annotation(
text="Pakistan Exports and Imports",
font=dict(family='Fjalla one', color='#006BA2', size=36),
xref="x domain", yref="y domain",
x=0, y=1.21,
showarrow=False,
arrowhead=1)
#subtitle
fig.add_annotation(
text="2020-21 vs. 2021-22 (cumulative figures till recent month)",
font=dict(family='roboto', color='black', size=24),
xref="x domain", yref="y domain",
x=0, y=1.09,
showarrow=False,
arrowhead=1)
#data reference
fig.add_annotation(
text="Source: Pakistan Bureau of Statistics",
font=dict(family='Fjalla one', color='#758D99', size=20),
xref="x domain", yref="y domain",
x=0, y=-0.9,
showarrow=False,
arrowhead=1)
#legend
fig.update_layout(legend=dict(
orientation="h",
font=dict(family='Roboto', color='#758D99', size=16),
yanchor="bottom",
y=-0.16,
xanchor="right",
x=1))
######################
#show figure in streamlit web app
st.plotly_chart(fig, use_container_width=True) # to show Figure; container width true makes fig. size responsive
##############################
##############################
elif page == 'Raw Cotton Exports':
#####################################
#####################################
# cotton chart US$
df = pd.read_csv('monthly_textile_exports_pbs.csv')
df['date'] = pd.to_datetime(df['date'], infer_datetime_format=True)
df['year'] = df['date'].dt.year #creates a year column out of datetime columns
df['month'] = df['date'].dt.month #creates a month column out of datetime columns
df['year'] = df['year'].astype('string') #converting year column into string for filtering
df['month'] = df['month'].astype('string') #converting month column into string for filtering
df['Fiscal Year'] = df['date'].dt.to_period('Q-JUN').dt.qyear.apply(lambda x: str(x-1) + "-" + str(x))
#adding month column
df['month'] = df['date'].dt.strftime('%b') #creating month names column
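#Fiscal-year labels: anchoring quarters to 'Q-JUN' makes dt.qyear return the
#calendar year in which Pakistan's July-June fiscal year ends, and the lambda
#formats it as e.g. "2021-2022". Quick sanity check of the idea (pandas only):
#   s = pd.Series(pd.to_datetime(['2021-06-30', '2021-07-01']))
#   s.dt.to_period('Q-JUN').dt.qyear.apply(lambda x: f"{x-1}-{x}")
#   # -> ['2020-2021', '2021-2022']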
#filtering rows of the Raw Cotton category; .copy() avoids SettingWithCopyWarning when new columns are added below
df_cotton = df.loc[df['category'].isin(['Raw Cotton'])].copy()
#calculating year-to-date YTD exports
df_cotton['Exports_YTD'] = df_cotton.groupby(['Fiscal Year'])['Exports_US$'].cumsum()
df_cotton['pct_change_yoy'] = df_cotton.groupby(['month'])['Exports_YTD'].pct_change()*100
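#YTD pattern used for every category below: groupby('Fiscal Year').cumsum()
#restarts the running total each July, and grouping by month name before
#pct_change() compares a month with the same month one fiscal year earlier
#(rows are assumed to be in chronological order). Toy illustration:
#   toy = pd.DataFrame({'fy': ['20-21', '20-21', '21-22', '21-22'],
#                       'month': ['Jul', 'Aug', 'Jul', 'Aug'],
#                       'exports': [10, 20, 15, 30]})
#   toy['ytd'] = toy.groupby('fy')['exports'].cumsum()           # 10, 30, 15, 45
#   toy['yoy_%'] = toy.groupby('month')['ytd'].pct_change()*100  # NaN, NaN, 50, 50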
#filtering data of fiscal year 2020-21
df_cotton_2020_21 = df_cotton.loc[df_cotton['Fiscal Year'].isin(['2020-2021'])]
df_cotton_2021_22 = df_cotton.loc[df_cotton['Fiscal Year'].isin(['2021-2022'])]
fig_ytd = make_subplots(rows=2, cols=1, shared_xaxes=True,
vertical_spacing=0.01,
row_heights=[0.20,0.80])
#fig_ytd = go.Figure()
#fig_ytd = make_subplots(specs=[[{'secondary_y': True}]], rows=1, cols=1) # to make subplot with 1 row and 1 col
# Add traces
fig_ytd.add_trace(go.Bar(x=df_cotton_2020_21['month'], y=df_cotton_2020_21['Exports_YTD'],
name='Exports in 2020-21',
text=df_cotton_2020_21['Exports_YTD'],
textposition='auto',
texttemplate='%{text:,}',
hovertemplate='Exports to date: %{y}'
), row=2, col=1)
fig_ytd.add_trace(go.Scatter(x=df_cotton_2021_22['month'], y=df_cotton_2021_22['Exports_YTD'],
mode='markers+lines+text',
marker=dict(size=16, color="Green"),
name='Exports in 2021-22',
text=df_cotton_2021_22['Exports_YTD'],
textposition='bottom right',
texttemplate="%{text:,}",
line=dict(color='Green', width=4),
hovertemplate='Exports to date: %{y}B'
), row=2, col=1)
fig_ytd.add_trace(go.Scatter(x=df_cotton_2021_22['month'],
y=df_cotton_2021_22['pct_change_yoy'],
mode="lines+markers+text",
marker=dict(size=16, color="Red"),
name="%Change from previous year",
text=df_cotton_2021_22['pct_change_yoy'],
textposition='middle right',
texttemplate="%{text:.3s}%",
line=dict(color='Red', width=1, dash='dash'),
hovertemplate='%{y}',
), row=1, col=1 )
#fig_ytd.update_yaxes(title_text="Cumulative Exports in US$")
image = Image.open('logo.png')
#st.image(logo.png)
fig_ytd.add_layout_image(
dict(
source=image,
xref="paper", yref="paper",
x=1, y=-0.2, #image position on chart
sizex=0.1, sizey=0.1, #image size on chart
xanchor="right", yanchor="bottom"
))
fig_ytd.update_layout(
autosize=True, height=650, width=1050,
legend_traceorder="reversed",
margin=dict(t=90, b=110, l=90, r=40),
title_font=dict(size=25, color='#111111', family="fjalla one, sans-serif"),
xaxis_title='', yaxis_title="",
plot_bgcolor='#ffffff',
paper_bgcolor='#ffffff',
font=dict(color='#111111', size=18, family="roboto, sans-serif"), #font of axis labels
)
#updates axes
fig_ytd.update_xaxes(showline=True, linewidth=2, linecolor='black', row=2, col=1 )
fig_ytd.update_yaxes(visible=False, row=1, col=1 ) #range=[0, 100] if need to set y-axis range
fig_ytd.update_yaxes(showline=True, linewidth=2, linecolor='black', row=2, col=1 )
fig_ytd.update_xaxes(tickangle=0, tickfont=dict(family='Roboto', color='black', size=24))
fig_ytd.update_yaxes(title='Cumulative Exports (US$)', title_font=dict(family='Roboto', color='black', size=20), row=2, col=1)
fig_ytd.update_yaxes(showgrid=True, gridwidth=1, gridcolor='#758D99')
###############
#title
fig_ytd.add_annotation(
text="Pakistan Raw Cotton Exports by Value",
font=dict(family='Fjalla one', color='#006BA2', size=36),
xref="paper", yref="paper",
x=0, y=1.18,
showarrow=False,
arrowhead=1)
#subtitle
fig_ytd.add_annotation(
text="current year vs. previous year",
font=dict(family='roboto', color='black', size=24),
xref="paper", yref="paper",
x=0, y=1.10,
showarrow=False,
arrowhead=1)
#data reference
fig_ytd.add_annotation(
text="Source: Pakistan Bureau of Statistics",
font=dict(family='Fjalla one', color='#758D99', size=20),
xref="paper", yref="paper",
x=0, y=-0.2,
showarrow=False,
arrowhead=1)
fig_ytd.update_layout(legend=dict(
orientation="h",
yanchor="bottom",
y=1.02,
xanchor="right",
x=1
))
st.plotly_chart(fig_ytd, use_container_width=True) # to show Figure; container width true makes fig. size responsive
#####################################
#####################################
# raw cotton chart volume
#filtering data of fiscal year 2021-22
#df_yarn = df.loc[df['category'].isin(['Cotton Yarn'])]
#calculating year-to-date YTD exports
df_cotton['vol_YTD'] = df_cotton.groupby(['Fiscal Year'])['volume'].cumsum()
df_cotton['pct_change_yoy_vol'] = df_cotton.groupby(['month'])['vol_YTD'].pct_change()*100
#filtering data of fiscal year 2020-21
df_cotton_2020_21 = df_cotton.loc[df_cotton['Fiscal Year'].isin(['2020-2021'])]
df_cotton_2021_22 = df_cotton.loc[df_cotton['Fiscal Year'].isin(['2021-2022'])]
fig_ytd = make_subplots(rows=2, cols=1, shared_xaxes=True,
vertical_spacing=0.01,
row_heights=[0.20,0.80])
#fig_ytd = go.Figure()
#fig_ytd = make_subplots(specs=[[{'secondary_y': True}]], rows=1, cols=1) # to make subplot with 1 row and 1 col
# Add traces
fig_ytd.add_trace(go.Bar(x=df_cotton_2020_21['month'], y=df_cotton_2020_21['vol_YTD'],
name='Exports in 2020-21',
text=df_cotton_2020_21['vol_YTD'],
textposition='auto',
texttemplate='%{text:,}',
hovertemplate='Exports to date: %{y}'
), row=2, col=1)
fig_ytd.add_trace(go.Scatter(x=df_cotton_2021_22['month'], y=df_cotton_2021_22['vol_YTD'],
mode='markers+lines+text',
marker=dict(size=16, color="Green"),
name='Exports in 2021-22',
text=df_cotton_2021_22['vol_YTD'],
textposition='bottom right',
texttemplate="%{text:,}",
line=dict(color='Green', width=4),
hovertemplate='Exports to date: %{y}B'
), row=2, col=1)
fig_ytd.add_trace(go.Scatter(x=df_cotton_2021_22['month'],
y=df_cotton_2021_22['pct_change_yoy_vol'],
mode="lines+markers+text",
marker=dict(size=16, color="Red"),
name="%Change from previous year",
text=df_cotton_2021_22['pct_change_yoy_vol'],
textposition='middle right',
texttemplate="%{text:.2s}%",
line=dict(color='Red', width=1, dash='dash'),
hovertemplate='%{y}',
), row=1, col=1 )
#fig_ytd.update_yaxes(title_text="Cumulative Exports in US$")
image = Image.open('logo.png')
#st.image(logo.png)
fig_ytd.add_layout_image(
dict(
source=image,
xref="paper", yref="paper",
x=1, y=-0.2, #image position on chart
sizex=0.1, sizey=0.1, #image size on chart
xanchor="right", yanchor="bottom"
))
fig_ytd.update_layout(
autosize=True, height=650, width=1050,
legend_traceorder="reversed",
margin=dict(t=90, b=110, l=90, r=40),
title_font=dict(size=25, color='#111111', family="fjalla one, sans-serif"),
xaxis_title='', yaxis_title="",
plot_bgcolor='#ffffff',
paper_bgcolor='#ffffff',
font=dict(color='#111111', size=18, family="roboto, sans-serif"), #font of axis labels
)
#updates axes
fig_ytd.update_xaxes(showline=True, linewidth=2, linecolor='black', row=2, col=1 )
fig_ytd.update_yaxes(visible=False, row=1, col=1 ) #range=[0, 100] if need to set y-axis range
fig_ytd.update_yaxes(showline=True, linewidth=2, linecolor='black', row=2, col=1 )
fig_ytd.update_xaxes(tickangle=0, tickfont=dict(family='Roboto', color='black', size=24))
fig_ytd.update_yaxes(title='Cumulative Exports (Metric Tons)', title_font=dict(family='Roboto', color='black', size=20), row=2, col=1)
fig_ytd.update_yaxes(showgrid=True, gridwidth=1, gridcolor='#758D99')
###############
#title
fig_ytd.add_annotation(
text="Pakistan Raw Cotton Exports by Volume",
font=dict(family='Fjalla one', color='#006BA2', size=36),
xref="paper", yref="paper",
x=0, y=1.18,
showarrow=False,
arrowhead=1)
#subtitle
fig_ytd.add_annotation(
text="current year vs. previous year",
font=dict(family='roboto', color='black', size=24),
xref="paper", yref="paper",
x=0, y=1.10,
showarrow=False,
arrowhead=1)
#data reference
fig_ytd.add_annotation(
text="Source: Pakistan Bureau of Statistics",
font=dict(family='Fjalla one', color='#758D99', size=20),
xref="paper", yref="paper",
x=0, y=-0.2,
showarrow=False,
arrowhead=1)
fig_ytd.update_layout(legend=dict(
orientation="h",
yanchor="bottom",
y=1.02,
xanchor="right",
x=1
))
st.plotly_chart(fig_ytd, use_container_width=True) # to show Figure; container width true makes fig. size responsive
#####################################
#####################################
# cotton chart export price
df_cotton['pct_change_yoy_price'] = df_cotton.groupby(['month'])['unit_price'].pct_change()*100
df_cotton_2020_21 = df_cotton.loc[df_cotton['Fiscal Year'].isin(['2020-2021'])]
df_cotton_2021_22 = df_cotton.loc[df_cotton['Fiscal Year'].isin(['2021-2022'])]
fig_ytd = make_subplots(rows=2, cols=1, shared_xaxes=True,
vertical_spacing=0.01,
row_heights=[0.20,0.80])
#fig_ytd = go.Figure()
#fig_ytd = make_subplots(specs=[[{'secondary_y': True}]], rows=1, cols=1) # to make subplot with 1 row and 1 col
# Add traces
fig_ytd.add_trace(go.Bar(x=df_cotton_2020_21['month'], y=df_cotton_2020_21['unit_price'],
name='Price in 2020-21',
text=df_cotton_2020_21['unit_price'],
textposition='auto',
texttemplate='$%{text:.2f}',
hovertemplate='Exports price:$%{y}'
), row=2, col=1)
fig_ytd.add_trace(go.Scatter(x=df_cotton_2021_22['month'], y=df_cotton_2021_22['unit_price'],
mode='markers+lines+text',
marker=dict(size=16, color="Green"),
name='Price in 2021-22',
text=df_cotton_2021_22['unit_price'],
textposition='bottom right',
texttemplate="$%{text:.2f}",
line=dict(color='Green', width=4),
hovertemplate='Exports price:$%{y}'
), row=2, col=1)
fig_ytd.add_trace(go.Scatter(x=df_cotton_2021_22['month'],
y=df_cotton_2021_22['pct_change_yoy_price'],
mode="lines+markers+text",
marker=dict(size=16, color="Red"),
name="%Change from previous year",
text=df_cotton_2021_22['pct_change_yoy_price'],
textposition='middle right',
texttemplate="%{text:.2}%",
line=dict(color='Red', width=1, dash='dash'),
hovertemplate='%{y}',
), row=1, col=1 )
#fig_ytd.update_yaxes(title_text="Cumulative Exports in US$")
image = Image.open('logo.png')
#st.image(logo.png)
fig_ytd.add_layout_image(
dict(
source=image,
xref="paper", yref="paper",
x=1, y=-0.2, #image position on chart
sizex=0.1, sizey=0.1, #image size on chart
xanchor="right", yanchor="bottom"
))
fig_ytd.update_layout(
autosize=True, height=650, width=1050,
legend_traceorder="reversed",
margin=dict(t=90, b=110, l=90, r=40),
title_font=dict(size=25, color='#111111', family="fjalla one, sans-serif"),
xaxis_title='', yaxis_title="",
plot_bgcolor='#ffffff',
paper_bgcolor='#ffffff',
font=dict(color='#111111', size=18, family="roboto, sans-serif"), #font of axis labels
)
#updates axes
fig_ytd.update_xaxes(showline=True, linewidth=2, linecolor='black', row=2, col=1 )
fig_ytd.update_yaxes(visible=False, row=1, col=1 ) #range=[0, 100] if need to set y-axis range
fig_ytd.update_yaxes(showline=True, linewidth=2, linecolor='black', row=2, col=1 )
fig_ytd.update_xaxes(tickangle=0, tickfont=dict(family='Roboto', color='black', size=24))
fig_ytd.update_yaxes(title='US$ per Kg', title_font=dict(family='Roboto', color='black', size=20), row=2, col=1)
fig_ytd.update_yaxes(showgrid=True, gridwidth=1, gridcolor='#758D99')
###############
#title
fig_ytd.add_annotation(
text="Pakistan Raw Cotton Average Export Price",
font=dict(family='Fjalla one', color='#006BA2', size=36),
xref="paper", yref="paper",
x=0, y=1.18,
showarrow=False,
arrowhead=1)
#subtitle
fig_ytd.add_annotation(
text="current year vs. previous year",
font=dict(family='roboto', color='black', size=24),
xref="paper", yref="paper",
x=0, y=1.10,
showarrow=False,
arrowhead=1)
#data reference
fig_ytd.add_annotation(
text="Source: Pakistan Bureau of Statistics",
font=dict(family='Fjalla one', color='#758D99', size=20),
xref="paper", yref="paper",
x=0, y=-0.2,
showarrow=False,
arrowhead=1)
fig_ytd.update_layout(legend=dict(
orientation="h",
yanchor="bottom",
y=1.02,
xanchor="right",
x=1
))
#st.plotly_chart(fig_ytd, use_container_width=True) # to show Figure; container width true makes fig. size responsive
elif page == 'Cotton Yarn Exports':
#####################################
#####################################
# yarn chart US$
df = pd.read_csv('monthly_textile_exports_pbs.csv')
df['date'] = pd.to_datetime(df['date'], infer_datetime_format=True)
df['year'] = df['date'].dt.year #creates a year column out of datetime columns
df['month'] = df['date'].dt.month #creates a month column out of datetime columns
df['year'] = df['year'].astype('string') #converting year column into string for filtering
df['month'] = df['month'].astype('string') #converting month column into string for filtering
df['Fiscal Year'] = df['date'].dt.to_period('Q-JUN').dt.qyear.apply(lambda x: str(x-1) + "-" + str(x))
#adding month column
df['month'] = df['date'].dt.strftime('%b') #creating month names column
#filtering rows of the Cotton Yarn category; .copy() avoids SettingWithCopyWarning when new columns are added below
df_yarn = df.loc[df['category'].isin(['Cotton Yarn'])].copy()
#calculating year-to-date YTD exports
df_yarn['Exports_YTD'] = df_yarn.groupby(['Fiscal Year'])['Exports_US$'].cumsum()
df_yarn['pct_change_yoy'] = df_yarn.groupby(['month'])['Exports_YTD'].pct_change()*100
#filtering data of fiscal year 2020-21
df_yarn_2020_21 = df_yarn.loc[df_yarn['Fiscal Year'].isin(['2020-2021'])]
df_yarn_2021_22 = df_yarn.loc[df_yarn['Fiscal Year'].isin(['2021-2022'])]
fig_ytd = make_subplots(rows=2, cols=1, shared_xaxes=True,
vertical_spacing=0.01,
row_heights=[0.20,0.80])
#fig_ytd = go.Figure()
#fig_ytd = make_subplots(specs=[[{'secondary_y': True}]], rows=1, cols=1) # to make subplot with 1 row and 1 col
# Add traces
fig_ytd.add_trace(go.Bar(x=df_yarn_2020_21['month'], y=df_yarn_2020_21['Exports_YTD'],
name='Exports in 2020-21',
text=df_yarn_2020_21['Exports_YTD'],
textposition='auto',
texttemplate='%{text:,}',
hovertemplate='Exports to date: %{y}'
), row=2, col=1)
fig_ytd.add_trace(go.Scatter(x=df_yarn_2021_22['month'], y=df_yarn_2021_22['Exports_YTD'],
mode='markers+lines+text',
marker=dict(size=16, color="Green"),
name='Exports in 2021-22',
text=df_yarn_2021_22['Exports_YTD'],
textposition='top center',
texttemplate="%{text:,}",
line=dict(color='Green', width=4),
hovertemplate='Exports to date: %{y}B'
), row=2, col=1)
fig_ytd.add_trace(go.Scatter(x=df_yarn_2021_22['month'],
y=df_yarn_2021_22['pct_change_yoy'],
mode="lines+markers+text",
marker=dict(size=16, color="Red"),
name="%Change from previous year",
text=df_yarn_2021_22['pct_change_yoy'],
textposition='middle right',
texttemplate="%{text:.2s}%",
line=dict(color='Red', width=1, dash='dash'),
hovertemplate='%{y}',
), row=1, col=1 )
#fig_ytd.update_yaxes(title_text="Cumulative Exports in US$")
image = Image.open('logo.png')
#st.image(logo.png)
fig_ytd.add_layout_image(
dict(
source=image,
xref="paper", yref="paper",
x=1, y=-0.2, #image position on chart
sizex=0.1, sizey=0.1, #image size on chart
xanchor="right", yanchor="bottom"
))
fig_ytd.update_layout(
autosize=True, height=650, width=1050,
legend_traceorder="reversed",
margin=dict(t=90, b=110, l=90, r=40),
title_font=dict(size=25, color='#111111', family="fjalla one, sans-serif"),
xaxis_title='', yaxis_title="",
plot_bgcolor='#ffffff',
paper_bgcolor='#ffffff',
font=dict(color='#111111', size=18, family="roboto, sans-serif"), #font of axis labels
)
#updates axes
fig_ytd.update_xaxes(showline=True, linewidth=2, linecolor='black', row=2, col=1 )
fig_ytd.update_yaxes(visible=False, row=1, col=1 ) #range=[0, 100] if need to set y-axis range
fig_ytd.update_yaxes(showline=True, linewidth=2, linecolor='black', row=2, col=1 )
fig_ytd.update_xaxes(tickangle=0, tickfont=dict(family='Roboto', color='black', size=24))
fig_ytd.update_yaxes(title='Cumulative Exports (US$ Billion)', title_font=dict(family='Roboto', color='black', size=20), row=2, col=1)
fig_ytd.update_yaxes(showgrid=True, gridwidth=1, gridcolor='#758D99')
###############
#title
fig_ytd.add_annotation(
text="Pakistan Cotton Yarn Exports by Value",
font=dict(family='Fjalla one', color='#006BA2', size=36),
xref="paper", yref="paper",
x=0, y=1.18,
showarrow=False,
arrowhead=1)
#subtitle
fig_ytd.add_annotation(
text="current year vs. previous year",
font=dict(family='roboto', color='black', size=24),
xref="paper", yref="paper",
x=0, y=1.10,
showarrow=False,
arrowhead=1)
#data reference
fig_ytd.add_annotation(
text="Source: Pakistan Bureau of Statistics",
font=dict(family='Fjalla one', color='#758D99', size=20),
xref="paper", yref="paper",
x=0, y=-0.2,
showarrow=False,
arrowhead=1)
fig_ytd.update_layout(legend=dict(
orientation="h",
yanchor="bottom",
y=1.02,
xanchor="right",
x=1
))
st.plotly_chart(fig_ytd, use_container_width=True) # to show Figure; container width true makes fig. size responsive
#####################################
#####################################
# yarn chart volume
#filtering data of fiscal year 2021-22
#df_yarn = df.loc[df['category'].isin(['Cotton Yarn'])]
#calculating year-to-date YTD exports
df_yarn['vol_YTD'] = df_yarn.groupby(['Fiscal Year'])['volume'].cumsum()
df_yarn['pct_change_yoy_vol'] = df_yarn.groupby(['month'])['vol_YTD'].pct_change()*100
#filtering data of fiscal year 2020-21
df_yarn_2020_21 = df_yarn.loc[df_yarn['Fiscal Year'].isin(['2020-2021'])]
df_yarn_2021_22 = df_yarn.loc[df_yarn['Fiscal Year'].isin(['2021-2022'])]
fig_ytd = make_subplots(rows=2, cols=1, shared_xaxes=True,
vertical_spacing=0.01,
row_heights=[0.20,0.80])
#fig_ytd = go.Figure()
#fig_ytd = make_subplots(specs=[[{'secondary_y': True}]], rows=1, cols=1) # to make subplot with 1 row and 1 col
# Add traces
fig_ytd.add_trace(go.Bar(x=df_yarn_2020_21['month'], y=df_yarn_2020_21['vol_YTD'],
name='Exports in 2020-21',
text=df_yarn_2020_21['vol_YTD'],
textposition='auto',
texttemplate='%{text:,}',
hovertemplate='Exports to date: %{y}'
), row=2, col=1)
fig_ytd.add_trace(go.Scatter(x=df_yarn_2021_22['month'], y=df_yarn_2021_22['vol_YTD'],
mode='markers+lines+text',
marker=dict(size=16, color="Green"),
name='Exports in 2021-22',
text=df_yarn_2021_22['vol_YTD'],
textposition='top center',
texttemplate="%{text:,}",
line=dict(color='Green', width=4),
hovertemplate='Exports to date: %{y}B'
), row=2, col=1)
fig_ytd.add_trace(go.Scatter(x=df_yarn_2021_22['month'],
y=df_yarn_2021_22['pct_change_yoy_vol'],
mode="lines+markers+text",
marker=dict(size=16, color="Red"),
name="%Change from previous year",
text=df_yarn_2021_22['pct_change_yoy_vol'],
textposition='middle right',
texttemplate="%{text:.2s}%",
line=dict(color='Red', width=1, dash='dash'),
hovertemplate='%{y}',
), row=1, col=1 )
#fig_ytd.update_yaxes(title_text="Cumulative Exports in US$")
image = Image.open('logo.png')
#st.image(logo.png)
fig_ytd.add_layout_image(
dict(
source=image,
xref="paper", yref="paper",
x=1, y=-0.2, #image position on chart
sizex=0.1, sizey=0.1, #image size on chart
xanchor="right", yanchor="bottom"
))
fig_ytd.update_layout(
autosize=True, height=650, width=1050,
legend_traceorder="reversed",
margin=dict(t=90, b=110, l=90, r=40),
title_font=dict(size=25, color='#111111', family="fjalla one, sans-serif"),
xaxis_title='', yaxis_title="",
plot_bgcolor='#ffffff',
paper_bgcolor='#ffffff',
font=dict(color='#111111', size=18, family="roboto, sans-serif"), #font of axis labels
)
#updates axes
fig_ytd.update_xaxes(showline=True, linewidth=2, linecolor='black', row=2, col=1 )
fig_ytd.update_yaxes(visible=False, row=1, col=1 ) #range=[0, 100] if need to set y-axis range
fig_ytd.update_yaxes(showline=True, linewidth=2, linecolor='black', row=2, col=1 )
fig_ytd.update_xaxes(tickangle=0, tickfont=dict(family='Roboto', color='black', size=24))
fig_ytd.update_yaxes(title='Cumulative Exports (Metric Tons)', title_font=dict(family='Roboto', color='black', size=20), row=2, col=1)
fig_ytd.update_yaxes(showgrid=True, gridwidth=1, gridcolor='#758D99')
###############
#title
fig_ytd.add_annotation(
text="Pakistan Cotton Yarn Exports by Volume",
font=dict(family='Fjalla one', color='#006BA2', size=36),
xref="paper", yref="paper",
x=0, y=1.18,
showarrow=False,
arrowhead=1)
#subtitle
fig_ytd.add_annotation(
text="current year vs. previous year",
font=dict(family='roboto', color='black', size=24),
xref="paper", yref="paper",
x=0, y=1.10,
showarrow=False,
arrowhead=1)
#data reference
fig_ytd.add_annotation(
text="Source: Pakistan Bureau of Statistics",
font=dict(family='Fjalla one', color='#758D99', size=20),
xref="paper", yref="paper",
x=0, y=-0.2,
showarrow=False,
arrowhead=1)
fig_ytd.update_layout(legend=dict(
orientation="h",
yanchor="bottom",
y=1.02,
xanchor="right",
x=1
))
st.plotly_chart(fig_ytd, use_container_width=True) # to show Figure; container width true makes fig. size responsive
#####################################
#####################################
# yarn chart export price
df_yarn['pct_change_yoy_price'] = df_yarn.groupby(['month'])['unit_price'].pct_change()*100
df_yarn_2020_21 = df_yarn.loc[df_yarn['Fiscal Year'].isin(['2020-2021'])]
df_yarn_2021_22 = df_yarn.loc[df_yarn['Fiscal Year'].isin(['2021-2022'])]
fig_ytd = make_subplots(rows=2, cols=1, shared_xaxes=True,
vertical_spacing=0.01,
row_heights=[0.20,0.80])
#fig_ytd = go.Figure()
#fig_ytd = make_subplots(specs=[[{'secondary_y': True}]], rows=1, cols=1) # to make subplot with 1 row and 1 col
# Add traces
fig_ytd.add_trace(go.Bar(x=df_yarn_2020_21['month'], y=df_yarn_2020_21['unit_price'],
name='Price in 2020-21',
text=df_yarn_2020_21['unit_price'],
textposition='auto',
texttemplate='$%{text:.2f}',
hovertemplate='Exports price:$%{y}'
), row=2, col=1)
fig_ytd.add_trace(go.Scatter(x=df_yarn_2021_22['month'], y=df_yarn_2021_22['unit_price'],
mode='markers+lines+text',
marker=dict(size=16, color="Green"),
name='Price in 2021-22',
text=df_yarn_2021_22['unit_price'],
textposition='bottom right',
texttemplate="$%{text:.2f}",
line=dict(color='Green', width=4),
hovertemplate='Exports price:$%{y}'
), row=2, col=1)
fig_ytd.add_trace(go.Scatter(x=df_yarn_2021_22['month'],
y=df_yarn_2021_22['pct_change_yoy_price'],
mode="lines+markers+text",
marker=dict(size=16, color="Red"),
name="%Change from previous year",
text=df_yarn_2021_22['pct_change_yoy_price'],
textposition='middle right',
texttemplate="%{text:.2s}%",
line=dict(color='Red', width=1, dash='dash'),
hovertemplate='%{y}',
), row=1, col=1 )
#fig_ytd.update_yaxes(title_text="Cumulative Exports in US$")
image = Image.open('logo.png')
#st.image(logo.png)
fig_ytd.add_layout_image(
dict(
source=image,
xref="paper", yref="paper",
x=1, y=-0.2, #image position on chart
sizex=0.1, sizey=0.1, #image size on chart
xanchor="right", yanchor="bottom"
))
fig_ytd.update_layout(
autosize=True, height=650, width=1050,
legend_traceorder="reversed",
margin=dict(t=90, b=110, l=90, r=40),
title_font=dict(size=25, color='#111111', family="fjalla one, sans-serif"),
xaxis_title='', yaxis_title="",
plot_bgcolor='#ffffff',
paper_bgcolor='#ffffff',
font=dict(color='#111111', size=18, family="roboto, sans-serif"), #font of axis labels
)
#updates axes
fig_ytd.update_xaxes(showline=True, linewidth=2, linecolor='black', row=2, col=1 )
fig_ytd.update_yaxes(visible=False, row=1, col=1 ) #range=[0, 100] if need to set y-axis range
fig_ytd.update_yaxes(showline=True, linewidth=2, linecolor='black', row=2, col=1 )
fig_ytd.update_xaxes(tickangle=0, tickfont=dict(family='Roboto', color='black', size=24))
fig_ytd.update_yaxes(title='US$ per Kg', title_font=dict(family='Roboto', color='black', size=20), row=2, col=1)
fig_ytd.update_yaxes(showgrid=True, gridwidth=1, gridcolor='#758D99')
###############
#title
fig_ytd.add_annotation(
text="Pakistan Cotton Yarn Average Export Price",
font=dict(family='Fjalla one', color='#006BA2', size=36),
xref="paper", yref="paper",
x=0, y=1.18,
showarrow=False,
arrowhead=1)
#subtitle
fig_ytd.add_annotation(
text="current year vs. previous year",
font=dict(family='roboto', color='black', size=24),
xref="paper", yref="paper",
x=0, y=1.10,
showarrow=False,
arrowhead=1)
#data reference
fig_ytd.add_annotation(
text="Source: Pakistan Bureau of Statistics",
font=dict(family='Fjalla one', color='#758D99', size=20),
xref="paper", yref="paper",
x=0, y=-0.2,
showarrow=False,
arrowhead=1)
fig_ytd.update_layout(legend=dict(
orientation="h",
yanchor="bottom",
y=1.02,
xanchor="right",
x=1
))
st.plotly_chart(fig_ytd, use_container_width=True) # to show Figure; container width true makes fig. size responsive
elif page == 'Cotton Cloth Exports':
#####################################
#####################################
# cloth chart US$
df = pd.read_csv('monthly_textile_exports_pbs.csv')
df['date'] = pd.to_datetime(df['date'], infer_datetime_format=True)
df['year'] = df['date'].dt.year #creates a year column out of datetime columns
df['month'] = df['date'].dt.month #creates a month column out of datetime columns
df['year'] = df['year'].astype('string') #converting year column into string for filtering
df['month'] = df['month'].astype('string') #converting month column into string for filtering
df['Fiscal Year'] = df['date'].dt.to_period('Q-JUN').dt.qyear.apply(lambda x: str(x-1) + "-" + str(x))
#adding month column
df['month'] = df['date'].dt.strftime('%b') #creating month names column
#filtering rows of the Cotton Cloth category; .copy() avoids SettingWithCopyWarning when new columns are added below
df_cloth = df.loc[df['category'].isin(['Cotton Cloth'])].copy()
#calculating year-to-date YTD exports
df_cloth['Exports_YTD'] = df_cloth.groupby(['Fiscal Year'])['Exports_US$'].cumsum()
df_cloth['pct_change_yoy'] = df_cloth.groupby(['month'])['Exports_YTD'].pct_change()*100
#filtering data of fiscal year 2020-21
df_cloth_2020_21 = df_cloth.loc[df_cloth['Fiscal Year'].isin(['2020-2021'])]
df_cloth_2021_22 = df_cloth.loc[df_cloth['Fiscal Year'].isin(['2021-2022'])]
fig_ytd = make_subplots(rows=2, cols=1, shared_xaxes=True,
vertical_spacing=0.01,
row_heights=[0.20,0.80])
#fig_ytd = go.Figure()
#fig_ytd = make_subplots(specs=[[{'secondary_y': True}]], rows=1, cols=1) # to make subplot with 1 row and 1 col
# Add traces
fig_ytd.add_trace(go.Bar(x=df_cloth_2020_21['month'], y=df_cloth_2020_21['Exports_YTD'],
name='Exports in 2020-21',
text=df_cloth_2020_21['Exports_YTD'],
textposition='auto',
texttemplate='%{text:,}',
hovertemplate='Exports to date: %{y}'
), row=2, col=1)
fig_ytd.add_trace(go.Scatter(x=df_cloth_2021_22['month'], y=df_cloth_2021_22['Exports_YTD'],
mode='markers+lines+text',
marker=dict(size=16, color="Green"),
name='Exports in 2021-22',
text=df_cloth_2021_22['Exports_YTD'],
textposition='top center',
texttemplate="%{text:,}",
line=dict(color='Green', width=4),
hovertemplate='Exports to date: %{y}B'
), row=2, col=1)
fig_ytd.add_trace(go.Scatter(x=df_cloth_2021_22['month'],
y=df_cloth_2021_22['pct_change_yoy'],
mode="lines+markers+text",
marker=dict(size=16, color="Red"),
name="%Change from previous year",
text=df_cloth_2021_22['pct_change_yoy'],
textposition='middle right',
texttemplate="%{text:.2s}%",
line=dict(color='Red', width=1, dash='dash'),
hovertemplate='%{y}',
), row=1, col=1 )
#fig_ytd.update_yaxes(title_text="Cumulative Exports in US$")
image = Image.open('logo.png')
#st.image(logo.png)
fig_ytd.add_layout_image(
dict(
source=image,
xref="paper", yref="paper",
x=1, y=-0.2, #image position on chart
sizex=0.1, sizey=0.1, #image size on chart
xanchor="right", yanchor="bottom"
))
fig_ytd.update_layout(
autosize=True, height=650, width=1050,
legend_traceorder="reversed",
margin=dict(t=90, b=110, l=90, r=40),
title_font=dict(size=25, color='#111111', family="fjalla one, sans-serif"),
xaxis_title='', yaxis_title="",
plot_bgcolor='#ffffff',
paper_bgcolor='#ffffff',
font=dict(color='#111111', size=18, family="roboto, sans-serif"), #font of axis labels
)
#updates axes
fig_ytd.update_xaxes(showline=True, linewidth=2, linecolor='black', row=2, col=1 )
fig_ytd.update_yaxes(visible=False, row=1, col=1 ) #range=[0, 100] if need to set y-axis range
fig_ytd.update_yaxes(showline=True, linewidth=2, linecolor='black', row=2, col=1 )
fig_ytd.update_xaxes(tickangle=0, tickfont=dict(family='Roboto', color='black', size=24))
fig_ytd.update_yaxes(title='Cumulative Exports (US$)', title_font=dict(family='Roboto', color='black', size=20), row=2, col=1)
fig_ytd.update_yaxes(showgrid=True, gridwidth=1, gridcolor='#758D99')
###############
#title
fig_ytd.add_annotation(
text="Pakistan Cotton Cloth Exports by Value",
font=dict(family='Fjalla one', color='#006BA2', size=36),
xref="paper", yref="paper",
x=0, y=1.18,
showarrow=False,
arrowhead=1)
#subtitle
fig_ytd.add_annotation(
text="current year vs. previous year",
font=dict(family='roboto', color='black', size=24),
xref="paper", yref="paper",
x=0, y=1.10,
showarrow=False,
arrowhead=1)
#data reference
fig_ytd.add_annotation(
text="Source: Pakistan Bureau of Statistics",
font=dict(family='Fjalla one', color='#758D99', size=20),
xref="paper", yref="paper",
x=0, y=-0.2,
showarrow=False,
arrowhead=1)
fig_ytd.update_layout(legend=dict(
orientation="h",
yanchor="bottom",
y=1.02,
xanchor="right",
x=1
))
st.plotly_chart(fig_ytd, use_container_width=True) # to show Figure; container width true makes fig. size responsive
#####################################
#####################################
# cloth chart volume
#filtering data of fiscal year 2021-22
#df_yarn = df.loc[df['category'].isin(['Cotton Yarn'])]
#calculating year-to-date YTD exports
df_cloth['vol_YTD'] = df_cloth.groupby(['Fiscal Year'])['volume'].cumsum()
df_cloth['pct_change_yoy_vol'] = df_cloth.groupby(['month'])['vol_YTD'].pct_change()*100
#filtering data of fiscal year 2020-21
df_cloth_2020_21 = df_cloth.loc[df_cloth['Fiscal Year'].isin(['2020-2021'])]
df_cloth_2021_22 = df_cloth.loc[df_cloth['Fiscal Year'].isin(['2021-2022'])]
fig_ytd = make_subplots(rows=2, cols=1, shared_xaxes=True,
vertical_spacing=0.01,
row_heights=[0.20,0.80])
#fig_ytd = go.Figure()
#fig_ytd = make_subplots(specs=[[{'secondary_y': True}]], rows=1, cols=1) # to make subplot with 1 row and 1 col
# Add traces
fig_ytd.add_trace(go.Bar(x=df_cloth_2020_21['month'], y=df_cloth_2020_21['vol_YTD'],
name='Exports in 2020-21',
text=df_cloth_2020_21['vol_YTD'],
textposition='auto',
texttemplate='%{text:,}',
hovertemplate='Exports to date: %{y}'
), row=2, col=1)
fig_ytd.add_trace(go.Scatter(x=df_cloth_2021_22['month'], y=df_cloth_2021_22['vol_YTD'],
mode='markers+lines+text',
marker=dict(size=16, color="Green"),
name='Exports in 2021-22',
text=df_cloth_2021_22['vol_YTD'],
textposition='top center',
texttemplate="%{text:,}",
line=dict(color='Green', width=4),
hovertemplate='Exports to date: %{y}B'
), row=2, col=1)
fig_ytd.add_trace(go.Scatter(x=df_cloth_2021_22['month'],
y=df_cloth_2021_22['pct_change_yoy_vol'],
mode="lines+markers+text",
marker=dict(size=16, color="Red"),
name="%Change from previous year",
text=df_cloth_2021_22['pct_change_yoy_vol'],
textposition='middle right',
texttemplate="%{text:.2s}%",
line=dict(color='Red', width=1, dash='dash'),
hovertemplate='%{y}',
), row=1, col=1 )
#fig_ytd.update_yaxes(title_text="Cumulative Exports in US$")
image = Image.open('logo.png')
#st.image(logo.png)
fig_ytd.add_layout_image(
dict(
source=image,
xref="paper", yref="paper",
x=1, y=-0.2, #image position on chart
sizex=0.1, sizey=0.1, #image size on chart
xanchor="right", yanchor="bottom"
))
fig_ytd.update_layout(
autosize=True, height=650, width=1050,
legend_traceorder="reversed",
margin=dict(t=90, b=110, l=90, r=40),
title_font=dict(size=25, color='#111111', family="fjalla one, sans-serif"),
xaxis_title='', yaxis_title="",
plot_bgcolor='#ffffff',
paper_bgcolor='#ffffff',
font=dict(color='#111111', size=18, family="roboto, sans-serif"), #font of axis labels
)
#updates axes
fig_ytd.update_xaxes(showline=True, linewidth=2, linecolor='black', row=2, col=1 )
fig_ytd.update_yaxes(visible=False, row=1, col=1 ) #range=[0, 100] if need to set y-axis range
fig_ytd.update_yaxes(showline=True, linewidth=2, linecolor='black', row=2, col=1 )
fig_ytd.update_xaxes(tickangle=0, tickfont=dict(family='Roboto', color='black', size=24))
fig_ytd.update_yaxes(title='Cumulative Exports (Metric Tons)', title_font=dict(family='Roboto', color='black', size=20), row=2, col=1)
fig_ytd.update_yaxes(showgrid=True, gridwidth=1, gridcolor='#758D99')
###############
#title
fig_ytd.add_annotation(
text="Pakistan Cotton Cloth Exports by Volume",
font=dict(family='Fjalla one', color='#006BA2', size=36),
xref="paper", yref="paper",
x=0, y=1.18,
showarrow=False,
arrowhead=1)
#subtitle
fig_ytd.add_annotation(
text="current year vs. previous year",
font=dict(family='roboto', color='black', size=24),
xref="paper", yref="paper",
x=0, y=1.10,
showarrow=False,
arrowhead=1)
#data reference
fig_ytd.add_annotation(
text="Source: Pakistan Bureau of Statistics",
font=dict(family='Fjalla one', color='#758D99', size=20),
xref="paper", yref="paper",
x=0, y=-0.2,
showarrow=False,
arrowhead=1)
fig_ytd.update_layout(legend=dict(
orientation="h",
yanchor="bottom",
y=1.02,
xanchor="right",
x=1
))
st.plotly_chart(fig_ytd, use_container_width=True) # to show Figure; container width true makes fig. size responsive
#####################################
#####################################
# cloth chart export price
df_cloth['pct_change_yoy_price'] = df_cloth.groupby(['month'])['unit_price'].pct_change()*100
df_cloth_2020_21 = df_cloth.loc[df_cloth['Fiscal Year'].isin(['2020-2021'])]
df_cloth_2021_22 = df_cloth.loc[df_cloth['Fiscal Year'].isin(['2021-2022'])]
fig_ytd = make_subplots(rows=2, cols=1, shared_xaxes=True,
vertical_spacing=0.01,
row_heights=[0.20,0.80])
#fig_ytd = go.Figure()
#fig_ytd = make_subplots(specs=[[{'secondary_y': True}]], rows=1, cols=1) # to make subplot with 1 row and 1 col
# Add traces
fig_ytd.add_trace(go.Bar(x=df_cloth_2020_21['month'], y=df_cloth_2020_21['unit_price'],
name='Price in 2020-21',
text=df_cloth_2020_21['unit_price'],
textposition='auto',
texttemplate='$%{text:.2f}',
hovertemplate='Exports price:$%{y}'
), row=2, col=1)
fig_ytd.add_trace(go.Scatter(x=df_cloth_2021_22['month'], y=df_cloth_2021_22['unit_price'],
mode='markers+lines+text',
marker=dict(size=16, color="Green"),
name='Price in 2021-22',
text=df_cloth_2021_22['unit_price'],
textposition='bottom right',
texttemplate="$%{text:.2f}",
line=dict(color='Green', width=4),
hovertemplate='Exports price:$%{y}'
), row=2, col=1)
fig_ytd.add_trace(go.Scatter(x=df_cloth_2021_22['month'],
y=df_cloth_2021_22['pct_change_yoy_price'],
mode="lines+markers+text",
marker=dict(size=16, color="Red"),
name="%Change from previous year",
text=df_cloth_2021_22['pct_change_yoy_price'],
textposition='middle right',
texttemplate="%{text:.2s}%",
line=dict(color='Red', width=1, dash='dash'),
hovertemplate='%{y}',
), row=1, col=1 )
#fig_ytd.update_yaxes(title_text="Cumulative Exports in US$")
image = Image.open('logo.png')
#st.image(logo.png)
fig_ytd.add_layout_image(
dict(
source=image,
xref="paper", yref="paper",
x=1, y=-0.2, #image position on chart
sizex=0.1, sizey=0.1, #image size on chart
xanchor="right", yanchor="bottom"
))
fig_ytd.update_layout(
autosize=True, height=650, width=1050,
legend_traceorder="reversed",
margin=dict(t=90, b=110, l=90, r=40),
title_font=dict(size=25, color='#111111', family="fjalla one, sans-serif"),
xaxis_title='', yaxis_title="",
plot_bgcolor='#ffffff',
paper_bgcolor='#ffffff',
font=dict(color='#111111', size=18, family="roboto, sans-serif"), #font of axis labels
)
#updates axes
fig_ytd.update_xaxes(showline=True, linewidth=2, linecolor='black', row=2, col=1 )
fig_ytd.update_yaxes(visible=False, row=1, col=1 ) #range=[0, 100] if need to set y-axis range
fig_ytd.update_yaxes(showline=True, linewidth=2, linecolor='black', row=2, col=1 )
fig_ytd.update_xaxes(tickangle=0, tickfont=dict(family='Roboto', color='black', size=24))
fig_ytd.update_yaxes(title='US$ per Kg', title_font=dict(family='Roboto', color='black', size=20), row=2, col=1)
fig_ytd.update_yaxes(showgrid=True, gridwidth=1, gridcolor='#758D99')
###############
#title
fig_ytd.add_annotation(
text="Pakistan Cotton Cloth Average Export Price",
font=dict(family='Fjalla one', color='#006BA2', size=36),
xref="paper", yref="paper",
x=0, y=1.18,
showarrow=False,
arrowhead=1)
#subtitle
fig_ytd.add_annotation(
text="current year vs. previous year",
font=dict(family='roboto', color='black', size=24),
xref="paper", yref="paper",
x=0, y=1.10,
showarrow=False,
arrowhead=1)
#data reference
fig_ytd.add_annotation(
text="Source: Pakistan Bureau of Statistics",
font=dict(family='Fjalla one', color='#758D99', size=20),
xref="paper", yref="paper",
x=0, y=-0.2,
showarrow=False,
arrowhead=1)
fig_ytd.update_layout(legend=dict(
orientation="h",
yanchor="bottom",
y=1.02,
xanchor="right",
x=1
))
st.plotly_chart(fig_ytd, use_container_width=True) # to show Figure; container width true makes fig. size responsive
#####################################
elif page == 'Readymade Garments Exports':
#####################################
#####################################
# garment chart value
#importing csv file as dataframe
df = pd.read_csv('monthly_textile_exports_pbs.csv')
df['date'] = pd.to_datetime(df['date'], infer_datetime_format=True)
df['year'] = df['date'].dt.year #creates a year column out of datetime columns
df['month'] = df['date'].dt.month #creates a month column out of datetime columns
df['year'] = df['year'].astype('string') #converting year column into string for filtering
df['month'] = df['month'].astype('string') #converting month column into string for filtering
df['Fiscal Year'] = df['date'].dt.to_period('Q-JUN').dt.qyear.apply(lambda x: str(x-1) + "-" + str(x))
#adding month column
df['month'] = df['date'].dt.strftime('%b') #creating month names column
df_garment = df.loc[df['category'].isin(['Garments'])].copy() #filter the Garments category; .copy() avoids SettingWithCopyWarning on later column assignments
##################################
##################################
#calculating year-to-date YTD exports
df_garment['Exports_YTD'] = df_garment.groupby(['Fiscal Year'])['Exports_US$'].cumsum()
df_garment['pct_change_yoy'] = df_garment.groupby(['month'])['Exports_YTD'].pct_change()*100
#filtering data of fiscal year 2020-21
df_garment_2020_21 = df_garment.loc[df_garment['Fiscal Year'].isin(['2020-2021'])]
df_garment_2021_22 = df_garment.loc[df_garment['Fiscal Year'].isin(['2021-2022'])]
fig_ytd = make_subplots(rows=2, cols=1, shared_xaxes=True,
vertical_spacing=0.01,
row_heights=[0.20,0.80])
#fig_ytd = go.Figure()
#fig_ytd = make_subplots(specs=[[{'secondary_y': True}]], rows=1, cols=1) # to make subplot with 1 row and 1 col
# Add traces
fig_ytd.add_trace(go.Bar(x=df_garment_2020_21['month'], y=df_garment_2020_21['Exports_YTD'],
name='Exports in 2020-21',
text=df_garment_2020_21['Exports_YTD'],
textposition='auto',
texttemplate='%{text:,}',
hovertemplate='Exports to date: %{y}'
), row=2, col=1)
fig_ytd.add_trace(go.Scatter(x=df_garment_2021_22['month'], y=df_garment_2021_22['Exports_YTD'],
mode='markers+lines+text',
marker=dict(size=16, color="Green"),
name='Exports in 2021-22',
text=df_garment_2021_22['Exports_YTD'],
textposition='top center',
texttemplate="%{text:,}",
line=dict(color='Green', width=4),
hovertemplate='Exports to date: %{y}B'
), row=2, col=1)
fig_ytd.add_trace(go.Scatter(x=df_garment_2021_22['month'],
y=df_garment_2021_22['pct_change_yoy'],
mode="lines+markers+text",
marker=dict(size=16, color="Red"),
name="%Change from previous year",
text=df_garment_2021_22['pct_change_yoy'],
textposition='middle right',
texttemplate="%{text:.2s}%",
line=dict(color='Red', width=1, dash='dash'),
hovertemplate='%{y}',
), row=1, col=1 )
#fig_ytd.update_yaxes(title_text="Cumulative Exports in US$")
from PIL import Image
image = Image.open('logo.png')
#st.image(logo.png)
fig_ytd.add_layout_image(
dict(
source=image,
xref="paper", yref="paper",
x=1, y=-0.2, #image position on chart
sizex=0.1, sizey=0.1, #image size on chart
xanchor="right", yanchor="bottom"
))
fig_ytd.update_layout(
autosize=True, height=650, width=1050,
legend_traceorder="reversed",
margin=dict(t=90, b=110, l=90, r=40),
title_font=dict(size=25, color='#111111', family="fjalla one, sans-serif"),
xaxis_title='', yaxis_title="",
plot_bgcolor='#ffffff',
paper_bgcolor='#ffffff',
font=dict(color='#111111', size=18, family="roboto, sans-serif"), #font of axis labels
)
#updates axes
fig_ytd.update_xaxes(showline=True, linewidth=2, linecolor='black', row=2, col=1 )
fig_ytd.update_yaxes(visible=False, row=1, col=1 ) #range=[0, 100] if need to set y-axis range
fig_ytd.update_yaxes(showline=True, linewidth=2, linecolor='black', row=2, col=1 )
fig_ytd.update_xaxes(tickangle=0, tickfont=dict(family='Roboto', color='black', size=24))
fig_ytd.update_yaxes(title='Cumulative Exports (US$ Billion)', title_font=dict(family='Roboto', color='black', size=20), row=2, col=1)
fig_ytd.update_yaxes(showgrid=True, gridwidth=1, gridcolor='#758D99')
###############
#title
fig_ytd.add_annotation(
text="Pakistan Garment Exports by Value",
font=dict(family='Fjalla one', color='#006BA2', size=36),
xref="paper", yref="paper",
x=0, y=1.18,
showarrow=False,
arrowhead=1)
#subtitle
fig_ytd.add_annotation(
text="current year vs. previous year",
font=dict(family='roboto', color='black', size=24),
xref="paper", yref="paper",
x=0, y=1.10,
showarrow=False,
arrowhead=1)
#data reference
fig_ytd.add_annotation(
text="Source: Pakistan Bureau of Statistics",
font=dict(family='Fjalla one', color='#758D99', size=20),
xref="paper", yref="paper",
x=0, y=-0.2,
showarrow=False,
arrowhead=1)
fig_ytd.update_layout(legend=dict(
orientation="h",
yanchor="bottom",
y=1.02,
xanchor="right",
x=1
))
st.plotly_chart(fig_ytd, use_container_width=True) # to show Figure; container width true makes fig. size responsive
##################################
#calculating year-to-date YTD exports
df_garment['Exports_YTD_vol'] = df_garment.groupby(['Fiscal Year'])['volume'].cumsum()
df_garment['pct_change_yoy_vol'] = df_garment.groupby(['month'])['Exports_YTD_vol'].pct_change()*100
#filtering data of fiscal year 2020-21
df_garment_2020_21 = df_garment.loc[df_garment['Fiscal Year'].isin(['2020-2021'])]
df_garment_2021_22 = df_garment.loc[df_garment['Fiscal Year'].isin(['2021-2022'])]
fig_ytd = make_subplots(rows=2, cols=1, shared_xaxes=True,
vertical_spacing=0.01,
row_heights=[0.20,0.80])
#fig_ytd = go.Figure()
#fig_ytd = make_subplots(specs=[[{'secondary_y': True}]], rows=1, cols=1) # to make subplot with 1 row and 1 col
# Add traces
fig_ytd.add_trace(go.Bar(x=df_garment_2020_21['month'], y=df_garment_2020_21['Exports_YTD_vol'],
name='Exports in 2020-21',
text=df_garment_2020_21['Exports_YTD_vol'],
textposition='auto',
texttemplate='%{text:,}',
hovertemplate='Exports to date: %{y}'
), row=2, col=1)
fig_ytd.add_trace(go.Scatter(x=df_garment_2021_22['month'], y=df_garment_2021_22['Exports_YTD_vol'],
mode='markers+lines+text',
marker=dict(size=16, color="Green"),
name='Exports in 2021-22',
text=df_garment_2021_22['Exports_YTD_vol'],
textposition='top center',
texttemplate="%{text:,}",
line=dict(color='Green', width=4),
hovertemplate='Exports to date: %{y}B'
), row=2, col=1)
fig_ytd.add_trace(go.Scatter(x=df_garment_2021_22['month'],
y=df_garment_2021_22['pct_change_yoy_vol'],
mode="lines+markers+text",
marker=dict(size=16, color="Red"),
name="%Change from previous year",
text=df_garment_2021_22['pct_change_yoy_vol'],
textposition='middle right',
texttemplate="%{text:.2s}%",
line=dict(color='Red', width=1, dash='dash'),
hovertemplate='%{y}',
), row=1, col=1 )
#fig_ytd.update_yaxes(title_text="Cumulative Exports in US$")
image = Image.open('logo.png')
#st.image(logo.png)
fig_ytd.add_layout_image(
dict(
source=image,
xref="paper", yref="paper",
x=1, y=-0.2, #image position on chart
sizex=0.1, sizey=0.1, #image size on chart
xanchor="right", yanchor="bottom"
))
fig_ytd.update_layout(
autosize=True, height=650, width=1050,
legend_traceorder="reversed",
margin=dict(t=90, b=110, l=90, r=40),
title_font=dict(size=25, color='#111111', family="fjalla one, sans-serif"),
xaxis_title='', yaxis_title="",
plot_bgcolor='#ffffff',
paper_bgcolor='#ffffff',
font=dict(color='#111111', size=18, family="roboto, sans-serif"), #font of axis labels
)
#updates axes
fig_ytd.update_xaxes(showline=True, linewidth=2, linecolor='black', row=2, col=1 )
fig_ytd.update_yaxes(visible=False, row=1, col=1 ) #range=[0, 100] if need to set y-axis range
fig_ytd.update_yaxes(showline=True, linewidth=2, linecolor='black', row=2, col=1 )
fig_ytd.update_xaxes(tickangle=0, tickfont=dict(family='Roboto', color='black', size=24))
fig_ytd.update_yaxes(title='Cumulative Exports (Dozens)', title_font=dict(family='Roboto', color='black', size=20), row=2, col=1)
fig_ytd.update_yaxes(showgrid=True, gridwidth=1, gridcolor='#758D99')
###############
#title
fig_ytd.add_annotation(
text="Pakistan Garments Exports by Volume",
font=dict(family='Fjalla one', color='#006BA2', size=36),
xref="paper", yref="paper",
x=0, y=1.18,
showarrow=False,
arrowhead=1)
#subtitle
fig_ytd.add_annotation(
text="current year vs. previous year",
font=dict(family='roboto', color='black', size=24),
xref="paper", yref="paper",
x=0, y=1.10,
showarrow=False,
arrowhead=1)
#data reference
fig_ytd.add_annotation(
text="Source: Pakistan Bureau of Statistics",
font=dict(family='Fjalla one', color='#758D99', size=20),
xref="paper", yref="paper",
x=0, y=-0.2,
showarrow=False,
arrowhead=1)
fig_ytd.update_layout(legend=dict(
orientation="h",
yanchor="bottom",
y=1.02,
xanchor="right",
x=1
))
st.plotly_chart(fig_ytd, use_container_width=True) # to show Figure; container width true makes fig. size responsive
#####################################
# garment chart export price
df_garment['pct_change_yoy_price'] = df_garment.groupby(['month'])['unit_price'].pct_change()*100
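# Same month-wise grouping as in the charts above, applied directly to unit_price:
# for each calendar month, pct_change() gives the % change in average export price
# versus the same month of the previous fiscal year (again assuming one row per
# month per fiscal year).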
df_garment_2020_21 = df_garment.loc[df_garment['Fiscal Year'].isin(['2020-2021'])]
df_garment_2021_22 = df_garment.loc[df_garment['Fiscal Year'].isin(['2021-2022'])]
fig_ytd = make_subplots(rows=2, cols=1, shared_xaxes=True,
vertical_spacing=0.01,
row_heights=[0.20,0.80])
#fig_ytd = go.Figure()
#fig_ytd = make_subplots(specs=[[{'secondary_y': True}]], rows=1, cols=1) # to make subplot with 1 row and 1 col
# Add traces
fig_ytd.add_trace(go.Bar(x=df_garment_2020_21['month'], y=df_garment_2020_21['unit_price'],
name='Price in 2020-21',
text=df_garment_2020_21['unit_price'],
textposition='auto',
texttemplate='$%{text:.2f}',
hovertemplate='Exports price:$%{y}'
), row=2, col=1)
fig_ytd.add_trace(go.Scatter(x=df_garment_2021_22['month'], y=df_garment_2021_22['unit_price'],
mode='markers+lines+text',
marker=dict(size=16, color="Green"),
name='Price in 2021-22',
text=df_garment_2021_22['unit_price'],
textposition='bottom right',
texttemplate="$%{text:.2f}",
line=dict(color='Green', width=4),
hovertemplate='Exports price:$%{y}'
), row=2, col=1)
fig_ytd.add_trace(go.Scatter(x=df_garment_2021_22['month'],
y=df_garment_2021_22['pct_change_yoy_price'],
mode="lines+markers+text",
marker=dict(size=16, color="Red"),
name="%Change from previous year",
text=df_garment_2021_22['pct_change_yoy_price'],
textposition='middle right',
texttemplate="%{text:.2s}%",
line=dict(color='Red', width=1, dash='dash'),
hovertemplate='%{y}',
), row=1, col=1 )
#fig_ytd.update_yaxes(title_text="Cumulative Exports in US$")
image = Image.open('logo.png')
#st.image(logo.png)
fig_ytd.add_layout_image(
dict(
source=image,
xref="paper", yref="paper",
x=1, y=-0.2, #image position on chart
sizex=0.1, sizey=0.1, #image size on chart
xanchor="right", yanchor="bottom"
))
fig_ytd.update_layout(
autosize=True, height=650, width=1050,
legend_traceorder="reversed",
margin=dict(t=90, b=110, l=90, r=40),
title_font=dict(size=25, color='#111111', family="fjalla one, sans-serif"),
xaxis_title='', yaxis_title="",
plot_bgcolor='#ffffff',
paper_bgcolor='#ffffff',
font=dict(color='#111111', size=18, family="roboto, sans-serif"), #font of axis labels
)
#updates axes
fig_ytd.update_xaxes(showline=True, linewidth=2, linecolor='black', row=2, col=1 )
fig_ytd.update_yaxes(visible=False, row=1, col=1 ) #range=[0, 100] if need to set y-axis range
fig_ytd.update_yaxes(showline=True, linewidth=2, linecolor='black', row=2, col=1 )
fig_ytd.update_xaxes(tickangle=0, tickfont=dict(family='Roboto', color='black', size=24))
fig_ytd.update_yaxes(title='US$ per Dozen', title_font=dict(family='Roboto', color='black', size=20), row=2, col=1)
fig_ytd.update_yaxes(showgrid=True, gridwidth=1, gridcolor='#758D99')
###############
#title
fig_ytd.add_annotation(
text="Pakistan Garments Average Export Price",
font=dict(family='Fjalla one', color='#006BA2', size=36),
xref="paper", yref="paper",
x=0, y=1.18,
showarrow=False,
arrowhead=1)
#subtitle
fig_ytd.add_annotation(
text="current year vs. previous year",
font=dict(family='roboto', color='black', size=24),
xref="paper", yref="paper",
x=0, y=1.10,
showarrow=False,
arrowhead=1)
#data reference
fig_ytd.add_annotation(
text="Source: Pakistan Bureau of Statistics",
font=dict(family='Fjalla one', color='#758D99', size=20),
xref="paper", yref="paper",
x=0, y=-0.2,
showarrow=False,
arrowhead=1)
fig_ytd.update_layout(legend=dict(
orientation="h",
yanchor="bottom",
y=1.02,
xanchor="right",
x=1
))
st.plotly_chart(fig_ytd, use_container_width=True) # to show Figure; container width true makes fig. size responsive
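# The value / volume / price charts above (and on the category pages below) all
# share the same two-row layout. A small helper along these lines could remove most
# of the duplication -- an illustrative sketch only, not wired in anywhere:
#
#   def ytd_figure(prev_df, curr_df, y_col, pct_col, title, y_title):
#       """Bar = previous fiscal year, line = current year, top row = YoY % change."""
#       fig = make_subplots(rows=2, cols=1, shared_xaxes=True,
#                           vertical_spacing=0.01, row_heights=[0.20, 0.80])
#       fig.add_trace(go.Bar(x=prev_df['month'], y=prev_df[y_col],
#                            name='Previous year'), row=2, col=1)
#       fig.add_trace(go.Scatter(x=curr_df['month'], y=curr_df[y_col],
#                                mode='markers+lines', name='Current year'), row=2, col=1)
#       fig.add_trace(go.Scatter(x=curr_df['month'], y=curr_df[pct_col],
#                                mode='lines+markers', name='%Change YoY'), row=1, col=1)
#       fig.update_yaxes(title=y_title, row=2, col=1)
#       fig.add_annotation(text=title, xref='paper', yref='paper',
#                          x=0, y=1.18, showarrow=False)
#       return fig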
#####################################
elif page == 'Knitwear Exports':
#####################################
#####################################
#importing csv file as dataframe
df = pd.read_csv('monthly_textile_exports_pbs.csv')
df['date'] = pd.to_datetime(df['date'], infer_datetime_format=True)
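# Note: infer_datetime_format is deprecated (and effectively a no-op) from pandas
# 2.0 onward; pd.to_datetime(df['date']) alone, or an explicit format= string,
# is sufficient.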
df['year'] = df['date'].dt.year #creates a year column out of datetime columns
df['month'] = df['date'].dt.month #creates a month column out of datetime columns
df['year'] = df['year'].astype('string') #converting year column into string for filtering
df['month'] = df['month'].astype('string') #converting year column into string for filtering
df['Fiscal Year'] = df['date'].dt.to_period('Q-JUN').dt.qyear.apply(lambda x: str(x-1) + "-" + str(x))
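# Fiscal-year label, step by step: to_period('Q-JUN') buckets each date into quarters
# of a fiscal year ending in June, .qyear returns the year in which that fiscal year
# ends, and the lambda builds the label. For example 2021-08-15 -> qyear 2022 ->
# "2021-2022"; 2022-03-10 falls in the same "2021-2022" fiscal year.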
#adding month column
df['month'] = df['date'].dt.strftime('%b') #creating month names column
# knitwear chart value
df_knitwear = df.loc[df['category'].isin(['Knitwear'])].copy() #.copy() avoids SettingWithCopyWarning when adding the YTD columns below
#calculating year-to-date YTD exports
df_knitwear['Exports_YTD'] = df_knitwear.groupby(['Fiscal Year'])['Exports_US$'].cumsum()
df_knitwear['pct_change_yoy'] = df_knitwear.groupby(['month'])['Exports_YTD'].pct_change()*100
#filtering data for fiscal years 2020-21 and 2021-22
df_knitwear_2020_21 = df_knitwear.loc[df_knitwear['Fiscal Year'].isin(['2020-2021'])]
df_knitwear_2021_22 = df_knitwear.loc[df_knitwear['Fiscal Year'].isin(['2021-2022'])]
fig_ytd = make_subplots(rows=2, cols=1, shared_xaxes=True,
vertical_spacing=0.01,
row_heights=[0.20,0.80])
#fig_ytd = go.Figure()
#fig_ytd = make_subplots(specs=[[{'secondary_y': True}]], rows=1, cols=1) # to make subplot with 1 row and 1 col
# Add traces
fig_ytd.add_trace(go.Bar(x=df_knitwear_2020_21['month'], y=df_knitwear_2020_21['Exports_YTD'],
name='Exports in 2020-21',
text=df_knitwear_2020_21['Exports_YTD'],
textposition='auto',
texttemplate='%{text:,}',
hovertemplate='Exports to date: %{y}'
), row=2, col=1)
fig_ytd.add_trace(go.Scatter(x=df_knitwear_2021_22['month'], y=df_knitwear_2021_22['Exports_YTD'],
mode='markers+lines+text',
marker=dict(size=16, color="Green"),
name='Exports in 2021-22',
text=df_knitwear_2021_22['Exports_YTD'],
textposition='top center',
texttemplate="%{text:,}",
line=dict(color='Green', width=4),
hovertemplate='Exports to date: %{y}B'
), row=2, col=1)
fig_ytd.add_trace(go.Scatter(x=df_knitwear_2021_22['month'],
y=df_knitwear_2021_22['pct_change_yoy'],
mode="lines+markers+text",
marker=dict(size=16, color="Red"),
name="%Change from previous year",
text=df_knitwear_2021_22['pct_change_yoy'],
textposition='middle right',
texttemplate="%{text:.2s}%",
line=dict(color='Red', width=1, dash='dash'),
hovertemplate='%{y}',
), row=1, col=1 )
#fig_ytd.update_yaxes(title_text="Cumulative Exports in US$")
from PIL import Image
image = Image.open('logo.png')
#st.image(logo.png)
fig_ytd.add_layout_image(
dict(
source=image,
xref="paper", yref="paper",
x=1, y=-0.2, #image position on chart
sizex=0.1, sizey=0.1, #image size on chart
xanchor="right", yanchor="bottom"
))
fig_ytd.update_layout(
autosize=True, height=650, width=1050,
legend_traceorder="reversed",
margin=dict(t=90, b=110, l=90, r=40),
title_font=dict(size=25, color='#111111', family="fjalla one, sans-serif"),
xaxis_title='', yaxis_title="",
plot_bgcolor='#ffffff',
paper_bgcolor='#ffffff',
font=dict(color='#111111', size=18, family="roboto, sans-serif"), #font of axis labels
)
#updates axes
fig_ytd.update_xaxes(showline=True, linewidth=2, linecolor='black', row=2, col=1 )
fig_ytd.update_yaxes(visible=False, row=1, col=1 ) #range=[0, 100] if need to set y-axis range
fig_ytd.update_yaxes(showline=True, linewidth=2, linecolor='black', row=2, col=1 )
fig_ytd.update_xaxes(tickangle=0, tickfont=dict(family='Roboto', color='black', size=24))
fig_ytd.update_yaxes(title='Cumulative Exports (US$ Billion)', title_font=dict(family='Roboto', color='black', size=20), row=2, col=1)
fig_ytd.update_yaxes(showgrid=True, gridwidth=1, gridcolor='#758D99')
###############
#title
fig_ytd.add_annotation(
text="Pakistan Knitwear Exports by Value",
font=dict(family='Fjalla one', color='#006BA2', size=36),
xref="paper", yref="paper",
x=0, y=1.18,
showarrow=False,
arrowhead=1)
#subtitle
fig_ytd.add_annotation(
text="current year vs. previous year",
font=dict(family='roboto', color='black', size=24),
xref="paper", yref="paper",
x=0, y=1.10,
showarrow=False,
arrowhead=1)
#data reference
fig_ytd.add_annotation(
text="Source: Pakistan Bureau of Statistics",
font=dict(family='Fjalla one', color='#758D99', size=20),
xref="paper", yref="paper",
x=0, y=-0.2,
showarrow=False,
arrowhead=1)
fig_ytd.update_layout(legend=dict(
orientation="h",
yanchor="bottom",
y=1.02,
xanchor="right",
x=1
))
st.plotly_chart(fig_ytd, use_container_width=True) # to show Figure; container width true makes fig. size responsive
###############################################
#############################################
#calculating year-to-date YTD exports by volume
df_knitwear['Exports_YTD_vol'] = df_knitwear.groupby(['Fiscal Year'])['volume'].cumsum()
df_knitwear['pct_change_yoy_vol'] = df_knitwear.groupby(['month'])['Exports_YTD_vol'].pct_change()*100
#filtering data for fiscal years 2020-21 and 2021-22
df_knitwear_2020_21 = df_knitwear.loc[df_knitwear['Fiscal Year'].isin(['2020-2021'])]
df_knitwear_2021_22 = df_knitwear.loc[df_knitwear['Fiscal Year'].isin(['2021-2022'])]
fig_ytd = make_subplots(rows=2, cols=1, shared_xaxes=True,
vertical_spacing=0.01,
row_heights=[0.20,0.80])
#fig_ytd = go.Figure()
#fig_ytd = make_subplots(specs=[[{'secondary_y': True}]], rows=1, cols=1) # to make subplot with 1 row and 1 col
# Add traces
fig_ytd.add_trace(go.Bar(x=df_knitwear_2020_21['month'], y=df_knitwear_2020_21['Exports_YTD_vol'],
name='Exports in 2020-21',
text=df_knitwear_2020_21['Exports_YTD_vol'],
textposition='auto',
texttemplate='%{text:,}',
hovertemplate='Exports to date: %{y}'
), row=2, col=1)
fig_ytd.add_trace(go.Scatter(x=df_knitwear_2021_22['month'], y=df_knitwear_2021_22['Exports_YTD_vol'],
mode='markers+lines+text',
marker=dict(size=16, color="Green"),
name='Exports in 2021-22',
text=df_knitwear_2021_22['Exports_YTD_vol'],
textposition='top center',
texttemplate="%{text:,}",
line=dict(color='Green', width=4),
hovertemplate='Exports to date: %{y}'
), row=2, col=1)
fig_ytd.add_trace(go.Scatter(x=df_knitwear_2021_22['month'],
y=df_knitwear_2021_22['pct_change_yoy_vol'],
mode="lines+markers+text",
marker=dict(size=16, color="Red"),
name="%Change from previous year",
text=df_knitwear_2021_22['pct_change_yoy_vol'],
textposition='middle right',
texttemplate="%{text:.2f}%",
line=dict(color='Red', width=1, dash='dash'),
hovertemplate='%{y}',
), row=1, col=1 )
#fig_ytd.update_yaxes(title_text="Cumulative Exports in US$")
image = Image.open('logo.png')
#st.image(logo.png)
fig_ytd.add_layout_image(
dict(
source=image,
xref="paper", yref="paper",
x=1, y=-0.2, #image position on chart
sizex=0.1, sizey=0.1, #image size on chart
xanchor="right", yanchor="bottom"
))
fig_ytd.update_layout(
autosize=True, height=650, width=1050,
legend_traceorder="reversed",
margin=dict(t=90, b=110, l=90, r=40),
title_font=dict(size=25, color='#111111', family="fjalla one, sans-serif"),
xaxis_title='', yaxis_title="",
plot_bgcolor='#ffffff',
paper_bgcolor='#ffffff',
font=dict(color='#111111', size=18, family="roboto, sans-serif"), #font of axis labels
)
#updates axes
fig_ytd.update_xaxes(showline=True, linewidth=2, linecolor='black', row=2, col=1 )
fig_ytd.update_yaxes(visible=False, row=1, col=1 ) #range=[0, 100] if need to set y-axis range
fig_ytd.update_yaxes(showline=True, linewidth=2, linecolor='black', row=2, col=1 )
fig_ytd.update_xaxes(tickangle=0, tickfont=dict(family='Roboto', color='black', size=24))
fig_ytd.update_yaxes(title='Cumulative Exports (Dozens)', title_font=dict(family='Roboto', color='black', size=20), row=2, col=1)
fig_ytd.update_yaxes(showgrid=True, gridwidth=1, gridcolor='#758D99')
###############
#title
fig_ytd.add_annotation(
text="Pakistan Knitwear Exports by Volume",
font=dict(family='Fjalla one', color='#006BA2', size=36),
xref="paper", yref="paper",
x=0, y=1.18,
showarrow=False,
arrowhead=1)
#subtitle
fig_ytd.add_annotation(
text="current year vs. previous year",
font=dict(family='roboto', color='black', size=24),
xref="paper", yref="paper",
x=0, y=1.10,
showarrow=False,
arrowhead=1)
#data reference
fig_ytd.add_annotation(
text="Source: Pakistan Bureau of Statistics",
font=dict(family='Fjalla one', color='#758D99', size=20),
xref="paper", yref="paper",
x=0, y=-0.2,
showarrow=False,
arrowhead=1)
fig_ytd.update_layout(legend=dict(
orientation="h",
yanchor="bottom",
y=1.02,
xanchor="right",
x=1
))
st.plotly_chart(fig_ytd, use_container_width=True) # to show Figure; container width true makes fig. size responsive
#####################################
# knitwear chart export price
df_knitwear['pct_change_yoy_price'] = df_knitwear.groupby(['month'])['unit_price'].pct_change()*100
df_knitwear_2020_21 = df_knitwear.loc[df_knitwear['Fiscal Year'].isin(['2020-2021'])]
df_knitwear_2021_22 = df_knitwear.loc[df_knitwear['Fiscal Year'].isin(['2021-2022'])]
fig_ytd = make_subplots(rows=2, cols=1, shared_xaxes=True,
vertical_spacing=0.01,
row_heights=[0.20,0.80])
#fig_ytd = go.Figure()
#fig_ytd = make_subplots(specs=[[{'secondary_y': True}]], rows=1, cols=1) # to make subplot with 1 row and 1 col
# Add traces
fig_ytd.add_trace(go.Bar(x=df_knitwear_2020_21['month'], y=df_knitwear_2020_21['unit_price'],
name='Price in 2020-21',
text=df_knitwear_2020_21['unit_price'],
textposition='auto',
texttemplate='$%{text:.2f}',
hovertemplate='Exports price:$%{y}'
), row=2, col=1)
fig_ytd.add_trace(go.Scatter(x=df_knitwear_2021_22['month'], y=df_knitwear_2021_22['unit_price'],
mode='markers+lines+text',
marker=dict(size=16, color="Green"),
name='Price in 2021-22',
text=df_knitwear_2021_22['unit_price'],
textposition='bottom right',
texttemplate="$%{text:.2f}",
line=dict(color='Green', width=4),
hovertemplate='Exports price:$%{y}'
), row=2, col=1)
fig_ytd.add_trace(go.Scatter(x=df_knitwear_2021_22['month'],
y=df_knitwear_2021_22['pct_change_yoy_price'],
mode="lines+markers+text",
marker=dict(size=16, color="Red"),
name="%Change from previous year",
text=df_knitwear_2021_22['pct_change_yoy_price'],
textposition='middle right',
texttemplate="%{text:.2s}%",
line=dict(color='Red', width=1, dash='dash'),
hovertemplate='%{y}',
), row=1, col=1 )
#fig_ytd.update_yaxes(title_text="Cumulative Exports in US$")
image = Image.open('logo.png')
#st.image(logo.png)
fig_ytd.add_layout_image(
dict(
source=image,
xref="paper", yref="paper",
x=1, y=-0.2, #image position on chart
sizex=0.1, sizey=0.1, #image size on chart
xanchor="right", yanchor="bottom"
))
fig_ytd.update_layout(
autosize=True, height=650, width=1050,
legend_traceorder="reversed",
margin=dict(t=90, b=110, l=90, r=40),
title_font=dict(size=25, color='#111111', family="fjalla one, sans-serif"),
xaxis_title='', yaxis_title="",
plot_bgcolor='#ffffff',
paper_bgcolor='#ffffff',
font=dict(color='#111111', size=18, family="roboto, sans-serif"), #font of axis labels
)
#updates axes
fig_ytd.update_xaxes(showline=True, linewidth=2, linecolor='black', row=2, col=1 )
fig_ytd.update_yaxes(visible=False, row=1, col=1 ) #range=[0, 100] if need to set y-axis range
fig_ytd.update_yaxes(showline=True, linewidth=2, linecolor='black', row=2, col=1 )
fig_ytd.update_xaxes(tickangle=0, tickfont=dict(family='Roboto', color='black', size=24))
fig_ytd.update_yaxes(title='US$ per Dozen', title_font=dict(family='Roboto', color='black', size=20), row=2, col=1)
fig_ytd.update_yaxes(showgrid=True, gridwidth=1, gridcolor='#758D99')
###############
#title
fig_ytd.add_annotation(
text="Pakistan Knitwear Average Export Price",
font=dict(family='Fjalla one', color='#006BA2', size=36),
xref="paper", yref="paper",
x=0, y=1.18,
showarrow=False,
arrowhead=1)
#subtitle
fig_ytd.add_annotation(
text="current year vs. previous year",
font=dict(family='roboto', color='black', size=24),
xref="paper", yref="paper",
x=0, y=1.10,
showarrow=False,
arrowhead=1)
#data reference
fig_ytd.add_annotation(
text="Source: Pakistan Bureau of Statistics",
font=dict(family='Fjalla one', color='#758D99', size=20),
xref="paper", yref="paper",
x=0, y=-0.2,
showarrow=False,
arrowhead=1)
fig_ytd.update_layout(legend=dict(
orientation="h",
yanchor="bottom",
y=1.02,
xanchor="right",
x=1
))
st.plotly_chart(fig_ytd, use_container_width=True) # to show Figure; container width true makes fig. size responsive
#####################################
#################
elif page == 'Artifical Silk & Synthetics Exports':
#####################################
#####################################
#importing csv file as dataframe
df = pd.read_csv('monthly_textile_exports_pbs.csv')
df['date'] = pd.to_datetime(df['date'], infer_datetime_format=True)
df['year'] = df['date'].dt.year #creates a year column out of datetime columns
df['month'] = df['date'].dt.month #creates a month column out of datetime columns
df['year'] = df['year'].astype('string') #converting year column into string for filtering
df['month'] = df['month'].astype('string') #converting year column into string for filtering
df['Fiscal Year'] = df['date'].dt.to_period('Q-JUN').dt.qyear.apply(lambda x: str(x-1) + "-" + str(x))
#adding month column
df['month'] = df['date'].dt.strftime('%b') #creating month names column
# artificial silk & synthetics chart value
df_s = df.loc[df['category'].isin(['Artificial Silk & Synthetics'])].copy() #.copy() avoids SettingWithCopyWarning when adding the YTD columns below
#calculating year-to-date YTD exports
df_s['Exports_YTD'] = df_s.groupby(['Fiscal Year'])['Exports_US$'].cumsum()
df_s['pct_change_yoy'] = df_s.groupby(['month'])['Exports_YTD'].pct_change()*100
#filtering data for fiscal years 2020-21 and 2021-22
df_s_2020_21 = df_s.loc[df_s['Fiscal Year'].isin(['2020-2021'])]
df_s_2021_22 = df_s.loc[df_s['Fiscal Year'].isin(['2021-2022'])]
fig_ytd = make_subplots(rows=2, cols=1, shared_xaxes=True,
vertical_spacing=0.01,
row_heights=[0.20,0.80])
#fig_ytd = go.Figure()
#fig_ytd = make_subplots(specs=[[{'secondary_y': True}]], rows=1, cols=1) # to make subplot with 1 row and 1 col
# Add traces
fig_ytd.add_trace(go.Bar(x=df_s_2020_21['month'], y=df_s_2020_21['Exports_YTD'],
name='Exports in 2020-21',
text=df_s_2020_21['Exports_YTD'],
textposition='auto',
texttemplate='%{text:,}',
hovertemplate='Exports to date: %{y}'
), row=2, col=1)
fig_ytd.add_trace(go.Scatter(x=df_s_2021_22['month'], y=df_s_2021_22['Exports_YTD'],
mode='markers+lines+text',
marker=dict(size=16, color="Green"),
name='Exports in 2021-22',
text=df_s_2021_22['Exports_YTD'],
textposition='top center',
texttemplate="%{text:,}",
line=dict(color='Green', width=4),
hovertemplate='Exports to date: %{y}B'
), row=2, col=1)
fig_ytd.add_trace(go.Scatter(x=df_s_2021_22['month'],
y=df_s_2021_22['pct_change_yoy'],
mode="lines+markers+text",
marker=dict(size=16, color="Red"),
name="%Change from previous year",
text=df_s_2021_22['pct_change_yoy'],
textposition='middle right',
texttemplate="%{text:.2s}%",
line=dict(color='Red', width=1, dash='dash'),
hovertemplate='%{y}',
), row=1, col=1 )
#fig_ytd.update_yaxes(title_text="Cumulative Exports in US$")
from PIL import Image
image = Image.open('logo.png')
#st.image(logo.png)
fig_ytd.add_layout_image(
dict(
source=image,
xref="paper", yref="paper",
x=1, y=-0.2, #image position on chart
sizex=0.1, sizey=0.1, #image size on chart
xanchor="right", yanchor="bottom"
))
fig_ytd.update_layout(
autosize=True, height=650, width=1050,
legend_traceorder="reversed",
margin=dict(t=90, b=110, l=90, r=40),
title_font=dict(size=25, color='#111111', family="fjalla one, sans-serif"),
xaxis_title='', yaxis_title="",
plot_bgcolor='#ffffff',
paper_bgcolor='#ffffff',
font=dict(color='#111111', size=18, family="roboto, sans-serif"), #font of axis labels
)
#updates axes
fig_ytd.update_xaxes(showline=True, linewidth=2, linecolor='black', row=2, col=1 )
fig_ytd.update_yaxes(visible=False, row=1, col=1 ) #range=[0, 100] if need to set y-axis range
fig_ytd.update_yaxes(showline=True, linewidth=2, linecolor='black', row=2, col=1 )
fig_ytd.update_xaxes(tickangle=0, tickfont=dict(family='Roboto', color='black', size=24))
fig_ytd.update_yaxes(title='Cumulative Exports (US$ Billion)', title_font=dict(family='Roboto', color='black', size=20), row=2, col=1)
fig_ytd.update_yaxes(showgrid=True, gridwidth=1, gridcolor='#758D99')
###############
#title
fig_ytd.add_annotation(
text="Art. Silk & Sythetics Exports by Value",
font=dict(family='Fjalla one', color='#006BA2', size=36),
xref="paper", yref="paper",
x=0, y=1.18,
showarrow=False,
arrowhead=1)
#subtitle
fig_ytd.add_annotation(
text="current year vs. previous year",
font=dict(family='roboto', color='black', size=24),
xref="paper", yref="paper",
x=0, y=1.10,
showarrow=False,
arrowhead=1)
#data reference
fig_ytd.add_annotation(
text="Source: Pakistan Bureau of Statistics",
font=dict(family='Fjalla one', color='#758D99', size=20),
xref="paper", yref="paper",
x=0, y=-0.2,
showarrow=False,
arrowhead=1)
fig_ytd.update_layout(legend=dict(
orientation="h",
yanchor="bottom",
y=1.02,
xanchor="right",
x=1
))
st.plotly_chart(fig_ytd, use_container_width=True) # to show Figure; container width true makes fig. size responsive
###############################################
#############################################
#calculating year-to-date YTD exports by volume
df_s['Exports_YTD_vol'] = df_s.groupby(['Fiscal Year'])['volume'].cumsum()
df_s['pct_change_yoy_vol'] = df_s.groupby(['month'])['Exports_YTD_vol'].pct_change()*100
#filtering data for fiscal years 2020-21 and 2021-22
df_s_2020_21 = df_s.loc[df_s['Fiscal Year'].isin(['2020-2021'])]
df_s_2021_22 = df_s.loc[df_s['Fiscal Year'].isin(['2021-2022'])]
fig_ytd = make_subplots(rows=2, cols=1, shared_xaxes=True,
vertical_spacing=0.01,
row_heights=[0.20,0.80])
#fig_ytd = go.Figure()
#fig_ytd = make_subplots(specs=[[{'secondary_y': True}]], rows=1, cols=1) # to make subplot with 1 row and 1 col
# Add traces
fig_ytd.add_trace(go.Bar(x=df_s_2020_21['month'], y=df_s_2020_21['Exports_YTD_vol'],
name='Exports in 2020-21',
text=df_s_2020_21['Exports_YTD_vol'],
textposition='auto',
texttemplate='%{text:,}',
hovertemplate='Exports to date: %{y}'
), row=2, col=1)
fig_ytd.add_trace(go.Scatter(x=df_s_2021_22['month'], y=df_s_2021_22['Exports_YTD_vol'],
mode='markers+lines+text',
marker=dict(size=16, color="Green"),
name='Exports in 2021-22',
text=df_s_2021_22['Exports_YTD_vol'],
textposition='top center',
texttemplate="%{text:,}",
line=dict(color='Green', width=4),
hovertemplate='Exports to date: %{y}'
), row=2, col=1)
fig_ytd.add_trace(go.Scatter(x=df_s_2021_22['month'],
y=df_s_2021_22['pct_change_yoy_vol'],
mode="lines+markers+text",
marker=dict(size=16, color="Red"),
name="%Change from previous year",
text=df_s_2021_22['pct_change_yoy_vol'],
textposition='middle right',
texttemplate="%{text:.2f}%",
line=dict(color='Red', width=1, dash='dash'),
hovertemplate='%{y}',
), row=1, col=1 )
#fig_ytd.update_yaxes(title_text="Cumulative Exports in US$")
image = Image.open('logo.png')
#st.image(logo.png)
fig_ytd.add_layout_image(
dict(
source=image,
xref="paper", yref="paper",
x=1, y=-0.2, #image position on chart
sizex=0.1, sizey=0.1, #image size on chart
xanchor="right", yanchor="bottom"
))
fig_ytd.update_layout(
autosize=True, height=650, width=1050,
legend_traceorder="reversed",
margin=dict(t=90, b=110, l=90, r=40),
title_font=dict(size=25, color='#111111', family="fjalla one, sans-serif"),
xaxis_title='', yaxis_title="",
plot_bgcolor='#ffffff',
paper_bgcolor='#ffffff',
font=dict(color='#111111', size=18, family="roboto, sans-serif"), #font of axis labels
)
#updates axes
fig_ytd.update_xaxes(showline=True, linewidth=2, linecolor='black', row=2, col=1 )
fig_ytd.update_yaxes(visible=False, row=1, col=1 ) #range=[0, 100] if need to set y-axis range
fig_ytd.update_yaxes(showline=True, linewidth=2, linecolor='black', row=2, col=1 )
fig_ytd.update_xaxes(tickangle=0, tickfont=dict(family='Roboto', color='black', size=24))
fig_ytd.update_yaxes(title='Cumulative Exports (Metric Tons)', title_font=dict(family='Roboto', color='black', size=20), row=2, col=1)
fig_ytd.update_yaxes(showgrid=True, gridwidth=1, gridcolor='#758D99')
###############
#title
fig_ytd.add_annotation(
text="Art. Silk & Synthetics Exports by Volume",
font=dict(family='Fjalla one', color='#006BA2', size=36),
xref="paper", yref="paper",
x=0, y=1.18,
showarrow=False,
arrowhead=1)
#subtitle
fig_ytd.add_annotation(
text="current year vs. previous year",
font=dict(family='roboto', color='black', size=24),
xref="paper", yref="paper",
x=0, y=1.10,
showarrow=False,
arrowhead=1)
#data reference
fig_ytd.add_annotation(
text="Source: Pakistan Bureau of Statistics",
font=dict(family='Fjalla one', color='#758D99', size=20),
xref="paper", yref="paper",
x=0, y=-0.2,
showarrow=False,
arrowhead=1)
fig_ytd.update_layout(legend=dict(
orientation="h",
yanchor="bottom",
y=1.02,
xanchor="right",
x=1
))
st.plotly_chart(fig_ytd, use_container_width=True) # to show Figure; container width true makes fig. size responsive
#####################################
# artificial silk & synthetics chart export price
df_s['pct_change_yoy_price'] = df_s.groupby(['month'])['unit_price'].pct_change()*100
df_s_2020_21 = df_s.loc[df_s['Fiscal Year'].isin(['2020-2021'])]
df_s_2021_22 = df_s.loc[df_s['Fiscal Year'].isin(['2021-2022'])]
fig_ytd = make_subplots(rows=2, cols=1, shared_xaxes=True,
vertical_spacing=0.01,
row_heights=[0.20,0.80])
#fig_ytd = go.Figure()
#fig_ytd = make_subplots(specs=[[{'secondary_y': True}]], rows=1, cols=1) # to make subplot with 1 row and 1 col
# Add traces
fig_ytd.add_trace(go.Bar(x=df_s_2020_21['month'], y=df_s_2020_21['unit_price'],
name='Price in 2020-21',
text=df_s_2020_21['unit_price'],
textposition='auto',
texttemplate='$%{text:.2f}',
hovertemplate='Exports price:$%{y}'
), row=2, col=1)
fig_ytd.add_trace(go.Scatter(x=df_s_2021_22['month'], y=df_s_2021_22['unit_price'],
mode='markers+lines+text',
marker=dict(size=16, color="Green"),
name='Price in 2021-22',
text=df_s_2021_22['unit_price'],
textposition='bottom right',
texttemplate="$%{text:.2f}",
line=dict(color='Green', width=4),
hovertemplate='Exports price:$%{y}'
), row=2, col=1)
fig_ytd.add_trace(go.Scatter(x=df_s_2021_22['month'],
y=df_s_2021_22['pct_change_yoy_price'],
mode="lines+markers+text",
marker=dict(size=16, color="Red"),
name="%Change from previous year",
text=df_s_2021_22['pct_change_yoy_price'],
textposition='middle right',
texttemplate="%{text:.2s}%",
line=dict(color='Red', width=1, dash='dash'),
hovertemplate='%{y}',
), row=1, col=1 )
#fig_ytd.update_yaxes(title_text="Cumulative Exports in US$")
image = Image.open('logo.png')
#st.image(logo.png)
fig_ytd.add_layout_image(
dict(
source=image,
xref="paper", yref="paper",
x=1, y=-0.2, #image position on chart
sizex=0.1, sizey=0.1, #image size on chart
xanchor="right", yanchor="bottom"
))
fig_ytd.update_layout(
autosize=True, height=650, width=1050,
legend_traceorder="reversed",
margin=dict(t=90, b=110, l=90, r=40),
title_font=dict(size=25, color='#111111', family="fjalla one, sans-serif"),
xaxis_title='', yaxis_title="",
plot_bgcolor='#ffffff',
paper_bgcolor='#ffffff',
font=dict(color='#111111', size=18, family="roboto, sans-serif"), #font of axis labels
)
#updates axes
fig_ytd.update_xaxes(showline=True, linewidth=2, linecolor='black', row=2, col=1 )
fig_ytd.update_yaxes(visible=False, row=1, col=1 ) #range=[0, 100] if need to set y-axis range
fig_ytd.update_yaxes(showline=True, linewidth=2, linecolor='black', row=2, col=1 )
fig_ytd.update_xaxes(tickangle=0, tickfont=dict(family='Roboto', color='black', size=24))
fig_ytd.update_yaxes(title='US$ per Kg', title_font=dict(family='Roboto', color='black', size=20), row=2, col=1)
fig_ytd.update_yaxes(showgrid=True, gridwidth=1, gridcolor='#758D99')
###############
#title
fig_ytd.add_annotation(
text="Pakistan Art. Silk & Synthetics Average Export Price",
font=dict(family='Fjalla one', color='#006BA2', size=36),
xref="paper", yref="paper",
x=0, y=1.18,
showarrow=False,
arrowhead=1)
#subtitle
fig_ytd.add_annotation(
text="current year vs. previous year",
font=dict(family='roboto', color='black', size=24),
xref="paper", yref="paper",
x=0, y=1.10,
showarrow=False,
arrowhead=1)
#data reference
fig_ytd.add_annotation(
text="Source: Pakistan Bureau of Statistics",
font=dict(family='Fjalla one', color='#758D99', size=20),
xref="paper", yref="paper",
x=0, y=-0.2,
showarrow=False,
arrowhead=1)
fig_ytd.update_layout(legend=dict(
orientation="h",
yanchor="bottom",
y=1.02,
xanchor="right",
x=1
))
st.plotly_chart(fig_ytd, use_container_width=True) # to show Figure; container width true makes fig. size responsive
#################
#################
elif page == 'Other Textiles Exports':
#####################################
#####################################
#importing csv file as dataframe
df = pd.read_csv('monthly_textile_exports_pbs.csv')
df['date'] = pd.to_datetime(df['date'], infer_datetime_format=True)
df['year'] = df['date'].dt.year #creates a year column out of datetime columns
df['month'] = df['date'].dt.month #creates a month column out of datetime columns
df['year'] = df['year'].astype('string') #converting year column into string for filtering
df['month'] = df['month'].astype('string') #converting year column into string for filtering
df['Fiscal Year'] = df['date'].dt.to_period('Q-JUN').dt.qyear.apply(lambda x: str(x-1) + "-" + str(x))
#adding month column
df['month'] = df['date'].dt.strftime('%b') #creating month names column
# other textiles chart value
df_o = df.loc[df['category'].isin(['Other Textiles'])].copy() #.copy() avoids SettingWithCopyWarning when adding the YTD columns below
#calculating year-to-date YTD exports
df_o['Exports_YTD'] = df_o.groupby(['Fiscal Year'])['Exports_US$'].cumsum()
df_o['pct_change_yoy'] = df_o.groupby(['month'])['Exports_YTD'].pct_change()*100
#filtering data for fiscal years 2020-21 and 2021-22
df_o_2020_21 = df_o.loc[df_o['Fiscal Year'].isin(['2020-2021'])]
df_o_2021_22 = df_o.loc[df_o['Fiscal Year'].isin(['2021-2022'])]
fig_ytd = make_subplots(rows=2, cols=1, shared_xaxes=True,
vertical_spacing=0.01,
row_heights=[0.20,0.80])
#fig_ytd = go.Figure()
#fig_ytd = make_subplots(specs=[[{'secondary_y': True}]], rows=1, cols=1) # to make subplot with 1 row and 1 col
# Add traces
fig_ytd.add_trace(go.Bar(x=df_o_2020_21['month'], y=df_o_2020_21['Exports_YTD'],
name='Exports in 2020-21',
text=df_o_2020_21['Exports_YTD'],
textposition='auto',
texttemplate='%{text:,}',
hovertemplate='Exports to date: %{y}'
), row=2, col=1)
fig_ytd.add_trace(go.Scatter(x=df_o_2021_22['month'], y=df_o_2021_22['Exports_YTD'],
mode='markers+lines+text',
marker=dict(size=16, color="Green"),
name='Exports in 2021-22',
text=df_o_2021_22['Exports_YTD'],
textposition='top center',
texttemplate="%{text:,}",
line=dict(color='Green', width=4),
hovertemplate='Exports to date: %{y}B'
), row=2, col=1)
fig_ytd.add_trace(go.Scatter(x=df_o_2021_22['month'],
y=df_o_2021_22['pct_change_yoy'],
mode="lines+markers+text",
marker=dict(size=16, color="Red"),
name="%Change from previous year",
text=df_o_2021_22['pct_change_yoy'],
textposition='middle right',
texttemplate="%{text:.2s}%",
line=dict(color='Red', width=1, dash='dash'),
hovertemplate='%{y}',
), row=1, col=1 )
#fig_ytd.update_yaxes(title_text="Cumulative Exports in US$")
from PIL import Image
image = Image.open('logo.png')
#st.image(logo.png)
fig_ytd.add_layout_image(
dict(
source=image,
xref="paper", yref="paper",
x=1, y=-0.2, #image position on chart
sizex=0.1, sizey=0.1, #image size on chart
xanchor="right", yanchor="bottom"
))
fig_ytd.update_layout(
autosize=True, height=650, width=1050,
legend_traceorder="reversed",
margin=dict(t=90, b=110, l=90, r=40),
title_font=dict(size=25, color='#111111', family="fjalla one, sans-serif"),
xaxis_title='', yaxis_title="",
plot_bgcolor='#ffffff',
paper_bgcolor='#ffffff',
font=dict(color='#111111', size=18, family="roboto, sans-serif"), #font of axis labels
)
#updates axes
fig_ytd.update_xaxes(showline=True, linewidth=2, linecolor='black', row=2, col=1 )
fig_ytd.update_yaxes(visible=False, row=1, col=1 ) #range=[0, 100] if need to set y-axis range
fig_ytd.update_yaxes(showline=True, linewidth=2, linecolor='black', row=2, col=1 )
fig_ytd.update_xaxes(tickangle=0, tickfont=dict(family='Roboto', color='black', size=24))
fig_ytd.update_yaxes(title='Cumulative Exports (US$ Billion)', title_font=dict(family='Roboto', color='black', size=20), row=2, col=1)
fig_ytd.update_yaxes(showgrid=True, gridwidth=1, gridcolor='#758D99')
###############
#title
fig_ytd.add_annotation(
text="Other Textiles Exports by Value",
font=dict(family='Fjalla one', color='#006BA2', size=36),
xref="paper", yref="paper",
x=0, y=1.18,
showarrow=False,
arrowhead=1)
#subtitle
fig_ytd.add_annotation(
text="current year vs. previous year",
font=dict(family='roboto', color='black', size=24),
xref="paper", yref="paper",
x=0, y=1.10,
showarrow=False,
arrowhead=1)
#data reference
fig_ytd.add_annotation(
text="Source: Pakistan Bureau of Statistics",
font=dict(family='Fjalla one', color='#758D99', size=20),
xref="paper", yref="paper",
x=0, y=-0.2,
showarrow=False,
arrowhead=1)
fig_ytd.update_layout(legend=dict(
orientation="h",
yanchor="bottom",
y=1.02,
xanchor="right",
x=1
))
st.plotly_chart(fig_ytd, use_container_width=True) # to show Figure; container width true makes fig. size responsive
###############################################
#################
#################
elif page == 'Made-ups excluding Bedwear & Towels':
#####################################
#####################################
#importing csv file as dataframe
df = pd.read_csv('monthly_textile_exports_pbs.csv')
df['date'] = pd.to_datetime(df['date'], infer_datetime_format=True)
df['year'] = df['date'].dt.year #creates a year column out of datetime columns
df['month'] = df['date'].dt.month #creates a month column out of datetime columns
df['year'] = df['year'].astype('string') #converting year column into string for filtering
df['month'] = df['month'].astype('string') #converting year column into string for filtering
df['Fiscal Year'] = df['date'].dt.to_period('Q-JUN').dt.qyear.apply(lambda x: str(x-1) + "-" + str(x))
#adding month column
df['month'] = df['date'].dt.strftime('%b') #creating month names column
# madeups chart value
df_m = df.loc[df['category'].isin(['Madeups'])].copy() #.copy() avoids SettingWithCopyWarning when adding the YTD columns below
#calculating year-to-date YTD exports
df_m['Exports_YTD'] = df_m.groupby(['Fiscal Year'])['Exports_US$'].cumsum()
df_m['pct_change_yoy'] = df_m.groupby(['month'])['Exports_YTD'].pct_change()*100
#filtering data for fiscal years 2020-21 and 2021-22
df_m_2020_21 = df_m.loc[df_m['Fiscal Year'].isin(['2020-2021'])]
df_m_2021_22 = df_m.loc[df_m['Fiscal Year'].isin(['2021-2022'])]
fig_ytd = make_subplots(rows=2, cols=1, shared_xaxes=True,
vertical_spacing=0.01,
row_heights=[0.20,0.80])
#fig_ytd = go.Figure()
#fig_ytd = make_subplots(specs=[[{'secondary_y': True}]], rows=1, cols=1) # to make subplot with 1 row and 1 col
# Add traces
fig_ytd.add_trace(go.Bar(x=df_m_2020_21['month'], y=df_m_2020_21['Exports_YTD'],
name='Exports in 2020-21',
text=df_m_2020_21['Exports_YTD'],
textposition='auto',
texttemplate='%{text:,}',
hovertemplate='Exports to date: %{y}'
), row=2, col=1)
fig_ytd.add_trace(go.Scatter(x=df_m_2021_22['month'], y=df_m_2021_22['Exports_YTD'],
mode='markers+lines+text',
marker=dict(size=16, color="Green"),
name='Exports in 2021-22',
text=df_m_2021_22['Exports_YTD'],
textposition='top center',
texttemplate="%{text:,}",
line=dict(color='Green', width=4),
hovertemplate='Exports to date: %{y}B'
), row=2, col=1)
fig_ytd.add_trace(go.Scatter(x=df_m_2021_22['month'],
y=df_m_2021_22['pct_change_yoy'],
mode="lines+markers+text",
marker=dict(size=16, color="Red"),
name="%Change from previous year",
text=df_m_2021_22['pct_change_yoy'],
textposition='middle right',
texttemplate="%{text:.2s}%",
line=dict(color='Red', width=1, dash='dash'),
hovertemplate='%{y}',
), row=1, col=1 )
#fig_ytd.update_yaxes(title_text="Cumulative Exports in US$")
from PIL import Image
image = Image.open('logo.png')
#st.image(logo.png)
fig_ytd.add_layout_image(
dict(
source=image,
xref="paper", yref="paper",
x=1, y=-0.2, #image position on chart
sizex=0.1, sizey=0.1, #image size on chart
xanchor="right", yanchor="bottom"
))
fig_ytd.update_layout(
autosize=True, height=650, width=1050,
legend_traceorder="reversed",
margin=dict(t=90, b=110, l=90, r=40),
title_font=dict(size=25, color='#111111', family="fjalla one, sans-serif"),
xaxis_title='', yaxis_title="",
plot_bgcolor='#ffffff',
paper_bgcolor='#ffffff',
font=dict(color='#111111', size=18, family="roboto, sans-serif"), #font of axis labels
)
#updates axes
fig_ytd.update_xaxes(showline=True, linewidth=2, linecolor='black', row=2, col=1 )
fig_ytd.update_yaxes(visible=False, row=1, col=1 ) #range=[0, 100] if need to set y-axis range
fig_ytd.update_yaxes(showline=True, linewidth=2, linecolor='black', row=2, col=1 )
fig_ytd.update_xaxes(tickangle=0, tickfont=dict(family='Roboto', color='black', size=24))
fig_ytd.update_yaxes(title='Cumulative Exports (US$ Billion)', title_font=dict(family='Roboto', color='black', size=20), row=2, col=1)
fig_ytd.update_yaxes(showgrid=True, gridwidth=1, gridcolor='#758D99')
###############
#title
fig_ytd.add_annotation(
text="Madeups Exports by Value",
font=dict(family='Fjalla one', color='#006BA2', size=36),
xref="paper", yref="paper",
x=0, y=1.18,
showarrow=False,
arrowhead=1)
#subtitle
fig_ytd.add_annotation(
text="current year vs. previous year",
font=dict(family='roboto', color='black', size=24),
xref="paper", yref="paper",
x=0, y=1.10,
showarrow=False,
arrowhead=1)
#data reference
fig_ytd.add_annotation(
text="Source: Pakistan Bureau of Statistics",
font=dict(family='Fjalla one', color='#758D99', size=20),
xref="paper", yref="paper",
x=0, y=-0.2,
showarrow=False,
arrowhead=1)
fig_ytd.update_layout(legend=dict(
orientation="h",
yanchor="bottom",
y=1.02,
xanchor="right",
x=1
))
st.plotly_chart(fig_ytd, use_container_width=True) # to show Figure; container width true makes fig. size responsive
###############################################
#############################################
#calculating year-to-date YTD exports by volume
df_m['Exports_YTD_vol'] = df_m.groupby(['Fiscal Year'])['volume'].cumsum()
df_m['pct_change_yoy_vol'] = df_m.groupby(['month'])['Exports_YTD_vol'].pct_change()*100
#filtering data for fiscal years 2020-21 and 2021-22
df_m_2020_21 = df_m.loc[df_m['Fiscal Year'].isin(['2020-2021'])]
df_m_2021_22 = df_m.loc[df_m['Fiscal Year'].isin(['2021-2022'])]
fig_ytd = make_subplots(rows=2, cols=1, shared_xaxes=True,
vertical_spacing=0.01,
row_heights=[0.20,0.80])
#fig_ytd = go.Figure()
#fig_ytd = make_subplots(specs=[[{'secondary_y': True}]], rows=1, cols=1) # to make subplot with 1 row and 1 col
# Add traces
fig_ytd.add_trace(go.Bar(x=df_m_2020_21['month'], y=df_m_2020_21['Exports_YTD_vol'],
name='Exports in 2020-21',
text=df_m_2020_21['Exports_YTD_vol'],
textposition='auto',
texttemplate='%{text:,}',
hovertemplate='Exports to date: %{y}'
), row=2, col=1)
fig_ytd.add_trace(go.Scatter(x=df_m_2021_22['month'], y=df_m_2021_22['Exports_YTD_vol'],
mode='markers+lines+text',
marker=dict(size=16, color="Green"),
name='Exports in 2021-22',
text=df_m_2021_22['Exports_YTD_vol'],
textposition='top center',
texttemplate="%{text:,}",
line=dict(color='Green', width=4),
hovertemplate='Exports to date: %{y}'
), row=2, col=1)
fig_ytd.add_trace(go.Scatter(x=df_m_2021_22['month'],
y=df_m_2021_22['pct_change_yoy_vol'],
mode="lines+markers+text",
marker=dict(size=16, color="Red"),
name="%Change from previous year",
text=df_m_2021_22['pct_change_yoy_vol'],
textposition='middle right',
texttemplate="%{text:.2f}%",
line=dict(color='Red', width=1, dash='dash'),
hovertemplate='%{y}',
), row=1, col=1 )
#fig_ytd.update_yaxes(title_text="Cumulative Exports in US$")
image = Image.open('logo.png')
#st.image(logo.png)
fig_ytd.add_layout_image(
dict(
source=image,
xref="paper", yref="paper",
x=1, y=-0.2, #image position on chart
sizex=0.1, sizey=0.1, #image size on chart
xanchor="right", yanchor="bottom"
))
fig_ytd.update_layout(
autosize=True, height=650, width=1050,
legend_traceorder="reversed",
margin=dict(t=90, b=110, l=90, r=40),
title_font=dict(size=25, color='#111111', family="fjalla one, sans-serif"),
xaxis_title='', yaxis_title="",
plot_bgcolor='#ffffff',
paper_bgcolor='#ffffff',
font=dict(color='#111111', size=18, family="roboto, sans-serif"), #font of axis labels
)
#updates axes
fig_ytd.update_xaxes(showline=True, linewidth=2, linecolor='black', row=2, col=1 )
fig_ytd.update_yaxes(visible=False, row=1, col=1 ) #range=[0, 100] if need to set y-axis range
fig_ytd.update_yaxes(showline=True, linewidth=2, linecolor='black', row=2, col=1 )
fig_ytd.update_xaxes(tickangle=0, tickfont=dict(family='Roboto', color='black', size=24))
fig_ytd.update_yaxes(title='Cumulative Exports (Metric Tons)', title_font=dict(family='Roboto', color='black', size=20), row=2, col=1)
fig_ytd.update_yaxes(showgrid=True, gridwidth=1, gridcolor='#758D99')
###############
#title
fig_ytd.add_annotation(
text="Madeup Exports by Volume",
font=dict(family='Fjalla one', color='#006BA2', size=36),
xref="paper", yref="paper",
x=0, y=1.18,
showarrow=False,
arrowhead=1)
#subtitle
fig_ytd.add_annotation(
text="current year vs. previous year",
font=dict(family='roboto', color='black', size=24),
xref="paper", yref="paper",
x=0, y=1.10,
showarrow=False,
arrowhead=1)
#data reference
fig_ytd.add_annotation(
text="Source: Pakistan Bureau of Statistics",
font=dict(family='Fjalla one', color='#758D99', size=20),
xref="paper", yref="paper",
x=0, y=-0.2,
showarrow=False,
arrowhead=1)
fig_ytd.update_layout(legend=dict(
orientation="h",
yanchor="bottom",
y=1.02,
xanchor="right",
x=1
))
#st.plotly_chart(fig_ytd, use_container_width=True) # to show Figure; container width true makes fig. size responsive
#################
elif page == 'Tents & Tarpaulines Exports':
#####################################
#####################################
#importing csv file as dataframe
df = pd.read_csv('monthly_textile_exports_pbs.csv')
df['date'] = pd.to_datetime(df['date'], infer_datetime_format=True)
df['year'] = df['date'].dt.year #creates a year column out of datetime columns
df['month'] = df['date'].dt.month #creates a month column out of datetime columns
df['year'] = df['year'].astype('string') #converting year column into string for filtering
df['month'] = df['month'].astype('string') #converting year column into string for filtering
df['Fiscal Year'] = df['date'].dt.to_period('Q-JUN').dt.qyear.apply(lambda x: str(x-1) + "-" + str(x))
#adding month column
df['month'] = df['date'].dt.strftime('%b') #creating month names column
# tents & tarpaulins chart value
df_tents = df.loc[df['category'].isin(['Tents, Tarpaulines'])].copy() #.copy() avoids SettingWithCopyWarning when adding the YTD columns below
#calculating year-to-date YTD exports
df_tents['Exports_YTD'] = df_tents.groupby(['Fiscal Year'])['Exports_US$'].cumsum()
df_tents['pct_change_yoy'] = df_tents.groupby(['month'])['Exports_YTD'].pct_change()*100
#filtering data for fiscal years 2020-21 and 2021-22
df_tents_2020_21 = df_tents.loc[df_tents['Fiscal Year'].isin(['2020-2021'])]
df_tents_2021_22 = df_tents.loc[df_tents['Fiscal Year'].isin(['2021-2022'])]
fig_ytd = make_subplots(rows=2, cols=1, shared_xaxes=True,
vertical_spacing=0.01,
row_heights=[0.20,0.80])
#fig_ytd = go.Figure()
#fig_ytd = make_subplots(specs=[[{'secondary_y': True}]], rows=1, cols=1) # to make subplot with 1 row and 1 col
# Add traces
fig_ytd.add_trace(go.Bar(x=df_tents_2020_21['month'], y=df_tents_2020_21['Exports_YTD'],
name='Exports in 2020-21',
text=df_tents_2020_21['Exports_YTD'],
textposition='auto',
texttemplate='%{text:,}',
hovertemplate='Exports to date: %{y}'
), row=2, col=1)
fig_ytd.add_trace(go.Scatter(x=df_tents_2021_22['month'], y=df_tents_2021_22['Exports_YTD'],
mode='markers+lines+text',
marker=dict(size=16, color="Green"),
name='Exports in 2021-22',
text=df_tents_2021_22['Exports_YTD'],
textposition='top center',
texttemplate="%{text:,}",
line=dict(color='Green', width=4),
hovertemplate='Exports to date: %{y}B'
), row=2, col=1)
fig_ytd.add_trace(go.Scatter(x=df_tents_2021_22['month'],
y=df_tents_2021_22['pct_change_yoy'],
mode="lines+markers+text",
marker=dict(size=16, color="Red"),
name="%Change from previous year",
text=df_tents_2021_22['pct_change_yoy'],
textposition='middle right',
texttemplate="%{text:.2s}%",
line=dict(color='Red', width=1, dash='dash'),
hovertemplate='%{y}',
), row=1, col=1 )
#fig_ytd.update_yaxes(title_text="Cumulative Exports in US$")
from PIL import Image
image = Image.open('logo.png')
#st.image(logo.png)
fig_ytd.add_layout_image(
dict(
source=image,
xref="paper", yref="paper",
x=1, y=-0.2, #image position on chart
sizex=0.1, sizey=0.1, #image size on chart
xanchor="right", yanchor="bottom"
))
fig_ytd.update_layout(
autosize=True, height=650, width=1050,
legend_traceorder="reversed",
margin=dict(t=90, b=110, l=90, r=40),
title_font=dict(size=25, color='#111111', family="fjalla one, sans-serif"),
xaxis_title='', yaxis_title="",
plot_bgcolor='#ffffff',
paper_bgcolor='#ffffff',
font=dict(color='#111111', size=18, family="roboto, sans-serif"), #font of axis labels
)
#updates axes
fig_ytd.update_xaxes(showline=True, linewidth=2, linecolor='black', row=2, col=1 )
fig_ytd.update_yaxes(visible=False, row=1, col=1 ) #range=[0, 100] if need to set y-axis range
fig_ytd.update_yaxes(showline=True, linewidth=2, linecolor='black', row=2, col=1 )
fig_ytd.update_xaxes(tickangle=0, tickfont=dict(family='Roboto', color='black', size=24))
fig_ytd.update_yaxes(title='Cumulative Exports (US$ Billion)', title_font=dict(family='Roboto', color='black', size=20), row=2, col=1)
fig_ytd.update_yaxes(showgrid=True, gridwidth=1, gridcolor='#758D99')
###############
#title
fig_ytd.add_annotation(
text="Tents & Tarpaulines Exports by Value",
font=dict(family='Fjalla one', color='#006BA2', size=36),
xref="paper", yref="paper",
x=0, y=1.18,
showarrow=False,
arrowhead=1)
#subtitle
fig_ytd.add_annotation(
text="current year vs. previous year",
font=dict(family='roboto', color='black', size=24),
xref="paper", yref="paper",
x=0, y=1.10,
showarrow=False,
arrowhead=1)
#data reference
fig_ytd.add_annotation(
text="Source: Pakistan Bureau of Statistics",
font=dict(family='Fjalla one', color='#758D99', size=20),
xref="paper", yref="paper",
x=0, y=-0.2,
showarrow=False,
arrowhead=1)
fig_ytd.update_layout(legend=dict(
orientation="h",
yanchor="bottom",
y=1.02,
xanchor="right",
x=1
))
st.plotly_chart(fig_ytd, use_container_width=True) # to show Figure; container width true makes fig. size responsive
###############################################
#############################################
#calculating year-to-date YTD exports by volume
df_tents['Exports_YTD_vol'] = df_tents.groupby(['Fiscal Year'])['volume'].cumsum()
df_tents['pct_change_yoy_vol'] = df_tents.groupby(['month'])['Exports_YTD_vol'].pct_change()*100
#filtering data for fiscal years 2020-21 and 2021-22
df_tents_2020_21 = df_tents.loc[df_tents['Fiscal Year'].isin(['2020-2021'])]
df_tents_2021_22 = df_tents.loc[df_tents['Fiscal Year'].isin(['2021-2022'])]
fig_ytd = make_subplots(rows=2, cols=1, shared_xaxes=True,
vertical_spacing=0.01,
row_heights=[0.20,0.80])
#fig_ytd = go.Figure()
#fig_ytd = make_subplots(specs=[[{'secondary_y': True}]], rows=1, cols=1) # to make subplot with 1 row and 1 col
# Add traces
fig_ytd.add_trace(go.Bar(x=df_tents_2020_21['month'], y=df_tents_2020_21['Exports_YTD_vol'],
name='Exports in 2020-21',
text=df_tents_2020_21['Exports_YTD_vol'],
textposition='auto',
texttemplate='%{text:,}',
hovertemplate='Exports to date: %{y}'
), row=2, col=1)
fig_ytd.add_trace(go.Scatter(x=df_tents_2021_22['month'], y=df_tents_2021_22['Exports_YTD_vol'],
mode='markers+lines+text',
marker=dict(size=16, color="Green"),
name='Exports in 2021-22',
text=df_tents_2021_22['Exports_YTD_vol'],
textposition='top center',
texttemplate="%{text:,}",
line=dict(color='Green', width=4),
hovertemplate='Exports to date: %{y}'
), row=2, col=1)
fig_ytd.add_trace(go.Scatter(x=df_tents_2021_22['month'],
y=df_tents_2021_22['pct_change_yoy_vol'],
mode="lines+markers+text",
marker=dict(size=16, color="Red"),
name="%Change from previous year",
text=df_tents_2021_22['pct_change_yoy_vol'],
textposition='middle right',
texttemplate="%{text:.2f}%",
line=dict(color='Red', width=1, dash='dash'),
hovertemplate='%{y}',
), row=1, col=1 )
#fig_ytd.update_yaxes(title_text="Cumulative Exports in US$")
image = Image.open('logo.png')
#st.image(logo.png)
fig_ytd.add_layout_image(
dict(
source=image,
xref="paper", yref="paper",
x=1, y=-0.2, #image position on chart
sizex=0.1, sizey=0.1, #image size on chart
xanchor="right", yanchor="bottom"
))
fig_ytd.update_layout(
autosize=True, height=650, width=1050,
legend_traceorder="reversed",
margin=dict(t=90, b=110, l=90, r=40),
title_font=dict(size=25, color='#111111', family="fjalla one, sans-serif"),
xaxis_title='', yaxis_title="",
plot_bgcolor='#ffffff',
paper_bgcolor='#ffffff',
font=dict(color='#111111', size=18, family="roboto, sans-serif"), #font of axis labels
)
#updates axes
fig_ytd.update_xaxes(showline=True, linewidth=2, linecolor='black', row=2, col=1 )
fig_ytd.update_yaxes(visible=False, row=1, col=1 ) #range=[0, 100] if need to set y-axis range
fig_ytd.update_yaxes(showline=True, linewidth=2, linecolor='black', row=2, col=1 )
fig_ytd.update_xaxes(tickangle=0, tickfont=dict(family='Roboto', color='black', size=24))
fig_ytd.update_yaxes(title='Cumulative Exports (Metric Tons)', title_font=dict(family='Roboto', color='black', size=20), row=2, col=1)
fig_ytd.update_yaxes(showgrid=True, gridwidth=1, gridcolor='#758D99')
###############
#title
fig_ytd.add_annotation(
text="Tents & Tarpaulines Exports by Volume",
font=dict(family='Fjalla one', color='#006BA2', size=36),
xref="paper", yref="paper",
x=0, y=1.18,
showarrow=False,
arrowhead=1)
#subtitle
fig_ytd.add_annotation(
text="current year vs. previous year",
font=dict(family='roboto', color='black', size=24),
xref="paper", yref="paper",
x=0, y=1.10,
showarrow=False,
arrowhead=1)
#data reference
fig_ytd.add_annotation(
text="Source: Pakistan Bureau of Statistics",
font=dict(family='Fjalla one', color='#758D99', size=20),
xref="paper", yref="paper",
x=0, y=-0.2,
showarrow=False,
arrowhead=1)
fig_ytd.update_layout(legend=dict(
orientation="h",
yanchor="bottom",
y=1.02,
xanchor="right",
x=1
))
st.plotly_chart(fig_ytd, use_container_width=True) # to show Figure; container width true makes fig. size responsive
#####################################
# tents & tarpaulins chart export price
df_tents['pct_change_yoy_price'] = df_tents.groupby(['month'])['unit_price'].pct_change()*100
df_tents_2020_21 = df_tents.loc[df_tents['Fiscal Year'].isin(['2020-2021'])]
df_tents_2021_22 = df_tents.loc[df_tents['Fiscal Year'].isin(['2021-2022'])]
fig_ytd = make_subplots(rows=2, cols=1, shared_xaxes=True,
vertical_spacing=0.01,
row_heights=[0.20,0.80])
#fig_ytd = go.Figure()
#fig_ytd = make_subplots(specs=[[{'secondary_y': True}]], rows=1, cols=1) # to make subplot with 1 row and 1 col
# Add traces
fig_ytd.add_trace(go.Bar(x=df_tents_2020_21['month'], y=df_tents_2020_21['unit_price'],
name='Price in 2020-21',
text=df_tents_2020_21['unit_price'],
textposition='auto',
texttemplate='$%{text:.2f}',
hovertemplate='Exports price:$%{y}'
), row=2, col=1)
fig_ytd.add_trace(go.Scatter(x=df_tents_2021_22['month'], y=df_tents_2021_22['unit_price'],
mode='markers+lines+text',
marker=dict(size=16, color="Green"),
name='Price in 2021-22',
text=df_tents_2021_22['unit_price'],
textposition='bottom right',
texttemplate="$%{text:.2f}",
line=dict(color='Green', width=4),
hovertemplate='Exports price:$%{y}'
), row=2, col=1)
fig_ytd.add_trace(go.Scatter(x=df_tents_2021_22['month'],
y=df_tents_2021_22['pct_change_yoy_price'],
mode="lines+markers+text",
marker=dict(size=16, color="Red"),
name="%Change from previous year",
text=df_tents_2021_22['pct_change_yoy_price'],
textposition='middle right',
texttemplate="%{text:.2s}%",
line=dict(color='Red', width=1, dash='dash'),
hovertemplate='%{y}',
), row=1, col=1 )
#fig_ytd.update_yaxes(title_text="Cumulative Exports in US$")
image = Image.open('logo.png')
#st.image(logo.png)
fig_ytd.add_layout_image(
dict(
source=image,
xref="paper", yref="paper",
x=1, y=-0.2, #image position on chart
sizex=0.1, sizey=0.1, #image size on chart
xanchor="right", yanchor="bottom"
))
fig_ytd.update_layout(
autosize=True, height=650, width=1050,
legend_traceorder="reversed",
margin=dict(t=90, b=110, l=90, r=40),
title_font=dict(size=25, color='#111111', family="fjalla one, sans-serif"),
xaxis_title='', yaxis_title="",
plot_bgcolor='#ffffff',
paper_bgcolor='#ffffff',
font=dict(color='#111111', size=18, family="roboto, sans-serif"), #font of axis labels
)
#updates axes
fig_ytd.update_xaxes(showline=True, linewidth=2, linecolor='black', row=2, col=1 )
fig_ytd.update_yaxes(visible=False, row=1, col=1 ) #range=[0, 100] if need to set y-axis range
fig_ytd.update_yaxes(showline=True, linewidth=2, linecolor='black', row=2, col=1 )
fig_ytd.update_xaxes(tickangle=0, tickfont=dict(family='Roboto', color='black', size=24))
fig_ytd.update_yaxes(title='US$ per Kg', title_font=dict(family='Roboto', color='black', size=20), row=2, col=1)
fig_ytd.update_yaxes(showgrid=True, gridwidth=1, gridcolor='#758D99')
###############
#title
fig_ytd.add_annotation(
text="Pakistan Tents & Tarpaulines Average Export Price",
font=dict(family='Fjalla one', color='#006BA2', size=36),
xref="paper", yref="paper",
x=0, y=1.18,
showarrow=False,
arrowhead=1)
#subtitle
fig_ytd.add_annotation(
text="current year vs. previous year",
font=dict(family='roboto', color='black', size=24),
xref="paper", yref="paper",
x=0, y=1.10,
showarrow=False,
arrowhead=1)
#data reference
fig_ytd.add_annotation(
text="Source: Pakistan Bureau of Statistics",
font=dict(family='Fjalla one', color='#758D99', size=20),
xref="paper", yref="paper",
x=0, y=-0.2,
showarrow=False,
arrowhead=1)
fig_ytd.update_layout(legend=dict(
orientation="h",
yanchor="bottom",
y=1.02,
xanchor="right",
x=1
))
st.plotly_chart(fig_ytd, use_container_width=True) # to show Figure; container width true makes fig. size responsive
###################
elif page == 'Non-Cotton Yarn Exports':
#####################################
#####################################
#importing csv file as dataframe
df = pd.read_csv('monthly_textile_exports_pbs.csv')
df['date'] = pd.to_datetime(df['date'], infer_datetime_format=True)
df['year'] = df['date'].dt.year #creates a year column out of datetime columns
df['month'] = df['date'].dt.month #creates a month column out of datetime columns
df['year'] = df['year'].astype('string') #converting year column into string for filtering
df['month'] = df['month'].astype('string') #converting year column into string for filtering
df['Fiscal Year'] = df['date'].dt.to_period('Q-JUN').dt.qyear.apply(lambda x: str(x-1) + "-" + str(x))
#adding month column
df['month'] = df['date'].dt.strftime('%b') #creating month names column
# non-cotton yarn chart value
df_ncyarn = df.loc[df['category'].isin(['Non-Cotton Yarn'])].copy() #.copy() avoids SettingWithCopyWarning when adding the YTD columns below
#calculating year-to-date YTD exports
df_ncyarn['Exports_YTD'] = df_ncyarn.groupby(['Fiscal Year'])['Exports_US$'].cumsum()
df_ncyarn['pct_change_yoy'] = df_ncyarn.groupby(['month'])['Exports_YTD'].pct_change()*100
#filtering data for fiscal years 2020-21 and 2021-22
df_ncyarn_2020_21 = df_ncyarn.loc[df_ncyarn['Fiscal Year'].isin(['2020-2021'])]
df_ncyarn_2021_22 = df_ncyarn.loc[df_ncyarn['Fiscal Year'].isin(['2021-2022'])]
fig_ytd = make_subplots(rows=2, cols=1, shared_xaxes=True,
vertical_spacing=0.01,
row_heights=[0.20,0.80])
#fig_ytd = go.Figure()
#fig_ytd = make_subplots(specs=[[{'secondary_y': True}]], rows=1, cols=1) # to make subplot with 1 row and 1 col
# Add traces
fig_ytd.add_trace(go.Bar(x=df_ncyarn_2020_21['month'], y=df_ncyarn_2020_21['Exports_YTD'],
name='Exports in 2020-21',
text=df_ncyarn_2020_21['Exports_YTD'],
textposition='auto',
texttemplate='%{text:,}',
hovertemplate='Exports to date: %{y}'
), row=2, col=1)
fig_ytd.add_trace(go.Scatter(x=df_ncyarn_2021_22['month'], y=df_ncyarn_2021_22['Exports_YTD'],
mode='markers+lines+text',
marker=dict(size=16, color="Green"),
name='Exports in 2021-22',
text=df_ncyarn_2021_22['Exports_YTD'],
textposition='top center',
texttemplate="%{text:,}",
line=dict(color='Green', width=4),
hovertemplate='Exports to date: %{y}B'
), row=2, col=1)
fig_ytd.add_trace(go.Scatter(x=df_ncyarn_2021_22['month'],
y=df_ncyarn_2021_22['pct_change_yoy'],
mode="lines+markers+text",
marker=dict(size=16, color="Red"),
name="%Change from previous year",
text=df_ncyarn_2021_22['pct_change_yoy'],
textposition='middle right',
texttemplate="%{text:.2s}%",
line=dict(color='Red', width=1, dash='dash'),
hovertemplate='%{y}',
), row=1, col=1 )
#fig_ytd.update_yaxes(title_text="Cumulative Exports in US$")
from PIL import Image
image = Image.open('logo.png')
#st.image(logo.png)
fig_ytd.add_layout_image(
dict(
source=image,
xref="paper", yref="paper",
x=1, y=-0.2, #image position on chart
sizex=0.1, sizey=0.1, #image size on chart
xanchor="right", yanchor="bottom"
))
fig_ytd.update_layout(
autosize=True, height=650, width=1050,
legend_traceorder="reversed",
margin=dict(t=90, b=110, l=90, r=40),
title_font=dict(size=25, color='#111111', family="fjalla one, sans-serif"),
xaxis_title='', yaxis_title="",
plot_bgcolor='#ffffff',
paper_bgcolor='#ffffff',
font=dict(color='#111111', size=18, family="roboto, sans-serif"), #font of axis labels
)
#updates axes
fig_ytd.update_xaxes(showline=True, linewidth=2, linecolor='black', row=2, col=1 )
fig_ytd.update_yaxes(visible=False, row=1, col=1 ) #range=[0, 100] if need to set y-axis range
fig_ytd.update_yaxes(showline=True, linewidth=2, linecolor='black', row=2, col=1 )
fig_ytd.update_xaxes(tickangle=0, tickfont=dict(family='Roboto', color='black', size=24))
fig_ytd.update_yaxes(title='Cumulative Exports (US$ Billion)', title_font=dict(family='Roboto', color='black', size=20), row=2, col=1)
fig_ytd.update_yaxes(showgrid=True, gridwidth=1, gridcolor='#758D99')
###############
#title
fig_ytd.add_annotation(
text="Non-Cotton Yarn Exports by Value",
font=dict(family='Fjalla one', color='#006BA2', size=36),
xref="paper", yref="paper",
x=0, y=1.18,
showarrow=False,
arrowhead=1)
#subtitle
fig_ytd.add_annotation(
text="current year vs. previous year",
font=dict(family='roboto', color='black', size=24),
xref="paper", yref="paper",
x=0, y=1.10,
showarrow=False,
arrowhead=1)
#data reference
fig_ytd.add_annotation(
text="Source: Pakistan Bureau of Statistics",
font=dict(family='Fjalla one', color='#758D99', size=20),
xref="paper", yref="paper",
x=0, y=-0.2,
showarrow=False,
arrowhead=1)
fig_ytd.update_layout(legend=dict(
orientation="h",
yanchor="bottom",
y=1.02,
xanchor="right",
x=1
))
st.plotly_chart(fig_ytd, use_container_width=True) # to show Figure; container width true makes fig. size responsive
###############################################
#############################################
#calculating year-to-date YTD exports by volume
df_ncyarn['Exports_YTD_vol'] = df_ncyarn.groupby(['Fiscal Year'])['volume'].cumsum()
df_ncyarn['pct_change_yoy_vol'] = df_ncyarn.groupby(['month'])['Exports_YTD_vol'].pct_change()*100
#filtering data of fiscal year 2020-21
df_ncyarn_2020_21 = df_ncyarn.loc[df_ncyarn['Fiscal Year'].isin(['2020-2021'])]
df_ncyarn_2021_22 = df_ncyarn.loc[df_ncyarn['Fiscal Year'].isin(['2021-2022'])]
fig_ytd = make_subplots(rows=2, cols=1, shared_xaxes=True,
vertical_spacing=0.01,
row_heights=[0.20,0.80])
#fig_ytd = go.Figure()
#fig_ytd = make_subplots(specs=[[{'secondary_y': True}]], rows=1, cols=1) # to make subplot with 1 row and 1 col
# Add traces
fig_ytd.add_trace(go.Bar(x=df_ncyarn_2020_21['month'], y=df_ncyarn_2020_21['Exports_YTD_vol'],
name='Exports in 2020-21',
text=df_ncyarn_2020_21['Exports_YTD_vol'],
textposition='auto',
texttemplate='%{text:,}',
hovertemplate='Exports to date: %{y}'
), row=2, col=1)
fig_ytd.add_trace(go.Scatter(x=df_ncyarn_2021_22['month'], y=df_ncyarn_2021_22['Exports_YTD_vol'],
mode='markers+lines+text',
marker=dict(size=16, color="Green"),
name='Exports in 2021-22',
text=df_ncyarn_2021_22['Exports_YTD_vol'],
textposition='top center',
texttemplate="%{text:,}",
line=dict(color='Green', width=4),
hovertemplate='Exports to date: %{y}B'
), row=2, col=1)
fig_ytd.add_trace(go.Scatter(x=df_ncyarn_2021_22['month'],
y=df_ncyarn_2021_22['pct_change_yoy_vol'],
mode="lines+markers+text",
marker=dict(size=16, color="Red"),
name="%Change from previous year",
text=df_ncyarn_2021_22['pct_change_yoy_vol'],
textposition='middle right',
texttemplate="%{text:.2f}%",
line=dict(color='Red', width=1, dash='dash'),
hovertemplate='%{y}',
), row=1, col=1 )
#fig_ytd.update_yaxes(title_text="Cumulative Exports in US$")
image = Image.open('logo.png')
#st.image(logo.png)
fig_ytd.add_layout_image(
dict(
source=image,
xref="paper", yref="paper",
x=1, y=-0.2, #image position on chart
sizex=0.1, sizey=0.1, #image size on chart
xanchor="right", yanchor="bottom"
))
fig_ytd.update_layout(
autosize=True, height=650, width=1050,
legend_traceorder="reversed",
margin=dict(t=90, b=110, l=90, r=40),
title_font=dict(size=25, color='#111111', family="fjalla one, sans-serif"),
xaxis_title='', yaxis_title="",
plot_bgcolor='#ffffff',
paper_bgcolor='#ffffff',
font=dict(color='#111111', size=18, family="roboto, sans-serif"), #font of axis labels
)
#updates axes
fig_ytd.update_xaxes(showline=True, linewidth=2, linecolor='black', row=2, col=1 )
fig_ytd.update_yaxes(visible=False, row=1, col=1 ) #range=[0, 100] if need to set y-axis range
fig_ytd.update_yaxes(showline=True, linewidth=2, linecolor='black', row=2, col=1 )
fig_ytd.update_xaxes(tickangle=0, tickfont=dict(family='Roboto', color='black', size=24))
fig_ytd.update_yaxes(title='Cumulative Exports (Metric Tons)', title_font=dict(family='Roboto', color='black', size=20), row=2, col=1)
fig_ytd.update_yaxes(showgrid=True, gridwidth=1, gridcolor='#758D99')
###############
#title
fig_ytd.add_annotation(
text="Non-Cotton Exports by Volume",
font=dict(family='Fjalla one', color='#006BA2', size=36),
xref="paper", yref="paper",
x=0, y=1.18,
showarrow=False,
arrowhead=1)
#subtitle
fig_ytd.add_annotation(
text="current year vs. previous year",
font=dict(family='roboto', color='black', size=24),
xref="paper", yref="paper",
x=0, y=1.10,
showarrow=False,
arrowhead=1)
#data reference
fig_ytd.add_annotation(
text="Source: Pakistan Bureau of Statistics",
font=dict(family='Fjalla one', color='#758D99', size=20),
xref="paper", yref="paper",
x=0, y=-0.2,
showarrow=False,
arrowhead=1)
fig_ytd.update_layout(legend=dict(
orientation="h",
yanchor="bottom",
y=1.02,
xanchor="right",
x=1
))
st.plotly_chart(fig_ytd, use_container_width=True) # to show Figure; container width true makes fig. size responsive
#####################################
# non-cotton yarn chart export price
df_ncyarn['pct_change_yoy_price'] = df_ncyarn.groupby(['month'])['unit_price'].pct_change()*100
df_ncyarn_2020_21 = df_ncyarn.loc[df_ncyarn['Fiscal Year'].isin(['2020-2021'])]
df_ncyarn_2021_22 = df_ncyarn.loc[df_ncyarn['Fiscal Year'].isin(['2021-2022'])]
fig_ytd = make_subplots(rows=2, cols=1, shared_xaxes=True,
vertical_spacing=0.01,
row_heights=[0.20,0.80])
#fig_ytd = go.Figure()
#fig_ytd = make_subplots(specs=[[{'secondary_y': True}]], rows=1, cols=1) # to make subplot with 1 row and 1 col
# Add traces
fig_ytd.add_trace(go.Bar(x=df_ncyarn_2020_21['month'], y=df_ncyarn_2020_21['unit_price'],
name='Price in 2020-21',
text=df_ncyarn_2020_21['unit_price'],
textposition='auto',
texttemplate='$%{text:.2f}',
hovertemplate='Exports price:$%{y}'
), row=2, col=1)
fig_ytd.add_trace(go.Scatter(x=df_ncyarn_2021_22['month'], y=df_ncyarn_2021_22['unit_price'],
mode='markers+lines+text',
marker=dict(size=16, color="Green"),
name='Price in 2021-22',
text=df_ncyarn_2021_22['unit_price'],
textposition='bottom right',
texttemplate="$%{text:.2f}",
line=dict(color='Green', width=4),
hovertemplate='Exports price:$%{y}B'
), row=2, col=1)
fig_ytd.add_trace(go.Scatter(x=df_ncyarn_2021_22['month'],
y=df_ncyarn_2021_22['pct_change_yoy_price'],
mode="lines+markers+text",
marker=dict(size=16, color="Red"),
name="%Change from previous year",
text=df_ncyarn_2021_22['pct_change_yoy_price'],
textposition='middle right',
texttemplate="%{text:.2s}%",
line=dict(color='Red', width=1, dash='dash'),
hovertemplate='%{y}',
), row=1, col=1 )
#fig_ytd.update_yaxes(title_text="Cumulative Exports in US$")
image = Image.open('logo.png')
#st.image(logo.png)
fig_ytd.add_layout_image(
dict(
source=image,
xref="paper", yref="paper",
x=1, y=-0.2, #image position on chart
sizex=0.1, sizey=0.1, #image size on chart
xanchor="right", yanchor="bottom"
))
fig_ytd.update_layout(
autosize=True, height=650, width=1050,
legend_traceorder="reversed",
margin=dict(t=90, b=110, l=90, r=40),
title_font=dict(size=25, color='#111111', family="fjalla one, sans-serif"),
xaxis_title='', yaxis_title="",
plot_bgcolor='#ffffff',
paper_bgcolor='#ffffff',
font=dict(color='#111111', size=18, family="roboto, sans-serif"), #font of axis labels
)
#updates axes
fig_ytd.update_xaxes(showline=True, linewidth=2, linecolor='black', row=2, col=1 )
fig_ytd.update_yaxes(visible=False, row=1, col=1 ) #range=[0, 100] if need to set y-axis range
fig_ytd.update_yaxes(showline=True, linewidth=2, linecolor='black', row=2, col=1 )
fig_ytd.update_xaxes(tickangle=0, tickfont=dict(family='Roboto', color='black', size=24))
fig_ytd.update_yaxes(title='US$ per Kg', title_font=dict(family='Roboto', color='black', size=20), row=2, col=1)
fig_ytd.update_yaxes(showgrid=True, gridwidth=1, gridcolor='#758D99')
###############
#title
fig_ytd.add_annotation(
text="Pakistan Non-Cotton Yarn Average Export Price",
font=dict(family='Fjalla one', color='#006BA2', size=36),
xref="paper", yref="paper",
x=0, y=1.18,
showarrow=False,
arrowhead=1)
#subtitle
fig_ytd.add_annotation(
text="current year vs. previous year",
font=dict(family='roboto', color='black', size=24),
xref="paper", yref="paper",
x=0, y=1.10,
showarrow=False,
arrowhead=1)
#data reference
fig_ytd.add_annotation(
text="Source: Pakistan Bureau of Statistics",
font=dict(family='Fjalla one', color='#758D99', size=20),
xref="paper", yref="paper",
x=0, y=-0.2,
showarrow=False,
arrowhead=1)
fig_ytd.update_layout(legend=dict(
orientation="h",
yanchor="bottom",
y=1.02,
xanchor="right",
x=1
))
st.plotly_chart(fig_ytd, use_container_width=True) # to show Figure; container width true makes fig. size responsive
################
elif page == 'Bedwear Exports':
#####################################
#####################################
#importing csv file as dataframe
df = pd.read_csv('monthly_textile_exports_pbs.csv')
df['date'] = pd.to_datetime(df['date'], infer_datetime_format=True)
df['year'] = df['date'].dt.year #creates a year column out of datetime columns
df['month'] = df['date'].dt.month #creates a month column out of datetime columns
df['year'] = df['year'].astype('string') #converting year column into string for filtering
df['month'] = df['month'].astype('string') #converting month column into string for filtering
df['Fiscal Year'] = df['date'].dt.to_period('Q-JUN').dt.qyear.apply(lambda x: str(x-1) + "-" + str(x))
#adding month column
df['month'] = df['date'].dt.strftime('%b') #creating month names column
#####################################
# bedwear chart value
df_bedwear = df.loc[df['category'].isin(['Bedwear'])].copy() #copy so the YTD columns added below don't raise SettingWithCopyWarning
#calculating year-to-date YTD exports
df_bedwear['Exports_YTD'] = df_bedwear.groupby(['Fiscal Year'])['Exports_US$'].cumsum()
df_bedwear['pct_change_yoy'] = df_bedwear.groupby(['month'])['Exports_YTD'].pct_change()*100
#filtering data of fiscal year 2020-21
df_bedwear_2020_21 = df_bedwear.loc[df_bedwear['Fiscal Year'].isin(['2020-2021'])]
df_bedwear_2021_22 = df_bedwear.loc[df_bedwear['Fiscal Year'].isin(['2021-2022'])]
fig_ytd = make_subplots(rows=2, cols=1, shared_xaxes=True,
vertical_spacing=0.01,
row_heights=[0.20,0.80])
#fig_ytd = go.Figure()
#fig_ytd = make_subplots(specs=[[{'secondary_y': True}]], rows=1, cols=1) # to make subplot with 1 row and 1 col
# Add traces
fig_ytd.add_trace(go.Bar(x=df_bedwear_2020_21['month'], y=df_bedwear_2020_21['Exports_YTD'],
name='Exports in 2020-21',
text=df_bedwear_2020_21['Exports_YTD'],
textposition='auto',
texttemplate='%{text:,}',
hovertemplate='Exports to date: %{y}'
), row=2, col=1)
fig_ytd.add_trace(go.Scatter(x=df_bedwear_2021_22['month'], y=df_bedwear_2021_22['Exports_YTD'],
mode='markers+lines+text',
marker=dict(size=16, color="Green"),
name='Exports in 2021-22',
text=df_bedwear_2021_22['Exports_YTD'],
textposition='top center',
texttemplate="%{text:,}",
line=dict(color='Green', width=4),
hovertemplate='Exports to date: %{y}B'
), row=2, col=1)
fig_ytd.add_trace(go.Scatter(x=df_bedwear_2021_22['month'],
y=df_bedwear_2021_22['pct_change_yoy'],
mode="lines+markers+text",
marker=dict(size=16, color="Red"),
name="%Change from previous year",
text=df_bedwear_2021_22['pct_change_yoy'],
textposition='middle right',
texttemplate="%{text:.2s}%",
line=dict(color='Red', width=1, dash='dash'),
hovertemplate='%{y}',
), row=1, col=1 )
#fig_ytd.update_yaxes(title_text="Cumulative Exports in US$")
image = Image.open('logo.png')
#st.image(logo.png)
fig_ytd.add_layout_image(
dict(
source=image,
xref="paper", yref="paper",
x=1, y=-0.2, #image position on chart
sizex=0.1, sizey=0.1, #image size on chart
xanchor="right", yanchor="bottom"
))
fig_ytd.update_layout(
autosize=True, height=650, width=1050,
legend_traceorder="reversed",
margin=dict(t=90, b=110, l=90, r=40),
title_font=dict(size=25, color='#111111', family="fjalla one, sans-serif"),
xaxis_title='', yaxis_title="",
plot_bgcolor='#ffffff',
paper_bgcolor='#ffffff',
font=dict(color='#111111', size=18, family="roboto, sans-serif"), #font of axis labels
)
#updates axes
fig_ytd.update_xaxes(showline=True, linewidth=2, linecolor='black', row=2, col=1 )
fig_ytd.update_yaxes(visible=False, row=1, col=1 ) #range=[0, 100] if need to set y-axis range
fig_ytd.update_yaxes(showline=True, linewidth=2, linecolor='black', row=2, col=1 )
fig_ytd.update_xaxes(tickangle=0, tickfont=dict(family='Roboto', color='black', size=24))
fig_ytd.update_yaxes(title='Cumulative Exports (US$ Billion)', title_font=dict(family='Roboto', color='black', size=20), row=2, col=1)
fig_ytd.update_yaxes(showgrid=True, gridwidth=1, gridcolor='#758D99')
###############
#title
fig_ytd.add_annotation(
text="Pakistan Bedwear Exports by Value",
font=dict(family='Fjalla one', color='#006BA2', size=36),
xref="paper", yref="paper",
x=0, y=1.18,
showarrow=False,
arrowhead=1)
#subtitle
fig_ytd.add_annotation(
text="current year vs. previous year",
font=dict(family='roboto', color='black', size=24),
xref="paper", yref="paper",
x=0, y=1.10,
showarrow=False,
arrowhead=1)
#data reference
fig_ytd.add_annotation(
text="Source: Pakistan Bureau of Statistics",
font=dict(family='Fjalla one', color='#758D99', size=20),
xref="paper", yref="paper",
x=0, y=-0.2,
showarrow=False,
arrowhead=1)
fig_ytd.update_layout(legend=dict(
orientation="h",
yanchor="bottom",
y=1.02,
xanchor="right",
x=1
))
st.plotly_chart(fig_ytd, use_container_width=True) # to show Figure; container width true makes fig. size responsive
#####################################
#####################################
# bedwear chart volume
df_bedwear = df.loc[df['category'].isin(['Bedwear'])].copy() #copy so the YTD columns added below don't raise SettingWithCopyWarning
#calculating year-to-date YTD exports
df_bedwear['Exports_YTD_vol'] = df_bedwear.groupby(['Fiscal Year'])['volume'].cumsum()
df_bedwear['pct_change_yoy_vol'] = df_bedwear.groupby(['month'])['Exports_YTD_vol'].pct_change()*100
#filtering data of fiscal year 2020-21
df_bedwear_2020_21 = df_bedwear.loc[df_bedwear['Fiscal Year'].isin(['2020-2021'])]
df_bedwear_2021_22 = df_bedwear.loc[df_bedwear['Fiscal Year'].isin(['2021-2022'])]
fig_ytd = make_subplots(rows=2, cols=1, shared_xaxes=True,
vertical_spacing=0.01,
row_heights=[0.20,0.80])
#fig_ytd = go.Figure()
#fig_ytd = make_subplots(specs=[[{'secondary_y': True}]], rows=1, cols=1) # to make subplot with 1 row and 1 col
# Add traces
fig_ytd.add_trace(go.Bar(x=df_bedwear_2020_21['month'], y=df_bedwear_2020_21['Exports_YTD_vol'],
name='Exports in 2020-21',
text=df_bedwear_2020_21['Exports_YTD_vol'],
textposition='auto',
texttemplate='%{text:,}',
hovertemplate='Exports to date: %{y}'
), row=2, col=1)
fig_ytd.add_trace(go.Scatter(x=df_bedwear_2021_22['month'], y=df_bedwear_2021_22['Exports_YTD_vol'],
mode='markers+lines+text',
marker=dict(size=16, color="Green"),
name='Exports in 2021-22',
text=df_bedwear_2021_22['Exports_YTD_vol'],
textposition='top center',
texttemplate="%{text:,}",
line=dict(color='Green', width=4),
hovertemplate='Exports to date: %{y}B'
), row=2, col=1)
fig_ytd.add_trace(go.Scatter(x=df_bedwear_2021_22['month'],
y=df_bedwear_2021_22['pct_change_yoy_vol'],
mode="lines+markers+text",
marker=dict(size=16, color="Red"),
name="%Change from previous year",
text=df_bedwear_2021_22['pct_change_yoy_vol'],
textposition='middle right',
texttemplate="%{text:.2f}%",
line=dict(color='Red', width=1, dash='dash'),
hovertemplate='%{y}',
), row=1, col=1 )
#fig_ytd.update_yaxes(title_text="Cumulative Exports in US$")
image = Image.open('logo.png')
#st.image(logo.png)
fig_ytd.add_layout_image(
dict(
source=image,
xref="paper", yref="paper",
x=1, y=-0.2, #image position on chart
sizex=0.1, sizey=0.1, #image size on chart
xanchor="right", yanchor="bottom"
))
fig_ytd.update_layout(
autosize=True, height=650, width=1050,
legend_traceorder="reversed",
margin=dict(t=90, b=110, l=90, r=40),
title_font=dict(size=25, color='#111111', family="fjalla one, sans-serif"),
xaxis_title='', yaxis_title="",
plot_bgcolor='#ffffff',
paper_bgcolor='#ffffff',
font=dict(color='#111111', size=18, family="roboto, sans-serif"), #font of axis labels
)
#updates axes
fig_ytd.update_xaxes(showline=True, linewidth=2, linecolor='black', row=2, col=1 )
fig_ytd.update_yaxes(visible=False, row=1, col=1 ) #range=[0, 100] if need to set y-axis range
fig_ytd.update_yaxes(showline=True, linewidth=2, linecolor='black', row=2, col=1 )
fig_ytd.update_xaxes(tickangle=0, tickfont=dict(family='Roboto', color='black', size=24))
fig_ytd.update_yaxes(title='Cumulative Exports (Metric Tons)', title_font=dict(family='Roboto', color='black', size=20), row=2, col=1)
fig_ytd.update_yaxes(showgrid=True, gridwidth=1, gridcolor='#758D99')
###############
#title
fig_ytd.add_annotation(
text="Pakistan Bedwear Exports by Volume",
font=dict(family='Fjalla one', color='#006BA2', size=36),
xref="paper", yref="paper",
x=0, y=1.18,
showarrow=False,
arrowhead=1)
#subtitle
fig_ytd.add_annotation(
text="current year vs. previous year",
font=dict(family='roboto', color='black', size=24),
xref="paper", yref="paper",
x=0, y=1.10,
showarrow=False,
arrowhead=1)
#data reference
fig_ytd.add_annotation(
text="Source: Pakistan Bureau of Statistics",
font=dict(family='Fjalla one', color='#758D99', size=20),
xref="paper", yref="paper",
x=0, y=-0.2,
showarrow=False,
arrowhead=1)
fig_ytd.update_layout(legend=dict(
orientation="h",
yanchor="bottom",
y=1.02,
xanchor="right",
x=1
))
st.plotly_chart(fig_ytd, use_container_width=True) # to show Figure; container width true makes fig. size responsive
#####################################
# bedwear chart export price
df_bedwear['pct_change_yoy_price'] = df_bedwear.groupby(['month'])['unit_price'].pct_change()*100
df_bedwear_2020_21 = df_bedwear.loc[df_bedwear['Fiscal Year'].isin(['2020-2021'])]
df_bedwear_2021_22 = df_bedwear.loc[df_bedwear['Fiscal Year'].isin(['2021-2022'])]
fig_ytd = make_subplots(rows=2, cols=1, shared_xaxes=True,
vertical_spacing=0.01,
row_heights=[0.20,0.80])
#fig_ytd = go.Figure()
#fig_ytd = make_subplots(specs=[[{'secondary_y': True}]], rows=1, cols=1) # to make subplot with 1 row and 1 col
# Add traces
fig_ytd.add_trace(go.Bar(x=df_bedwear_2020_21['month'], y=df_bedwear_2020_21['unit_price'],
name='Price in 2020-21',
text=df_bedwear_2020_21['unit_price'],
textposition='auto',
texttemplate='$%{text:.2f}',
hovertemplate='Exports price:$%{y}'
), row=2, col=1)
fig_ytd.add_trace(go.Scatter(x=df_bedwear_2021_22['month'], y=df_bedwear_2021_22['unit_price'],
mode='markers+lines+text',
marker=dict(size=16, color="Green"),
name='Price in 2021-22',
text=df_bedwear_2021_22['unit_price'],
textposition='bottom right',
texttemplate="$%{text:.2f}",
line=dict(color='Green', width=4),
hovertemplate='Exports price:$%{y}B'
), row=2, col=1)
fig_ytd.add_trace(go.Scatter(x=df_bedwear_2021_22['month'],
y=df_bedwear_2021_22['pct_change_yoy_price'],
mode="lines+markers+text",
marker=dict(size=16, color="Red"),
name="%Change from previous year",
text=df_bedwear_2021_22['pct_change_yoy_price'],
textposition='middle right',
texttemplate="%{text:.2s}%",
line=dict(color='Red', width=1, dash='dash'),
hovertemplate='%{y}',
), row=1, col=1 )
#fig_ytd.update_yaxes(title_text="Cumulative Exports in US$")
image = Image.open('logo.png')
#st.image(logo.png)
fig_ytd.add_layout_image(
dict(
source=image,
xref="paper", yref="paper",
x=1, y=-0.2, #image position on chart
sizex=0.1, sizey=0.1, #image size on chart
xanchor="right", yanchor="bottom"
))
fig_ytd.update_layout(
autosize=True, height=650, width=1050,
legend_traceorder="reversed",
margin=dict(t=90, b=110, l=90, r=40),
title_font=dict(size=25, color='#111111', family="fjalla one, sans-serif"),
xaxis_title='', yaxis_title="",
plot_bgcolor='#ffffff',
paper_bgcolor='#ffffff',
font=dict(color='#111111', size=18, family="roboto, sans-serif"), #font of axis labels
)
#updates axes
fig_ytd.update_xaxes(showline=True, linewidth=2, linecolor='black', row=2, col=1 )
fig_ytd.update_yaxes(visible=False, row=1, col=1 ) #range=[0, 100] if need to set y-axis range
fig_ytd.update_yaxes(showline=True, linewidth=2, linecolor='black', row=2, col=1 )
fig_ytd.update_xaxes(tickangle=0, tickfont=dict(family='Roboto', color='black', size=24))
fig_ytd.update_yaxes(title='US$ per Kg', title_font=dict(family='Roboto', color='black', size=20), row=2, col=1)
fig_ytd.update_yaxes(showgrid=True, gridwidth=1, gridcolor='#758D99')
###############
#title
fig_ytd.add_annotation(
text="Pakistan Bedwear Average Export Price",
font=dict(family='Fjalla one', color='#006BA2', size=36),
xref="paper", yref="paper",
x=0, y=1.18,
showarrow=False,
arrowhead=1)
#subtitle
fig_ytd.add_annotation(
text="current year vs. previous year",
font=dict(family='roboto', color='black', size=24),
xref="paper", yref="paper",
x=0, y=1.10,
showarrow=False,
arrowhead=1)
#data reference
fig_ytd.add_annotation(
text="Source: Pakistan Bureau of Statistics",
font=dict(family='Fjalla one', color='#758D99', size=20),
xref="paper", yref="paper",
x=0, y=-0.2,
showarrow=False,
arrowhead=1)
fig_ytd.update_layout(legend=dict(
orientation="h",
yanchor="bottom",
y=1.02,
xanchor="right",
x=1
))
st.plotly_chart(fig_ytd, use_container_width=True) # to show Figure; container width true makes fig. size responsive
#####################################
################
elif page == 'Towel Exports':
#####################################
#####################################
#importing csv file as dataframe
df = pd.read_csv('monthly_textile_exports_pbs.csv')
df['date'] = pd.to_datetime(df['date'], infer_datetime_format=True)
df['year'] = df['date'].dt.year #creates a year column out of datetime columns
df['month'] = df['date'].dt.month #creates a month column out of datetime columns
df['year'] = df['year'].astype('string') #converting year column into string for filtering
df['month'] = df['month'].astype('string') #converting month column into string for filtering
df['Fiscal Year'] = df['date'].dt.to_period('Q-JUN').dt.qyear.apply(lambda x: str(x-1) + "-" + str(x))
#adding month column
df['month'] = df['date'].dt.strftime('%b') #creating month names column
#####################################
# towels chart value
df_towels = df.loc[df['category'].isin(['Towels'])].copy() #copy so the YTD columns added below don't raise SettingWithCopyWarning
#calculating year-to-date YTD exports
df_towels['Exports_YTD'] = df_towels.groupby(['Fiscal Year'])['Exports_US$'].cumsum()
df_towels['pct_change_yoy'] = df_towels.groupby(['month'])['Exports_YTD'].pct_change()*100
#filtering data of fiscal year 2020-21
df_towels_2020_21 = df_towels.loc[df_towels['Fiscal Year'].isin(['2020-2021'])]
df_towels_2021_22 = df_towels.loc[df_towels['Fiscal Year'].isin(['2021-2022'])]
fig_ytd = make_subplots(rows=2, cols=1, shared_xaxes=True,
vertical_spacing=0.01,
row_heights=[0.20,0.80])
#fig_ytd = go.Figure()
#fig_ytd = make_subplots(specs=[[{'secondary_y': True}]], rows=1, cols=1) # to make subplot with 1 row and 1 col
# Add traces
fig_ytd.add_trace(go.Bar(x=df_towels_2020_21['month'], y=df_towels_2020_21['Exports_YTD'],
name='Exports in 2020-21',
text=df_towels_2020_21['Exports_YTD'],
textposition='auto',
texttemplate='%{text:,}',
hovertemplate='Exports to date: %{y}'
), row=2, col=1)
fig_ytd.add_trace(go.Scatter(x=df_towels_2021_22['month'], y=df_towels_2021_22['Exports_YTD'],
mode='markers+lines+text',
marker=dict(size=16, color="Green"),
name='Exports in 2021-22',
text=df_towels_2021_22['Exports_YTD'],
textposition='top center',
texttemplate="%{text:,}",
line=dict(color='Green', width=4),
hovertemplate='Exports to date: %{y}B'
), row=2, col=1)
fig_ytd.add_trace(go.Scatter(x=df_towels_2021_22['month'],
y=df_towels_2021_22['pct_change_yoy'],
mode="lines+markers+text",
marker=dict(size=16, color="Red"),
name="%Change from previous year",
text=df_towels_2021_22['pct_change_yoy'],
textposition='middle right',
texttemplate="%{text:.2s}%",
line=dict(color='Red', width=1, dash='dash'),
hovertemplate='%{y}',
), row=1, col=1 )
#fig_ytd.update_yaxes(title_text="Cumulative Exports in US$")
image = Image.open('logo.png')
#st.image(logo.png)
fig_ytd.add_layout_image(
dict(
source=image,
xref="paper", yref="paper",
x=1, y=-0.2, #image position on chart
sizex=0.1, sizey=0.1, #image size on chart
xanchor="right", yanchor="bottom"
))
fig_ytd.update_layout(
autosize=True, height=650, width=1050,
legend_traceorder="reversed",
margin=dict(t=90, b=110, l=90, r=40),
title_font=dict(size=25, color='#111111', family="fjalla one, sans-serif"),
xaxis_title='', yaxis_title="",
plot_bgcolor='#ffffff',
paper_bgcolor='#ffffff',
font=dict(color='#111111', size=18, family="roboto, sans-serif"), #font of axis labels
)
#updates axes
fig_ytd.update_xaxes(showline=True, linewidth=2, linecolor='black', row=2, col=1 )
fig_ytd.update_yaxes(visible=False, row=1, col=1 ) #range=[0, 100] if need to set y-axis range
fig_ytd.update_yaxes(showline=True, linewidth=2, linecolor='black', row=2, col=1 )
fig_ytd.update_xaxes(tickangle=0, tickfont=dict(family='Roboto', color='black', size=24))
fig_ytd.update_yaxes(title='Cumulative Exports (US$)', title_font=dict(family='Roboto', color='black', size=20), row=2, col=1)
fig_ytd.update_yaxes(showgrid=True, gridwidth=1, gridcolor='#758D99')
###############
#title
fig_ytd.add_annotation(
text="Pakistan Towels Exports by Value",
font=dict(family='Fjalla one', color='#006BA2', size=36),
xref="paper", yref="paper",
x=0, y=1.18,
showarrow=False,
arrowhead=1)
#subtitle
fig_ytd.add_annotation(
text="current year vs. previous year",
font=dict(family='roboto', color='black', size=24),
xref="paper", yref="paper",
x=0, y=1.10,
showarrow=False,
arrowhead=1)
#data reference
fig_ytd.add_annotation(
text="Source: Pakistan Bureau of Statistics",
font=dict(family='Fjalla one', color='#758D99', size=20),
xref="paper", yref="paper",
x=0, y=-0.2,
showarrow=False,
arrowhead=1)
fig_ytd.update_layout(legend=dict(
orientation="h",
yanchor="bottom",
y=1.02,
xanchor="right",
x=1
))
st.plotly_chart(fig_ytd, use_container_width=True) # to show Figure; container width true makes fig. size responsive
#####################################
#####################################
# towels chart volume
#calculating year-to-date YTD exports
df_towels['Exports_YTD_vol'] = df_towels.groupby(['Fiscal Year'])['volume'].cumsum()
df_towels['pct_change_yoy_vol'] = df_towels.groupby(['month'])['Exports_YTD_vol'].pct_change()*100
#filtering data of fiscal year 2020-21
df_towels_2020_21 = df_towels.loc[df_towels['Fiscal Year'].isin(['2020-2021'])]
df_towels_2021_22 = df_towels.loc[df_towels['Fiscal Year'].isin(['2021-2022'])]
fig_ytd = make_subplots(rows=2, cols=1, shared_xaxes=True,
vertical_spacing=0.01,
row_heights=[0.20,0.80])
#fig_ytd = go.Figure()
#fig_ytd = make_subplots(specs=[[{'secondary_y': True}]], rows=1, cols=1) # to make subplot with 1 row and 1 col
# Add traces
fig_ytd.add_trace(go.Bar(x=df_towels_2020_21['month'], y=df_towels_2020_21['Exports_YTD_vol'],
name='Exports in 2020-21',
text=df_towels_2020_21['Exports_YTD_vol'],
textposition='auto',
texttemplate='%{text:,}',
hovertemplate='Exports to date: %{y}'
), row=2, col=1)
fig_ytd.add_trace(go.Scatter(x=df_towels_2021_22['month'], y=df_towels_2021_22['Exports_YTD_vol'],
mode='markers+lines+text',
marker=dict(size=16, color="Green"),
name='Exports in 2021-22',
text=df_towels_2021_22['Exports_YTD_vol'],
textposition='top center',
texttemplate="%{text:,}",
line=dict(color='Green', width=4),
hovertemplate='Exports to date: %{y}B'
), row=2, col=1)
fig_ytd.add_trace(go.Scatter(x=df_towels_2021_22['month'],
y=df_towels_2021_22['pct_change_yoy_vol'],
mode="lines+markers+text",
marker=dict(size=16, color="Red"),
name="%Change from previous year",
text=df_towels_2021_22['pct_change_yoy_vol'],
textposition='middle right',
texttemplate="%{text:.2f}%",
line=dict(color='Red', width=1, dash='dash'),
hovertemplate='%{y}',
), row=1, col=1 )
#fig_ytd.update_yaxes(title_text="Cumulative Exports in US$")
image = Image.open('logo.png')
#st.image(logo.png)
fig_ytd.add_layout_image(
dict(
source=image,
xref="paper", yref="paper",
x=1, y=-0.2, #image position on chart
sizex=0.1, sizey=0.1, #image size on chart
xanchor="right", yanchor="bottom"
))
fig_ytd.update_layout(
autosize=True, height=650, width=1050,
legend_traceorder="reversed",
margin=dict(t=90, b=110, l=90, r=40),
title_font=dict(size=25, color='#111111', family="fjalla one, sans-serif"),
xaxis_title='', yaxis_title="",
plot_bgcolor='#ffffff',
paper_bgcolor='#ffffff',
font=dict(color='#111111', size=18, family="roboto, sans-serif"), #font of axis labels
)
#updates axes
fig_ytd.update_xaxes(showline=True, linewidth=2, linecolor='black', row=2, col=1 )
fig_ytd.update_yaxes(visible=False, row=1, col=1 ) #range=[0, 100] if need to set y-axis range
fig_ytd.update_yaxes(showline=True, linewidth=2, linecolor='black', row=2, col=1 )
fig_ytd.update_xaxes(tickangle=0, tickfont=dict(family='Roboto', color='black', size=24))
fig_ytd.update_yaxes(title='Cumulative Exports (Metric Tons)', title_font=dict(family='Roboto', color='black', size=20), row=2, col=1)
fig_ytd.update_yaxes(showgrid=True, gridwidth=1, gridcolor='#758D99')
###############
#title
fig_ytd.add_annotation(
text="Pakistan Towels Exports by Volume",
font=dict(family='Fjalla one', color='#006BA2', size=36),
xref="paper", yref="paper",
x=0, y=1.18,
showarrow=False,
arrowhead=1)
#subtitle
fig_ytd.add_annotation(
text="current year vs. previous year",
font=dict(family='roboto', color='black', size=24),
xref="paper", yref="paper",
x=0, y=1.10,
showarrow=False,
arrowhead=1)
#data reference
fig_ytd.add_annotation(
text="Source: Pakistan Bureau of Statistics",
font=dict(family='Fjalla one', color='#758D99', size=20),
xref="paper", yref="paper",
x=0, y=-0.2,
showarrow=False,
arrowhead=1)
fig_ytd.update_layout(legend=dict(
orientation="h",
yanchor="bottom",
y=1.02,
xanchor="right",
x=1
))
st.plotly_chart(fig_ytd, use_container_width=True) # to show Figure; container width true makes fig. size responsive
#####################################
# towels chart export price
df_towels['pct_change_yoy_price'] = df_towels.groupby(['month'])['unit_price'].pct_change()*100
df_towels_2020_21 = df_towels.loc[df_towels['Fiscal Year'].isin(['2020-2021'])]
df_towels_2021_22 = df_towels.loc[df_towels['Fiscal Year'].isin(['2021-2022'])]
fig_ytd = make_subplots(rows=2, cols=1, shared_xaxes=True,
vertical_spacing=0.01,
row_heights=[0.20,0.80])
#fig_ytd = go.Figure()
#fig_ytd = make_subplots(specs=[[{'secondary_y': True}]], rows=1, cols=1) # to make subplot with 1 row and 1 col
# Add traces
fig_ytd.add_trace(go.Bar(x=df_towels_2020_21['month'], y=df_towels_2020_21['unit_price'],
name='Price in 2020-21',
text=df_towels_2020_21['unit_price'],
textposition='auto',
texttemplate='$%{text:.2f}',
hovertemplate='Exports price:$%{y}'
), row=2, col=1)
fig_ytd.add_trace(go.Scatter(x=df_towels_2021_22['month'], y=df_towels_2021_22['unit_price'],
mode='markers+lines+text',
marker=dict(size=16, color="Green"),
name='Price in 2021-22',
text=df_towels_2021_22['unit_price'],
textposition='bottom right',
texttemplate="$%{text:.2f}",
line=dict(color='Green', width=4),
hovertemplate='Exports price:$%{y}B'
), row=2, col=1)
fig_ytd.add_trace(go.Scatter(x=df_towels_2021_22['month'],
y=df_towels_2021_22['pct_change_yoy_price'],
mode="lines+markers+text",
marker=dict(size=16, color="Red"),
name="%Change from previous year",
text=df_towels_2021_22['pct_change_yoy_price'],
textposition='middle right',
texttemplate="%{text:.2s}%",
line=dict(color='Red', width=1, dash='dash'),
hovertemplate='%{y}',
), row=1, col=1 )
#fig_ytd.update_yaxes(title_text="Cumulative Exports in US$")
image = Image.open('logo.png')
#st.image(logo.png)
fig_ytd.add_layout_image(
dict(
source=image,
xref="paper", yref="paper",
x=1, y=-0.2, #image position on chart
sizex=0.1, sizey=0.1, #image size on chart
xanchor="right", yanchor="bottom"
))
fig_ytd.update_layout(
autosize=True, height=650, width=1050,
legend_traceorder="reversed",
margin=dict(t=90, b=110, l=90, r=40),
title_font=dict(size=25, color='#111111', family="fjalla one, sans-serif"),
xaxis_title='', yaxis_title="",
plot_bgcolor='#ffffff',
paper_bgcolor='#ffffff',
font=dict(color='#111111', size=18, family="roboto, sans-serif"), #font of axis labels
)
#updates axes
fig_ytd.update_xaxes(showline=True, linewidth=2, linecolor='black', row=2, col=1 )
fig_ytd.update_yaxes(visible=False, row=1, col=1 ) #range=[0, 100] if need to set y-axis range
fig_ytd.update_yaxes(showline=True, linewidth=2, linecolor='black', row=2, col=1 )
fig_ytd.update_xaxes(tickangle=0, tickfont=dict(family='Roboto', color='black', size=24))
fig_ytd.update_yaxes(title='US$ per Kg', title_font=dict(family='Roboto', color='black', size=20), row=2, col=1)
fig_ytd.update_yaxes(showgrid=True, gridwidth=1, gridcolor='#758D99')
###############
#title
fig_ytd.add_annotation(
text="Pakistan Towels Average Export Price",
font=dict(family='Fjalla one', color='#006BA2', size=36),
xref="paper", yref="paper",
x=0, y=1.18,
showarrow=False,
arrowhead=1)
#subtitle
fig_ytd.add_annotation(
text="current year vs. previous year",
font=dict(family='roboto', color='black', size=24),
xref="paper", yref="paper",
x=0, y=1.10,
showarrow=False,
arrowhead=1)
#data reference
fig_ytd.add_annotation(
text="Source: Pakistan Bureau of Statistics",
font=dict(family='Fjalla one', color='#758D99', size=20),
xref="paper", yref="paper",
x=0, y=-0.2,
showarrow=False,
arrowhead=1)
fig_ytd.update_layout(legend=dict(
orientation="h",
yanchor="bottom",
y=1.02,
xanchor="right",
x=1
))
st.plotly_chart(fig_ytd, use_container_width=True) # to show Figure; container width true makes fig. size responsive
#####################################
elif page == 'Total Textile Exports':
#st.title("Pakistan Textile Exports")
#importing csv file as dataframe
df = pd.read_csv('monthly_textile_exports_pbs.csv')
# -*- coding: utf-8 -*-
"""
Created on Wed Jan 13 14:39:19 2021
@author: <NAME>
"""
from multiprocessing import freeze_support
import pickle
import numpy as np
import pandas as pd
from evaluate_utils import sklearn_pipeline_evaluator, get_cv
from ml_models import svc_selector_pipeline, svc_selector_grid
from ml_models import lr_selector_pipeline, lr_selector_grid
from ml_models import qda_selector_pipeline, qda_selector_grid
from ml_models import gpr_selector_pipeline, gpr_selector_grid
from constants import RANDOM_STATE, N_SPLITS
import warnings
warnings.filterwarnings("ignore", message="The objective has been evaluated at this point before.")
warnings.simplefilter('always', category=UserWarning)
N_SPLITS_OUTER = 25
SHUFFLE_SPLIT = True
def drop_the_right_rows(df, output_covariate, covariates_to_check):
# first drop the rows where there is no output
df = df.copy()
df = df.loc[~pd.isnull(df[output_covariate])]
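# Hypothetical continuation (assumed, not in the original snippet): also drop
# rows missing any of the covariates to check, then return the filtered frame.
for covariate in covariates_to_check:
df = df.loc[~pd.isnull(df[covariate])]
return df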
"""
This file creates benchmarks that are used to evaluate the various generators. A benchmark consists of one or more
DNN models (in keras format) for which we have one or more tuples of <images, labels>.
Date:
July 5, 2020
Project:
ECE653 Final Project: Check Your Perturbations
Authors:
name: <NAME>, <NAME>, <NAME>
contact: <EMAIL>
"""
from enum import Enum
from tensorflow import keras
import numpy as np
import pandas as pd
class BenchmarkEnums(Enum):
""" This is an enum that contains all the different benchmarks. """
Demo = {
"models": ["./../src/data/models/MNIST/regularCNN"],
"images": "./../src/data/images/MNIST/demo.npy",
"similarityType": "l2",
"similarityMeasure": 10,
"timeLimit": 50
}
Main = {
"models": ["./../src/data/models/MNIST/regularFCNN", "./../src/data/models/MNIST/robustFCNN"],
"images": "./../src/data/images/MNIST/demo.npy",
"similarityType": "l2",
"similarityMeasure": 10,
"timeLimit": 600
}
MainSimilar = {
"models": ["./../src/data/models/MNIST/regularFCNN", "./../src/data/models/MNIST/robustFCNN"],
"images": "./../src/data/images/MNIST/demo.npy",
"similarityType": "l2",
"similarityMeasure": 5,
"timeLimit": 600
}
Thermometer = {
"models": ["./../src/data/models/MNIST/thermometerCNN"],
"images": "./../src/data/images/MNIST/demo.npy",
"similarityType": "l2",
"similarityMeasure": 10,
"timeLimit": 50
}
MainCNN = {
"models": ["./../src/data/models/MNIST/regularCNN", "./../src/data/models/MNIST/robustCNN"],
"images": "./../src/data/images/MNIST/demo.npy",
"similarityType": "l2",
"similarityMeasure": 10,
"timeLimit": 600
}
def __str__(self):
return self.value
class Benchmark:
""" This class contains a given benchmark. """
def __init__(self, benchmarkType, similarityType=None, similarityMeasure=None, verbose=False):
"""
Standard init function.
:param benchmarkType: Enum that is found in BenchmarkEnums.
"""
if benchmarkType not in BenchmarkEnums:
raise Exception(f"type: {benchmarkType} not in benchmark.")
self.name = benchmarkType.name
self.type = benchmarkType.value
self.data = pd.DataFrame(columns=['modelName', 'model', 'image', 'label'])
# -*- coding: utf-8 -*-
"""
Created on Fri Mar 1 13:37:10 2019
@author:Imarticus Machine Learning Team
"""
import numpy as np
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
import seaborn as sns
color = sns.color_palette()
pd.options.mode.chained_assignment = None # default='warn'
order_products_test_df = pd.read_csv("order_products_test.csv")
import pandas as pd
from sklearn.ensemble import BaggingRegressor
from sklearn.ensemble import RandomForestRegressor
from sklearn.ensemble import ExtraTreesRegressor
from sklearn.ensemble import GradientBoostingRegressor
from xgboost import XGBRegressor
from sklearn.preprocessing import PolynomialFeatures as pf
from sklearn import linear_model as lm
from sklearn.model_selection import RepeatedKFold
rkf = RepeatedKFold(n_splits=10, n_repeats=10)
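# 10-fold cross-validation repeated 10 times gives 100 train/test splits in total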
data = pd.read_csv('C:\\Users\\<NAME>\\Documents\\Research Projects\\Forecast of Rainfall Quantity and its variation using Envrionmental Features\\Data\\Normalized & Combined Data\\All Districts.csv')
data = data.drop(columns=['Index', 'District'])
base = [RandomForestRegressor(n_estimators=100, max_depth=10), ExtraTreesRegressor(n_estimators=90, max_depth=15), GradientBoostingRegressor(n_estimators=60, max_depth=5), XGBRegressor(n_estimators=50, max_depth=5), BaggingRegressor(n_estimators=50, base_estimator=lm.LinearRegression())]
name = ['RFR', 'ETR', 'GBR', 'XGBR', 'BAR']
df1 = pd.DataFrame()
c = 0
for tr_i, ts_i in rkf.split(data):
train, test = data.iloc[tr_i], data.iloc[ts_i]
train_x = train.drop(columns=['Rainfall'])
train_y = train['Rainfall']
test_x = test.drop(columns=['Rainfall'])
test_y = test['Rainfall']
d1 = {}
for i, j in zip(base, name):
print(j, c)
if j == 'BAR':
poly = pf(degree=4)
train_x = poly.fit_transform(train_x)
test_x = poly.fit_transform(test_x)
model = i
model.fit(train_x, train_y)
ts_p = model.predict(test_x)
d1[j] = list(ts_p)
d1['Actual'] = list(test_y)
df_ts = pd.DataFrame(d1, columns=['RFR', 'ETR', 'GBR', 'XGBR', 'BAR', 'Actual'])
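# Hypothetical continuation (assumed, not in the original snippet): collect this
# split's predictions and advance the split counter initialised above.
df1 = pd.concat([df1, df_ts], ignore_index=True)
c += 1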
import argparse
from argparse import Namespace
from pathlib import Path
from omegaconf import OmegaConf
import pandas as pd
from sklearn.model_selection import train_test_split
def create_dataset(filepath: Path) -> pd.DataFrame:
with open(filepath, mode="rb") as io:
list_of_sentences = io.readlines()
data = []
for sentence in list_of_sentences:
try:
decoded_sentence = sentence.strip().decode("utf-8")
label = int(decoded_sentence[0])
document = decoded_sentence[2:]
data.append({"document": document, "label": label})
except UnicodeDecodeError:
continue
return pd.DataFrame(data)
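# Hypothetical usage sketch (assumed, not in the original snippet):
# corpus = create_dataset(Path("train.txt"))
# train, validation = train_test_split(corpus, test_size=0.2, random_state=42)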
import pandas as pd
import os
import csv
import gdal
import numpy as np
import re
def ad_industry_profiles_dict(dicts):
dict_names = ["load_profile_industry_chemicals_and_petrochemicals_yearlong_2018",
"load_profile_industry_food_and_tobacco_yearlong_2018",
"load_profile_industry_iron_and_steel_yearlong_2018",
"load_profile_industry_non_metalic_minerals_yearlong_2018",
"load_profile_industry_paper_yearlong_2018"]
data = []
for name, dictionary in zip(dict_names, dicts):
raw_data = pd.DataFrame(dictionary[name])
raw_data = raw_data.loc[:, ("NUTS0_code", "process", "hour", "load")]
raw_data["load"] = pd.to_numeric(raw_data["load"])
raw_data["hour"] = | pd.to_numeric(raw_data["hour"]) | pandas.to_numeric |
#!/usr/bin/env python3
import glob
from math import log
import os, os.path
import scipy.stats
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
import numpy
def cognate_class_count_plot():
# Actual class counts
cognate_counts = []
with open("uralex_counts.csv","r") as fp:
for line in fp:
meaning, cognates = line.strip().split(",")
cognate_counts.append(int(cognates))
# Best fitting distribution
nbinom_support = range(0,max(cognate_counts)+2)
nbinom_probs = [scipy.stats.nbinom(9,0.49).pmf(n) for n in nbinom_support]
plt.figure(figsize=(12,6))
fig, ax1 = plt.subplots()
ax2 = ax1.twinx()
ax1.hist(cognate_counts,bins=range(1,max(cognate_counts)+2), align="left")
ax1.set_xticks([5,10,15,20])
ax1.set_xlim(0,max(cognate_counts)+1.5)
ax1.set_xlabel("Cognate classes", fontsize=12)
ax1.set_ylabel("Meanings in UraLex", fontsize=12)
ax2.stem(nbinom_support,nbinom_probs, linefmt="C1-", markerfmt="C1o")
a, b = ax2.get_ylim()
ax2.set_ylim(0.0,b)
ax2.set_ylabel("Negative binomial probability", fontsize=12)
plt.tight_layout()
plt.savefig("plots/cognate_dist.png")
def tiger_rate_dist_plot():
rates = []
with open("analyses/uralex/uralex_rates.txt","r") as fp:
for line in fp:
rates.extend([float(line.strip().split()[-1])])
fig, ax = plt.subplots()
ax.hist(rates,19)
ax.set_xlabel("TIGER value")
ax.set_ylabel("Number of meanings")
plt.tight_layout()
plt.savefig("plots/uralex_rates_dist.png")
def tiger_rate_plot():
dfs = []
data_models = glob.glob("analyses/*")
data_names = []
for model in data_models:
name = os.path.split(model)[-1]
data_names.append(name)
rates_files = glob.glob(os.path.join(model, "*rates.txt"))
rates = []
for rates_file in rates_files:
with open(rates_file, "r") as fp:
rates.extend([float(x.strip().split()[-1]) for x in fp.readlines()])
df = pd.DataFrame({name: rates})
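# Hypothetical continuation (assumed, not in the original snippet): one column
# of TIGER rates per data model, collected for later use.
dfs.append(df)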
import pytest
from pandas import DataFrame
from tests.utils import assert_dataframes_equals
from weaverbird.backends.pandas_executor.steps.filter import execute_filter
from weaverbird.pipeline.conditions import ComparisonCondition
from weaverbird.pipeline.steps import FilterStep
@pytest.fixture
def sample_df():
return DataFrame({'colA': ['toto', 'tutu', 'tata'], 'colB': [1, 2, 3], 'colC': [100, 50, 25]})
@pytest.mark.parametrize('value', [0, False, True, 0.0, 1, 1.5, '', '0', None, [], [0], '0.0'])
def test_simple_condition_valid_values(value) -> None:
# Ensure pydantic cast does not change types for `value` field:
sc = ComparisonCondition(column='x', operator='eq', value=value)
result_value = sc.value
assert value == result_value
assert type(value) == type(result_value)
def test_simple_eq_filter(sample_df):
step = FilterStep(
name='filter',
condition={
'column': 'colA',
'operator': 'eq',
'value': 'tutu',
},
)
df_result = execute_filter(step, sample_df)
assert_dataframes_equals(df_result, DataFrame({'colA': ['tutu'], 'colB': [2], 'colC': [50]}))
def test_simple_ne_filter(sample_df):
step = FilterStep(
name='filter',
condition={
'column': 'colA',
'operator': 'ne',
'value': 'tutu',
},
)
df_result = execute_filter(step, sample_df)
assert_dataframes_equals(
df_result, DataFrame({'colA': ['toto', 'tata'], 'colB': [1, 3], 'colC': [100, 25]})
)
def test_simple_gt_filter(sample_df):
step = FilterStep(
name='filter',
condition={
'column': 'colB',
'operator': 'gt',
'value': 2,
},
)
df_result = execute_filter(step, sample_df)
assert_dataframes_equals(df_result, DataFrame({'colA': ['tata'], 'colB': [3], 'colC': [25]}))
def test_simple_ge_filter(sample_df):
step = FilterStep(
name='filter',
condition={
'column': 'colB',
'operator': 'ge',
'value': 2,
},
)
df_result = execute_filter(step, sample_df)
assert_dataframes_equals(
df_result, DataFrame({'colA': ['tutu', 'tata'], 'colB': [2, 3], 'colC': [50, 25]})
)
def test_simple_lt_filter(sample_df):
step = FilterStep(
name='filter',
condition={
'column': 'colB',
'operator': 'lt',
'value': 2,
},
)
df_result = execute_filter(step, sample_df)
assert_dataframes_equals(df_result, DataFrame({'colA': ['toto'], 'colB': [1], 'colC': [100]}))
def test_simple_le_filter(sample_df):
step = FilterStep(
name='filter',
condition={
'column': 'colB',
'operator': 'le',
'value': 2,
},
)
df_result = execute_filter(step, sample_df)
assert_dataframes_equals(
df_result, DataFrame({'colA': ['toto', 'tutu'], 'colB': [1, 2], 'colC': [100, 50]})
)
def test_simple_in_filter(sample_df):
step = FilterStep(
name='filter',
condition={
'column': 'colA',
'operator': 'in',
'value': ['toto', 'tutu'],
},
)
df_result = execute_filter(step, sample_df)
assert_dataframes_equals(
df_result, DataFrame({'colA': ['toto', 'tutu'], 'colB': [1, 2], 'colC': [100, 50]})
)
def test_simple_nin_filter(sample_df):
step = FilterStep(
name='filter',
condition={
'column': 'colA',
'operator': 'nin',
'value': ['toto', 'tutu'],
},
)
df_result = execute_filter(step, sample_df)
assert_dataframes_equals(df_result, DataFrame({'colA': ['tata'], 'colB': [3], 'colC': [25]}))
import numpy as np
import pandas as pd
import os
from collections import Counter
from scipy.stats import hypergeom
fdr_threshold = 0.05
def main():
os.makedirs('results/enrichment', exist_ok=True)
os.makedirs('results/GO', exist_ok=True)
# LOAD
# single cell gene data
all_gene_data = pd.read_csv('data/gene_lists/all-scRNA-data.csv')
# normalised RPKM bulk data corrected for age, sex, etc
bulk_data = pd.read_csv('data/processed_psychencode/PsychENCODE-prenatal-bulk-RPKM-data-scRNA-filtered-Winsor-log2-residualised.csv')
# gene-wise correlation with PC components
correlation_results = pd.read_csv('results/gene_correlations/PCA_correlations-KendallTau-residualisedRPKM.csv')
# fetal background geneset = all filtered genes in bulk data
background_genes = pd.read_csv('data/gene_lists/background_genes.txt', header=None)
import os
import json
import random
import pandas as pd
import numpy as np
from split_otus import split_otus, write_kmer_out, calc_kmer_feat
from config import cfg
def create_lsa_from_sparcc(sparcc_thresh):
path = os.path.join(cfg.DATA_DIR, cfg.SPARCC_FILE)
sparcc_data = pd.read_csv(path, sep='\t', index_col=0)
cols = sparcc_data.columns.values.tolist()
rows = sparcc_data.index.values.tolist()
col_otu_idx = list(map(lambda x: int(x[4:]), cols))
row_otu_idx = list(map(lambda x: int(x[4:]), rows))
sparcc_mat = sparcc_data.to_numpy()
edge_idx = np.where((sparcc_mat >= sparcc_thresh)|(sparcc_mat<=-sparcc_thresh) )
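# keep only OTU pairs whose SparCC correlation magnitude reaches the threshold (either sign)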
row_otu_idx = np.array(row_otu_idx)
col_otu_idx = np.array(col_otu_idx)
otu_src_idx = row_otu_idx[edge_idx[0]]
otu_dst_idx = col_otu_idx[edge_idx[1]]
# write like lsa format
edge_data = pd.DataFrame({'index1': otu_src_idx, 'index2': otu_dst_idx})
edge_data.to_csv(os.path.join(cfg.DATA_DIR, cfg.LSA_EDGE_FILE), sep='\t')
def create_id_map():
# to avoid otu not in kmer feats
feats = calc_kmer_feat(cfg.KMER_LENGH)
path = os.path.join(cfg.DATA_DIR, cfg.SPARCC_FILE)
sparcc_data = pd.read_csv(path, sep='\t', index_col=0)
cols = sparcc_data.columns.values.tolist()
otu_idx = list(map(lambda x: int(x[4:]), cols))
nodes=list(set(otu_idx))
random.shuffle(nodes)
id_map = {}
i = 0
for idx in nodes:
key = 'OTU_'+str(idx)
if key not in feats.keys():
continue
id_map[key] = i
i += 1
if not os.path.isdir(cfg.OUTPUT_DIR):
os.makedirs(cfg.OUTPUT_DIR)
with open(os.path.join(cfg.OUTPUT_DIR, cfg.ID_MAP_FILE), 'w') as f:
json.dump(id_map, f)
def load_id_map():
with open(os.path.join(cfg.OUTPUT_DIR, cfg.ID_MAP_FILE), 'r') as f:
map = json.load(f)
return map
def create_edge_from_lsa():
path = os.path.join(cfg.DATA_DIR, cfg.LSA_EDGE_FILE)
lsa_data = pd.read_csv(path, sep='\t')
src_nodes = lsa_data.loc[:, 'index1'].values.tolist()
dst_nodes = lsa_data.loc[:, 'index2'].values.tolist()
edges = []
id_map = load_id_map()
for src_node, dst_node in zip(src_nodes, dst_nodes):
src_node = 'OTU_' + str(src_node)
dst_node = 'OTU_'+str(dst_node)
# to avoid not in
if src_node not in id_map.keys() or dst_node not in id_map.keys():
continue
src_idx = id_map[src_node]
dst_idx = id_map[dst_node]
edges.append([src_idx, dst_idx])
return edges
def create_graph(id2idx, edges):
graph = {}
graph["directed"] = False
graph['graph'] = {"name": "disjoint_union( , )"}
nodes = []
train_num = int(len(id2idx) * 0.7)
val_num = int(len(id2idx) * 0.2)
# features=read_kmer_feat()
# TODO: the label is not used?
# labels=create_class_map()
for i, (id, idx) in enumerate(id2idx.items()):
is_test = False
is_val = False
if i > train_num and i < (train_num + val_num):
is_val = True
elif i >= (train_num + val_num):
is_test = True
# feature = features[id]
# label=labels[id]
node = {"test": is_test,
'id': id,
'feature': None,
'val': is_val,
'label': None
}
nodes.append(node)
links = []
for src, dst in edges:
link = {'test_removed': False,
'train_removed': False,
'target': dst,
'source': src
}
links.append(link)
graph['nodes'] = nodes
graph['links'] = links
graph['multigraph'] = False
with open(os.path.join(cfg.OUTPUT_DIR, cfg.GRAPH_FILE), 'w') as f:
json.dump(graph, f)
def create_features():
features = calc_kmer_feat(cfg.KMER_LENGH)
id_map = load_id_map()
filter_features = []
for id in id_map.keys():
filter_features.append(features[id])
np.save(os.path.join(cfg.OUTPUT_DIR, cfg.FEATURE_FILE), filter_features)
def create_class_map(low_thresh=None,high_thresh=None):
id_map = load_id_map()
path = os.path.join(cfg.DATA_DIR, cfg.KO_PREDICTED_FILE)
# need index_col parameter
label_data = pd.read_csv(path, sep='\t', index_col=0)
import pandas as pd
import seaborn as sb
from sklearn.linear_model import LinearRegression
from statsmodels.graphics.tsaplots import plot_acf
from statsmodels.graphics.tsaplots import plot_pacf
from statsmodels.tsa.seasonal import seasonal_decompose
import numpy as np
import pmdarima
import matplotlib.pyplot as plt
import matplotlib.colors
import math
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import LSTM
from sklearn.metrics import mean_squared_error
import sklearn.metrics as metrics
from sklearn.cluster import KMeans
from sklearn.preprocessing import MinMaxScaler
from sklearn.metrics import silhouette_score
import tsfel
#MAIN
size=(7,5)
plt.style.use('seaborn')
sb.set_style("darkgrid")
color_list = ['blue','red','green']
#cmap = matplotlib.colors.LinearSegmentedColormap.from_list(cluster_values, color_list)
#IMPORT
df=pd.read_csv('Data kategorier.csv',skiprows=2,index_col=['READ_TIME_Date'])
df.index = pd.to_datetime(df.index)
# 1) Weekday vs Weekend for apartments and houses
Houses=df.iloc[:,5:8]
Houses.columns=['sum','antal','average']
Houses['Type']='House'
Apartments=df.iloc[:,92:95]
Apartments.columns=['sum','antal','average']
Apartments['Type']='Apartment'
data=pd.concat([Houses,Apartments])
data['dayn']=data.index.weekday
data['Day']=data.dayn.apply(lambda x: 'weekend' if x>=5 else 'weekday')
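#pandas weekday runs 0-6 (Monday-Sunday), so 5 and 6 mark Saturday and Sunday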
plt.figure(figsize=size)
ax=sb.lineplot(data.index.hour,data.average,hue=data.Type,style=data.Day)
ax.set(xlabel='Hour', ylabel='Average household consumption [kW]')
plt.xlabel('Hour')
plt.ylabel('Average household consumption [kW]')
plt.show()
# 2) Age over months
A1 = df.loc[:,df.columns.isin(k for k in df.columns if 'Average' in k and 'a1' in k)]
A1['Average']=A1.mean(axis=1)
A1['Age']='18-30'
A2 = df.loc[:,df.columns.isin(k for k in df.columns if 'Average' in k and 'a2' in k)]
A2['Average']=A2.mean(axis=1)
A2['Age']='30-65'
A3 = df.loc[:,df.columns.isin(k for k in df.columns if 'Average' in k and 'a3' in k)]
A3['Average']=A3.mean(axis=1)
A3['Age']='65+'
data=pd.concat([A1.iloc[:,-2:],A2.iloc[:,-2:],A3.iloc[:,-2:]])
data['Month']=data.index.month_name(locale='French')
data['Monthn']=data.index.month
data['Season']=data.Monthn.apply(lambda x: 'Winter' if x in [12,1,2] else('Spring' if x in [3,4,5] else('Summer' if x in [6,7,8] else ('Fall'))))
plt.figure(figsize=size)
ax=sb.lineplot(data.index.hour,data.Average,hue=data.Age,style=data.Season, ci=None)
ax.set(xlabel='Hour', ylabel='Average household consumption [kW]')
plt.show()
# 3) Number of children vs Weekend and age 30-64
C0 = df.loc[:,df.columns.isin(k for k in df.columns if 'Average' in k and '0b' in k and 'a2' in k)]
C0['Average']=C0.mean(axis=1)
C0['Children']='No'
C1 = df.loc[:,df.columns.isin(k for k in df.columns if 'Average' in k and 'mb' in k and 'a2' in k)]
C1['Average']=C1.mean(axis=1)
C1['Children']='Yes'
data=pd.concat([C0.iloc[:,-2:],C1.iloc[:,-2:]])
data['dayn']=data.index.weekday
data['Day']=data.dayn.apply(lambda x: 'weekend' if x>=5 else 'weekday')
plt.figure(figsize=(10,5))
ax = plt.subplot(122)
ax.xaxis.set_major_locator(plt.MaxNLocator(6))
ax = sb.lineplot(data.index.hour,data.Average,hue=data.Children,style=data.Day, ci=None)
ax.set(xlabel='Hour', ylabel='')
plt.axvline(x=18)
ax.title.set_text('Age: 30-64')
plt.show()
#Age 18-30
C0 = df.loc[:,df.columns.isin(k for k in df.columns if 'Average' in k and '0b' in k and 'a1' in k)]
C0['Average']=C0.mean(axis=1)
C0['Children']='No'
C1 = df.loc[:,df.columns.isin(k for k in df.columns if 'Average' in k and 'mb' in k and 'a1' in k)]
C1['Average']=C1.mean(axis=1)
C1['Children']='Yes'
data1 = pd.concat([C0.iloc[:,-2:],C1.iloc[:,-2:]])
# code inspired from: https://keras.io/examples/generative/vae/
import numpy as np
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
from keras.callbacks import EarlyStopping
from keras.callbacks import ModelCheckpoint
import os
import pandas as pd
from collections import Counter
from imblearn.datasets import make_imbalance
from keras.models import load_model
from keras.utils import to_categorical
from sklearn.utils import shuffle
class Sampling(layers.Layer):
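"""Reparameterization trick: z = z_mean + exp(0.5 * z_log_var) * eps, with eps drawn from a standard normal."""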
def call(self, inputs):
z_mean, z_log_var = inputs
batch = tf.shape(z_mean)[0]
dim = tf.shape(z_mean)[1]
epsilon = tf.keras.backend.random_normal(shape=(batch, dim))
return z_mean + tf.exp(0.5 * z_log_var) * epsilon
latent_dim = 2
encoder_inputs = keras.Input(shape=(30, 30, 1))
x = layers.Conv2D(8, 4, activation="relu", strides=2, padding="same")(encoder_inputs)
x = layers.Conv2D(16, 4, activation="relu", strides=2, padding="same")(x)
x = layers.Flatten()(x)
x = layers.Dense(8, activation="relu")(x)
z_mean = layers.Dense(latent_dim, name="z_mean")(x)
z_log_var = layers.Dense(latent_dim, name="z_log_var")(x)
z = Sampling()([z_mean, z_log_var])
encoder = keras.Model(encoder_inputs, [z_mean, z_log_var, z], name="encoder")
encoder.summary()
latent_inputs = keras.Input(shape=(latent_dim,))
x = layers.Dense(8 * 8 * 16, activation="relu")(latent_inputs)
x = layers.Reshape((8, 8, 16))(x)
x = layers.Conv2DTranspose(16, 4, activation="relu", strides=2, padding="same", output_padding=1)(x)
x = layers.Conv2DTranspose(8, 4, activation="relu", strides=2, padding="same")(x)
decoder_outputs = layers.Conv2DTranspose(1, 3, activation="sigmoid", padding="same")(x)
decoder = keras.Model(latent_inputs, decoder_outputs, name="decoder")
decoder.summary()
class VAE(keras.Model):
def __init__(self, encoder, decoder, **kwargs):
super(VAE, self).__init__(**kwargs)
self.encoder = encoder
self.decoder = decoder
self.total_loss_tracker = keras.metrics.Mean(name="total_loss")
self.reconstruction_loss_tracker = keras.metrics.Mean(
name="reconstruction_loss"
)
self.kl_loss_tracker = keras.metrics.Mean(name="kl_loss")
@property
def metrics(self):
return [
self.total_loss_tracker,
self.reconstruction_loss_tracker,
self.kl_loss_tracker,
]
def train_step(self, data):
with tf.GradientTape() as tape:
z_mean, z_log_var, z = self.encoder(data)
reconstruction = self.decoder(z)
reconstruction_loss = tf.reduce_mean(
tf.reduce_sum(
keras.losses.binary_crossentropy(data, reconstruction), axis=(1, 2)
)
)
kl_loss = -0.5 * (1 + z_log_var - tf.square(z_mean) - tf.exp(z_log_var))
kl_loss = tf.reduce_mean(tf.reduce_sum(kl_loss, axis=1))
total_loss = reconstruction_loss + kl_loss
grads = tape.gradient(total_loss, self.trainable_weights)
self.optimizer.apply_gradients(zip(grads, self.trainable_weights))
self.total_loss_tracker.update_state(total_loss)
self.reconstruction_loss_tracker.update_state(reconstruction_loss)
self.kl_loss_tracker.update_state(kl_loss)
return {
"loss": self.total_loss_tracker.result(),
"reconstruction_loss": self.reconstruction_loss_tracker.result(),
"kl_loss": self.kl_loss_tracker.result(),
}
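# train_step above minimizes the negative ELBO: the reconstruction term is the
# binary cross-entropy summed over the 30x30 grid and averaged over the batch,
# and the regularizer is the analytic KL divergence between q(z|x) =
# N(z_mean, exp(z_log_var)) and the standard-normal prior,
# KL = -0.5 * sum(1 + z_log_var - z_mean^2 - exp(z_log_var)).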
def test_step(self, data):
if isinstance(data, tuple):
data = data[0]
z_mean, z_log_var, z = self.encoder(data)
reconstruction = self.decoder(z)
reconstruction_loss = tf.reduce_mean(
keras.losses.binary_crossentropy(data, reconstruction)
)
reconstruction_loss *= 30 * 30
kl_loss = 1 + z_log_var - tf.square(z_mean) - tf.exp(z_log_var)
kl_loss = tf.reduce_mean(kl_loss)
kl_loss *= -0.5
total_loss = reconstruction_loss + kl_loss
return {
"loss": total_loss,
"reconstruction_loss": reconstruction_loss,
"kl_loss": kl_loss,
}
minorities = [int(0), int(4), int(5)]
for minority in minorities:
os.chdir('/content/gdrive/My Drive/training_testing_data/')
train = pd.read_csv('train_data_rp_3_IMBALANCED.csv')
train.columns = [*train.columns[:-1], 'zone']
train = train[train['zone'] == minority]
train = train.sample(frac=1).reset_index(drop=True)
x_train = train.iloc[:, :-1]
x_train = x_train.values
Y_train = train.iloc[:, -1:]
Y_train = Y_train.values
x_train = x_train.reshape((x_train.shape[0], 30, 30, 1))
vae = VAE(encoder, decoder)
vae.compile(optimizer=keras.optimizers.Adam(learning_rate=0.0001))
# VAE is a subclassed model, so checkpoint its weights only (whole-model HDF5
# saving is not supported for subclassed models).
my_callbacks = [EarlyStopping(patience=3000), ModelCheckpoint('model_vae.h5', monitor='val_loss', verbose=2,
save_best_only=True, save_weights_only=True)]
vae.fit(x_train, epochs=30000, batch_size=23, verbose=2, callbacks=my_callbacks, shuffle=True,
validation_split=0.1)
decoder.save('VAE_DECODER_model_900_CNN_' + str(minority) + '.h5')
print(str(minority) + "DONE...")
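# Oversampling strategy used here: one VAE is fitted per minority zone (0, 4, 5)
# on that zone's rows only, and only its decoder is kept. Synthetic minority
# samples are produced below by decoding latent points drawn from N(0, I).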
os.chdir('/content/gdrive/My Drive/training_testing_data/')
n_pixels = 900
train = pd.read_csv('train_data_rp_3_IMBALANCED.csv')
x_train = train.iloc[:, :-1]
x_train = x_train.values
y_train = train.iloc[:, -1:]
y_train = y_train.values
train_imbalanced = np.hstack([x_train, y_train])
def sample_latent_space(minority):
print("working on minority: ", minority)
n_sample = 6732
mu, sigma = 0.0, 1.0
z1 = np.random.normal(mu, sigma, n_sample)
z2 = np.random.normal(mu, sigma, n_sample)
decoder = load_model('VAE_DECODER_model_900_CNN_' + str(minority) + '.h5')
decoded_all = np.zeros((1, n_pixels))
for i in range(len(z1)):
z_sample = np.array([[z1[i], z2[i]]])
x_decoded = decoder.predict(z_sample)
decoded = x_decoded.reshape((1, n_pixels))
decoded_all = np.vstack([decoded_all, decoded])
decoded_all = np.delete(decoded_all, (0), axis=0)
zones = np.ones((6732, 1))
zones = zones * minority
decoded_all = np.hstack([decoded_all, zones])
decoded_all = pd.DataFrame(decoded_all)
import tarfile
import logging
import os
import pandas as pd
import shutil
import torch
import tqdm
import urllib.request
import numpy as np
from pathlib import Path
from sklearn.model_selection import train_test_split
from torch.utils.data import Dataset, DataLoader
from typing import List, Dict
from collections.abc import Iterable
pd.options.mode.chained_assignment = None # ignore annoying pandas warnings
DATASETS = ['apparel', 'baby', 'books', 'camera_photo', 'electronics',
'health_personal_care', 'imdb', 'kitchen_housewares', 'magazines',
'music', 'software', 'sports_outdoors', 'toys_games', 'video', 'MR', 'dvd']
class SingleTaskDataset(Dataset):
def __init__(self, data):
"""Simple wrapper dataset around a DataFrame."""
self.data = data
def __getitem__(self, idx):
return self.data.loc[idx, :].to_dict()
def __len__(self):
return len(self.data)
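# Usage sketch (hypothetical data): items come back as plain dicts, e.g.
#   SingleTaskDataset(pd.DataFrame({'label': [1], 'text': ['great book']}))[0]
# returns {'label': 1, 'text': 'great book'}.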
class MultiTaskDataset(Dataset):
def __init__(self, tokenizer, data_dir='data/mtl-dataset/', split: str = 'train', collapse_domains: bool = True,
keep_datasets: List[str] = DATASETS, validation_size: float = 0.2, min_count: int = 0,
random_state: int = 42, load: bool=True, save: bool=True, const_len: bool = False):
"""
Dataset class for multi task learning.
Reads data from the Fudan review dataset, which contains data for sentiment classification in
different domains.
Parameters
---
tokenizer: huggingface bert tokenizer
data_dir: Union[str, Path]
Directory for the Fudan review data.
split: str in {'train', 'val', 'test', 'all'}
Indicate which datasplit we want. This is not a split over domain, but over samples.
collapse_domains: bool. Not used at the moment, since we have methods to return individual domains anyway.
If True, make one big iterator from all datasets. This means that different domains will be present
in the same batch.
keep_datasets: List[str]
Which domains to keep in memory. Specify domain such as ['apparel', 'baby'].
By default, all known datasets (found in DATASETS) are read in and tokenized, so that
we can save them and not have to tokenize again. We then filter on keep_datasets.
validation_size: should be in [0, 1].
Fraction of validation samples in train set.
min_count: NOT IMPLEMENTED
Token should appear min_count times to be counted in vocabulary.
random_state:
Used for validation split.
load:
Flag indicating whether to load set from a file if it exists.
TODO: implement different filename for different datasets loaded.
save:
Whether to save processed data to a file. Currently always saves to data_dir / 'processed_data.pt'
"""
assert split in ('train', 'val', 'test', 'all'), 'provide correct data split'
self.data_dir = Path(data_dir)
self.split = split
self.collapse_domains = collapse_domains
self.keep_datasets = keep_datasets
self.random_state = random_state
# load and process data
store_processed = self.data_dir / (
self.split + f'_random-{random_state}' + f'_valsize-{validation_size}' + '_processed_data.pt')
if store_processed.exists() and load:
logging.info(f'loading data from file: {store_processed}')
self.data = torch.load(store_processed)
else:
self.data = self._read_datasets(validation_size=validation_size)
self.data['tokenized'] = self._tokenize_data(self.data['text'], tokenizer)
if save:
torch.save(self.data, store_processed)
# filter rows with domain in keep_datasets
self.data = self.data.loc[self.data['domain'].isin(keep_datasets), :].reset_index(drop=True)
self.collator = MultiTaskCollator(tokenizer, const_len)
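# Usage sketch (assumes a HuggingFace BERT tokenizer instance `tok`; MultiTaskCollator
# is defined elsewhere in this module and is passed to the DataLoader as collate_fn):
#   ds = MultiTaskDataset(tok, split='train', keep_datasets=['apparel', 'baby'])
#   loader = DataLoader(ds, batch_size=16, collate_fn=ds.collator)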
def _read_datasets(self, validation_size: float = 0.2):
"""
Read datasets from file. If data directory does not exist, downloads data.
Parameters
----------
validation_size: float in [0, 1]
Returns
---
pd.DataFrame
Appropriate datasplit with fields 'label', 'text', and 'domain'.
"""
dfs = []
col_names = ['label', 'text']
if not self.data_dir.exists():
download_and_extract_fudan(self.data_dir)
for idx, dataset in enumerate(DATASETS):
logging.info(f'processing dataset: {dataset}')
train_set, val_set, test_set = None, None, None
if self.split in ('train', 'val', 'all'):
train_file = dataset + '.task.train'
train_val_set = pd.read_csv(self.data_dir / train_file, sep='\t', header=None, names=col_names, engine='python')
if validation_size == 0: # only do split when validation_size > 0
train_set = train_val_set
else:
train_set, val_set = train_test_split(train_val_set, test_size=validation_size,
random_state=self.random_state)
val_set['domain'] = dataset
train_set['domain'] = dataset # record which domain it is in dataframe
if self.split in ('test', 'all'):
test_file = dataset + '.task.test'
test_set = | pd.read_csv(self.data_dir / test_file, sep='\t', header=None, names=col_names, engine='python') | pandas.read_csv |
# Licensed to Modin Development Team under one or more contributor license agreements.
# See the NOTICE file distributed with this work for additional information regarding
# copyright ownership. The Modin Development Team licenses this file to you under the
# Apache License, Version 2.0 (the "License"); you may not use this file except in
# compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under
# the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific language
# governing permissions and limitations under the License.
from contextlib import nullcontext
import glob
import json
import numpy as np
import pandas
import pytest
import modin.experimental.pandas as pd
from modin.config import Engine
from modin.utils import get_current_execution
from modin.pandas.test.utils import (
df_equals,
get_unique_filename,
teardown_test_files,
test_data,
)
from modin.test.test_utils import warns_that_defaulting_to_pandas
from modin.pandas.test.utils import parse_dates_values_by_id, time_parsing_csv_path
@pytest.mark.skipif(
Engine.get() == "Dask",
reason="Dask does not have experimental API",
)
def test_from_sql_distributed(make_sql_connection): # noqa: F811
if Engine.get() == "Ray":
filename = "test_from_sql_distributed.db"
table = "test_from_sql_distributed"
conn = make_sql_connection(filename, table)
query = "select * from {0}".format(table)
pandas_df = pandas.read_sql(query, conn)
modin_df_from_query = pd.read_sql(
query,
conn,
partition_column="col1",
lower_bound=0,
upper_bound=6,
max_sessions=2,
)
modin_df_from_table = pd.read_sql(
table,
conn,
partition_column="col1",
lower_bound=0,
upper_bound=6,
max_sessions=2,
)
df_equals(modin_df_from_query, pandas_df)
df_equals(modin_df_from_table, pandas_df)
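# Rough reading of the test above: partition_column/lower_bound/upper_bound tell
# Modin how to split the query into parallel partitions on `col1`, and max_sessions
# caps the number of concurrent connections; both the query and table reads must
# match a plain pandas.read_sql of the same data.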
@pytest.mark.skipif(
Engine.get() == "Dask",
reason="Dask does not have experimental API",
)
def test_from_sql_defaults(make_sql_connection): # noqa: F811
filename = "test_from_sql_distributed.db"
table = "test_from_sql_distributed"
conn = make_sql_connection(filename, table)
query = "select * from {0}".format(table)
pandas_df = pandas.read_sql(query, conn)
with pytest.warns(UserWarning):
modin_df_from_query = pd.read_sql(query, conn)
with pytest.warns(UserWarning):
modin_df_from_table = pd.read_sql(table, conn)
df_equals(modin_df_from_query, pandas_df)
df_equals(modin_df_from_table, pandas_df)
@pytest.mark.usefixtures("TestReadGlobCSVFixture")
@pytest.mark.skipif(
Engine.get() != "Ray", reason="Currently only support Ray engine for glob paths."
)
class TestCsvGlob:
def test_read_multiple_small_csv(self): # noqa: F811
pandas_df = pandas.concat([pandas.read_csv(fname) for fname in pytest.files])
modin_df = pd.read_csv_glob(pytest.glob_path)
# Indexes get messed up when concatting so we reset both.
pandas_df = pandas_df.reset_index(drop=True)
modin_df = modin_df.reset_index(drop=True)
df_equals(modin_df, pandas_df)
@pytest.mark.parametrize("nrows", [35, 100])
def test_read_multiple_csv_nrows(self, request, nrows): # noqa: F811
pandas_df = pandas.concat([pandas.read_csv(fname) for fname in pytest.files])
import builtins
import numpy as np
import pytest
import pandas as pd
from pandas import (
DataFrame,
Index,
Series,
isna,
)
import pandas._testing as tm
@pytest.mark.parametrize("agg_func", ["any", "all"])
@pytest.mark.parametrize("skipna", [True, False])
@pytest.mark.parametrize(
"vals",
[
["foo", "bar", "baz"],
["foo", "", ""],
["", "", ""],
[1, 2, 3],
[1, 0, 0],
[0, 0, 0],
[1.0, 2.0, 3.0],
[1.0, 0.0, 0.0],
[0.0, 0.0, 0.0],
[True, True, True],
[True, False, False],
[False, False, False],
[np.nan, np.nan, np.nan],
],
)
def test_groupby_bool_aggs(agg_func, skipna, vals):
df = DataFrame({"key": ["a"] * 3 + ["b"] * 3, "val": vals * 2})
# Figure out expectation using Python builtin
exp = getattr(builtins, agg_func)(vals)
# edge case for missing data with skipna and 'any'
if skipna and all(isna(vals)) and agg_func == "any":
exp = False
exp_df = DataFrame([exp] * 2, columns=["val"], index=Index(["a", "b"], name="key"))
result = getattr(df.groupby("key"), agg_func)(skipna=skipna)
tm.assert_frame_equal(result, exp_df)
def test_any():
df = DataFrame(
[[1, 2, "foo"], [1, np.nan, "bar"], [3, np.nan, "baz"]],
columns=["A", "B", "C"],
)
expected = DataFrame(
[[True, True], [False, True]], columns=["B", "C"], index=[1, 3]
)
expected.index.name = "A"
result = df.groupby("A").any()
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("bool_agg_func", ["any", "all"])
def test_bool_aggs_dup_column_labels(bool_agg_func):
# 21668
df = DataFrame([[True, True]], columns=["a", "a"])
grp_by = df.groupby([0])
result = getattr(grp_by, bool_agg_func)()
expected = df
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("bool_agg_func", ["any", "all"])
@pytest.mark.parametrize("skipna", [True, False])
@pytest.mark.parametrize(
"data",
[
[False, False, False],
[True, True, True],
[pd.NA, pd.NA, pd.NA],
[False, pd.NA, False],
[True, pd.NA, True],
[True, pd.NA, False],
],
)
def test_masked_kleene_logic(bool_agg_func, skipna, data):
# GH#37506
ser = Series(data, dtype="boolean")
# The result should match aggregating on the whole series. Correctness
# there is verified in test_reductions.py::test_any_all_boolean_kleene_logic
expected_data = getattr(ser, bool_agg_func)(skipna=skipna)
expected = Series(expected_data, dtype="boolean")
result = ser.groupby([0, 0, 0]).agg(bool_agg_func, skipna=skipna)
tm.assert_series_equal(result, expected)
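# Kleene (three-valued) logic reference for the parametrized cases above, with
# skipna=False: True | NA -> True, False | NA -> NA, True & NA -> NA,
# False & NA -> False, and an all-NA group yields NA; with skipna=True the NA
# entries are simply ignored.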
@pytest.mark.parametrize(
"dtype1,dtype2,exp_col1,exp_col2",
[
(
"float",
"Float64",
np.array([True], dtype=bool),
pd.array([pd.NA], dtype="boolean"),
),
(
"Int64",
"float",
pd.array([pd.NA], dtype="boolean"),
np.array([True], dtype=bool),
),
(
"Int64",
"Int64",
pd.array([pd.NA], dtype="boolean"),
pd.array([pd.NA], dtype="boolean"),
),
(
"Float64",
"boolean",
pd.array([pd.NA], dtype="boolean"),
pd.array([pd.NA], dtype="boolean"),
),
],
)
def test_masked_mixed_types(dtype1, dtype2, exp_col1, exp_col2):
# GH#37506
data = [1.0, np.nan]
df = DataFrame(
{"col1": pd.array(data, dtype=dtype1), "col2": pd.array(data, dtype=dtype2)}
)
result = df.groupby([1, 1]).agg("all", skipna=False)
expected = DataFrame({"col1": exp_col1, "col2": exp_col2}, index=[1])
tm.assert_frame_equal(result, expected)
import copy
import itertools
import re
import operator
from datetime import datetime, timedelta
from collections import defaultdict
import numpy as np
from pandas.core.base import PandasObject
from pandas.core.common import (_possibly_downcast_to_dtype, isnull,
_NS_DTYPE, _TD_DTYPE, ABCSeries, is_list_like,
ABCSparseSeries, _infer_dtype_from_scalar,
is_null_datelike_scalar, _maybe_promote,
is_timedelta64_dtype, is_datetime64_dtype,
array_equivalent, _maybe_convert_string_to_object,
is_categorical, needs_i8_conversion, is_datetimelike_v_numeric)
from pandas.core.index import Index, MultiIndex, _ensure_index
from pandas.core.indexing import maybe_convert_indices, length_of_indexer
from pandas.core.categorical import Categorical, maybe_to_categorical
import pandas.core.common as com
from pandas.sparse.array import _maybe_to_sparse, SparseArray
import pandas.lib as lib
import pandas.tslib as tslib
import pandas.computation.expressions as expressions
from pandas.util.decorators import cache_readonly
from pandas.tslib import Timestamp, Timedelta
from pandas import compat
from pandas.compat import range, map, zip, u
from pandas.tseries.timedeltas import _coerce_scalar_to_timedelta_type
from pandas.lib import BlockPlacement
class Block(PandasObject):
"""
Canonical n-dimensional unit of homogeneous dtype contained in a pandas
data structure
Index-ignorant; let the container take care of that
"""
__slots__ = ['_mgr_locs', 'values', 'ndim']
is_numeric = False
is_float = False
is_integer = False
is_complex = False
is_datetime = False
is_timedelta = False
is_bool = False
is_object = False
is_categorical = False
is_sparse = False
_can_hold_na = False
_downcast_dtype = None
_can_consolidate = True
_verify_integrity = True
_validate_ndim = True
_ftype = 'dense'
_holder = None
def __init__(self, values, placement, ndim=None, fastpath=False):
if ndim is None:
ndim = values.ndim
elif values.ndim != ndim:
raise ValueError('Wrong number of dimensions')
self.ndim = ndim
self.mgr_locs = placement
self.values = values
if len(self.mgr_locs) != len(self.values):
raise ValueError('Wrong number of items passed %d,'
' placement implies %d' % (
len(self.values), len(self.mgr_locs)))
@property
def _consolidate_key(self):
return (self._can_consolidate, self.dtype.name)
@property
def _is_single_block(self):
return self.ndim == 1
@property
def is_view(self):
""" return a boolean if I am possibly a view """
return self.values.base is not None
@property
def is_datelike(self):
""" return True if I am a non-datelike """
return self.is_datetime or self.is_timedelta
def is_categorical_astype(self, dtype):
"""
validate that we have a astypeable to categorical,
returns a boolean if we are a categorical
"""
if com.is_categorical_dtype(dtype):
if dtype == com.CategoricalDtype():
return True
# this is a pd.Categorical, but is not
# a valid type for astypeing
raise TypeError("invalid type {0} for astype".format(dtype))
return False
def to_dense(self):
return self.values.view()
@property
def fill_value(self):
return np.nan
@property
def mgr_locs(self):
return self._mgr_locs
@property
def array_dtype(self):
""" the dtype to return if I want to construct this block as an array """
return self.dtype
def make_block_same_class(self, values, placement, copy=False, fastpath=True,
**kwargs):
"""
Wrap given values in a block of same type as self.
`kwargs` are used in SparseBlock override.
"""
if copy:
values = values.copy()
return make_block(values, placement, klass=self.__class__,
fastpath=fastpath, **kwargs)
@mgr_locs.setter
def mgr_locs(self, new_mgr_locs):
if not isinstance(new_mgr_locs, BlockPlacement):
new_mgr_locs = BlockPlacement(new_mgr_locs)
self._mgr_locs = new_mgr_locs
def __unicode__(self):
# don't want to print out all of the items here
name = com.pprint_thing(self.__class__.__name__)
if self._is_single_block:
result = '%s: %s dtype: %s' % (
name, len(self), self.dtype)
else:
shape = ' x '.join([com.pprint_thing(s) for s in self.shape])
result = '%s: %s, %s, dtype: %s' % (
name, com.pprint_thing(self.mgr_locs.indexer), shape,
self.dtype)
return result
def __len__(self):
return len(self.values)
def __getstate__(self):
return self.mgr_locs.indexer, self.values
def __setstate__(self, state):
self.mgr_locs = BlockPlacement(state[0])
self.values = state[1]
self.ndim = self.values.ndim
def _slice(self, slicer):
""" return a slice of my values """
return self.values[slicer]
def reshape_nd(self, labels, shape, ref_items):
"""
Parameters
----------
labels : list of new axis labels
shape : new shape
ref_items : new ref_items
return a new block that is transformed to a nd block
"""
return _block2d_to_blocknd(
values=self.get_values().T,
placement=self.mgr_locs,
shape=shape,
labels=labels,
ref_items=ref_items)
def getitem_block(self, slicer, new_mgr_locs=None):
"""
Perform __getitem__-like, return result as block.
As of now, only supports slices that preserve dimensionality.
"""
if new_mgr_locs is None:
if isinstance(slicer, tuple):
axis0_slicer = slicer[0]
else:
axis0_slicer = slicer
new_mgr_locs = self.mgr_locs[axis0_slicer]
new_values = self._slice(slicer)
if self._validate_ndim and new_values.ndim != self.ndim:
raise ValueError("Only same dim slicing is allowed")
return self.make_block_same_class(new_values, new_mgr_locs)
@property
def shape(self):
return self.values.shape
@property
def itemsize(self):
return self.values.itemsize
@property
def dtype(self):
return self.values.dtype
@property
def ftype(self):
return "%s:%s" % (self.dtype, self._ftype)
def merge(self, other):
return _merge_blocks([self, other])
def reindex_axis(self, indexer, method=None, axis=1, fill_value=None,
limit=None, mask_info=None):
"""
Reindex using pre-computed indexer information
"""
if axis < 1:
raise AssertionError('axis must be at least 1, got %d' % axis)
if fill_value is None:
fill_value = self.fill_value
new_values = com.take_nd(self.values, indexer, axis,
fill_value=fill_value, mask_info=mask_info)
return make_block(new_values,
ndim=self.ndim, fastpath=True,
placement=self.mgr_locs)
def get(self, item):
loc = self.items.get_loc(item)
return self.values[loc]
def iget(self, i):
return self.values[i]
def set(self, locs, values, check=False):
"""
Modify Block in-place with new item value
Returns
-------
None
"""
self.values[locs] = values
def delete(self, loc):
"""
Delete given loc(-s) from block in-place.
"""
self.values = np.delete(self.values, loc, 0)
self.mgr_locs = self.mgr_locs.delete(loc)
def apply(self, func, **kwargs):
""" apply the function to my values; return a block if we are not one """
result = func(self.values, **kwargs)
if not isinstance(result, Block):
result = make_block(values=_block_shape(result), placement=self.mgr_locs,)
return result
def fillna(self, value, limit=None, inplace=False, downcast=None):
if not self._can_hold_na:
if inplace:
return [self]
else:
return [self.copy()]
mask = isnull(self.values)
if limit is not None:
if self.ndim > 2:
raise NotImplementedError("number of dimensions for 'fillna' "
"is currently limited to 2")
mask[mask.cumsum(self.ndim-1) > limit] = False
value = self._try_fill(value)
blocks = self.putmask(mask, value, inplace=inplace)
return self._maybe_downcast(blocks, downcast)
def _maybe_downcast(self, blocks, downcast=None):
# no need to downcast our float
# unless indicated
if downcast is None and self.is_float:
return blocks
elif downcast is None and (self.is_timedelta or self.is_datetime):
return blocks
result_blocks = []
for b in blocks:
result_blocks.extend(b.downcast(downcast))
return result_blocks
def downcast(self, dtypes=None):
""" try to downcast each item to the dict of dtypes if present """
# turn it off completely
if dtypes is False:
return [self]
values = self.values
# single block handling
if self._is_single_block:
# try to cast all non-floats here
if dtypes is None:
dtypes = 'infer'
nv = _possibly_downcast_to_dtype(values, dtypes)
return [make_block(nv, ndim=self.ndim,
fastpath=True, placement=self.mgr_locs)]
# ndim > 1
if dtypes is None:
return [self]
if not (dtypes == 'infer' or isinstance(dtypes, dict)):
raise ValueError("downcast must have a dictionary or 'infer' as "
"its argument")
# item-by-item
# this is expensive as it splits the blocks items-by-item
blocks = []
for i, rl in enumerate(self.mgr_locs):
if dtypes == 'infer':
dtype = 'infer'
else:
raise AssertionError("dtypes as dict is not supported yet")
dtype = dtypes.get(item, self._downcast_dtype)
if dtype is None:
nv = _block_shape(values[i], ndim=self.ndim)
else:
nv = _possibly_downcast_to_dtype(values[i], dtype)
nv = _block_shape(nv, ndim=self.ndim)
blocks.append(make_block(nv,
ndim=self.ndim, fastpath=True,
placement=[rl]))
return blocks
def astype(self, dtype, copy=False, raise_on_error=True, values=None, **kwargs):
return self._astype(dtype, copy=copy, raise_on_error=raise_on_error,
values=values, **kwargs)
def _astype(self, dtype, copy=False, raise_on_error=True, values=None,
klass=None, **kwargs):
"""
Coerce to the new type (if copy=True, return a new copy)
raise on an except if raise == True
"""
# may need to convert to categorical
# this is only called for non-categoricals
if self.is_categorical_astype(dtype):
return make_block(Categorical(self.values, **kwargs),
ndim=self.ndim,
placement=self.mgr_locs)
# astype processing
dtype = np.dtype(dtype)
if self.dtype == dtype:
if copy:
return self.copy()
return self
if klass is None:
if dtype == np.object_:
klass = ObjectBlock
try:
# force the copy here
if values is None:
# _astype_nansafe works fine with 1-d only
values = com._astype_nansafe(self.values.ravel(), dtype, copy=True)
values = values.reshape(self.values.shape)
newb = make_block(values,
ndim=self.ndim, placement=self.mgr_locs,
fastpath=True, dtype=dtype, klass=klass)
except:
if raise_on_error is True:
raise
newb = self.copy() if copy else self
if newb.is_numeric and self.is_numeric:
if newb.shape != self.shape:
raise TypeError("cannot set astype for copy = [%s] for dtype "
"(%s [%s]) with smaller itemsize that current "
"(%s [%s])" % (copy, self.dtype.name,
self.itemsize, newb.dtype.name,
newb.itemsize))
return newb
def convert(self, copy=True, **kwargs):
""" attempt to coerce any object types to better types
return a copy of the block (if copy = True)
by definition we are not an ObjectBlock here! """
return [self.copy()] if copy else [self]
def _can_hold_element(self, value):
raise NotImplementedError()
def _try_cast(self, value):
raise NotImplementedError()
def _try_cast_result(self, result, dtype=None):
""" try to cast the result to our original type,
we may have roundtripped thru object in the mean-time """
if dtype is None:
dtype = self.dtype
if self.is_integer or self.is_bool or self.is_datetime:
pass
elif self.is_float and result.dtype == self.dtype:
# protect against a bool/object showing up here
if isinstance(dtype, compat.string_types) and dtype == 'infer':
return result
if not isinstance(dtype, type):
dtype = dtype.type
if issubclass(dtype, (np.bool_, np.object_)):
if issubclass(dtype, np.bool_):
if isnull(result).all():
return result.astype(np.bool_)
else:
result = result.astype(np.object_)
result[result == 1] = True
result[result == 0] = False
return result
else:
return result.astype(np.object_)
return result
# may need to change the dtype here
return _possibly_downcast_to_dtype(result, dtype)
def _try_operate(self, values):
""" return a version to operate on as the input """
return values
def _try_coerce_args(self, values, other):
""" provide coercion to our input arguments """
return values, other
def _try_coerce_result(self, result):
""" reverse of try_coerce_args """
return result
def _try_coerce_and_cast_result(self, result, dtype=None):
result = self._try_coerce_result(result)
result = self._try_cast_result(result, dtype=dtype)
return result
def _try_fill(self, value):
return value
def to_native_types(self, slicer=None, na_rep='', quoting=None, **kwargs):
""" convert to our native types format, slicing if desired """
values = self.values
if slicer is not None:
values = values[:, slicer]
mask = isnull(values)
if not self.is_object and not quoting:
values = values.astype(str)
else:
values = np.array(values, dtype='object')
values[mask] = na_rep
return values
# block actions ####
def copy(self, deep=True):
values = self.values
if deep:
values = values.copy()
return make_block(values, ndim=self.ndim,
klass=self.__class__, fastpath=True,
placement=self.mgr_locs)
def replace(self, to_replace, value, inplace=False, filter=None,
regex=False):
""" replace the to_replace value with value, possible to create new
blocks here this is just a call to putmask. regex is not used here.
It is used in ObjectBlocks. It is here for API
compatibility."""
mask = com.mask_missing(self.values, to_replace)
if filter is not None:
filtered_out = ~self.mgr_locs.isin(filter)
mask[filtered_out.nonzero()[0]] = False
if not mask.any():
if inplace:
return [self]
return [self.copy()]
return self.putmask(mask, value, inplace=inplace)
def setitem(self, indexer, value):
""" set the value inplace; return a new block (of a possibly different
dtype)
indexer is a direct slice/positional indexer; value must be a
compatible shape
"""
# coerce None values, if appropriate
if value is None:
if self.is_numeric:
value = np.nan
# coerce args
values, value = self._try_coerce_args(self.values, value)
arr_value = np.array(value)
# cast the values to a type that can hold nan (if necessary)
if not self._can_hold_element(value):
dtype, _ = com._maybe_promote(arr_value.dtype)
values = values.astype(dtype)
transf = (lambda x: x.T) if self.ndim == 2 else (lambda x: x)
values = transf(values)
l = len(values)
# length checking
# boolean with truth values == len of the value is ok too
if isinstance(indexer, (np.ndarray, list)):
if is_list_like(value) and len(indexer) != len(value):
if not (isinstance(indexer, np.ndarray) and
indexer.dtype == np.bool_ and
len(indexer[indexer]) == len(value)):
raise ValueError("cannot set using a list-like indexer "
"with a different length than the value")
# slice
elif isinstance(indexer, slice):
if is_list_like(value) and l:
if len(value) != length_of_indexer(indexer, values):
raise ValueError("cannot set using a slice indexer with a "
"different length than the value")
try:
def _is_scalar_indexer(indexer):
# return True if we are all scalar indexers
if arr_value.ndim == 1:
if not isinstance(indexer, tuple):
indexer = tuple([indexer])
return all([ np.isscalar(idx) for idx in indexer ])
return False
def _is_empty_indexer(indexer):
# return a boolean if we have an empty indexer
if arr_value.ndim == 1:
if not isinstance(indexer, tuple):
indexer = tuple([indexer])
return any(isinstance(idx, np.ndarray) and len(idx) == 0 for idx in indexer)
return False
# empty indexers
# 8669 (empty)
if _is_empty_indexer(indexer):
pass
# setting a single element for each dim and with a rhs that could be say a list
# GH 6043
elif _is_scalar_indexer(indexer):
values[indexer] = value
# if we are an exact match (ex-broadcasting),
# then use the resultant dtype
elif len(arr_value.shape) and arr_value.shape[0] == values.shape[0] and np.prod(arr_value.shape) == np.prod(values.shape):
values[indexer] = value
values = values.astype(arr_value.dtype)
# set
else:
values[indexer] = value
# coerce and try to infer the dtypes of the result
if np.isscalar(value):
dtype, _ = _infer_dtype_from_scalar(value)
else:
dtype = 'infer'
values = self._try_coerce_and_cast_result(values, dtype)
block = make_block(transf(values),
ndim=self.ndim, placement=self.mgr_locs,
fastpath=True)
# may have to soft convert_objects here
if block.is_object and not self.is_object:
block = block.convert(numeric=False)
return block
except (ValueError, TypeError) as detail:
raise
except Exception as detail:
pass
return [self]
def putmask(self, mask, new, align=True, inplace=False):
""" putmask the data to the block; it is possible that we may create a
new dtype of block
return the resulting block(s)
Parameters
----------
mask : the condition to respect
new : a ndarray/object
align : boolean, perform alignment on other/cond, default is True
inplace : perform inplace modification, default is False
Returns
-------
a new block(s), the result of the putmask
"""
new_values = self.values if inplace else self.values.copy()
# may need to align the new
if hasattr(new, 'reindex_axis'):
new = new.values.T
# may need to align the mask
if hasattr(mask, 'reindex_axis'):
mask = mask.values.T
# if we are passed a scalar None, convert it here
if not is_list_like(new) and isnull(new) and not self.is_object:
new = self.fill_value
if self._can_hold_element(new):
new = self._try_cast(new)
# pseudo-broadcast
if isinstance(new, np.ndarray) and new.ndim == self.ndim - 1:
new = np.repeat(new, self.shape[-1]).reshape(self.shape)
np.putmask(new_values, mask, new)
# maybe upcast me
elif mask.any():
# need to go column by column
new_blocks = []
if self.ndim > 1:
for i, ref_loc in enumerate(self.mgr_locs):
m = mask[i]
v = new_values[i]
# need a new block
if m.any():
n = new[i] if isinstance(
new, np.ndarray) else np.array(new)
# type of the new block
dtype, _ = com._maybe_promote(n.dtype)
# we need to explicitly astype here to make a copy
n = n.astype(dtype)
nv = _putmask_smart(v, m, n)
else:
nv = v if inplace else v.copy()
# Put back the dimension that was taken from it and make
# a block out of the result.
block = make_block(values=nv[np.newaxis],
placement=[ref_loc],
fastpath=True)
new_blocks.append(block)
else:
nv = _putmask_smart(new_values, mask, new)
new_blocks.append(make_block(values=nv,
placement=self.mgr_locs,
fastpath=True))
return new_blocks
if inplace:
return [self]
return [make_block(new_values,
placement=self.mgr_locs, fastpath=True)]
def interpolate(self, method='pad', axis=0, index=None,
values=None, inplace=False, limit=None,
fill_value=None, coerce=False, downcast=None, **kwargs):
def check_int_bool(self, inplace):
# Only FloatBlocks will contain NaNs.
# timedelta subclasses IntBlock
if (self.is_bool or self.is_integer) and not self.is_timedelta:
if inplace:
return self
else:
return self.copy()
# a fill na type method
try:
m = com._clean_fill_method(method)
except:
m = None
if m is not None:
r = check_int_bool(self, inplace)
if r is not None:
return r
return self._interpolate_with_fill(method=m,
axis=axis,
inplace=inplace,
limit=limit,
fill_value=fill_value,
coerce=coerce,
downcast=downcast)
# try an interp method
try:
m = com._clean_interp_method(method, **kwargs)
except:
m = None
if m is not None:
r = check_int_bool(self, inplace)
if r is not None:
return r
return self._interpolate(method=m,
index=index,
values=values,
axis=axis,
limit=limit,
fill_value=fill_value,
inplace=inplace,
downcast=downcast,
**kwargs)
raise ValueError("invalid method '{0}' to interpolate.".format(method))
def _interpolate_with_fill(self, method='pad', axis=0, inplace=False,
limit=None, fill_value=None, coerce=False,
downcast=None):
""" fillna but using the interpolate machinery """
# if we are coercing, then don't force the conversion
# if the block can't hold the type
if coerce:
if not self._can_hold_na:
if inplace:
return [self]
else:
return [self.copy()]
fill_value = self._try_fill(fill_value)
values = self.values if inplace else self.values.copy()
values = self._try_operate(values)
values = com.interpolate_2d(values,
method=method,
axis=axis,
limit=limit,
fill_value=fill_value,
dtype=self.dtype)
values = self._try_coerce_result(values)
blocks = [make_block(values,
ndim=self.ndim, klass=self.__class__,
fastpath=True, placement=self.mgr_locs)]
return self._maybe_downcast(blocks, downcast)
def _interpolate(self, method=None, index=None, values=None,
fill_value=None, axis=0, limit=None,
inplace=False, downcast=None, **kwargs):
""" interpolate using scipy wrappers """
data = self.values if inplace else self.values.copy()
# only deal with floats
if not self.is_float:
if not self.is_integer:
return self
data = data.astype(np.float64)
if fill_value is None:
fill_value = self.fill_value
if method in ('krogh', 'piecewise_polynomial', 'pchip'):
if not index.is_monotonic:
raise ValueError("{0} interpolation requires that the "
"index be monotonic.".format(method))
# process 1-d slices in the axis direction
def func(x):
# process a 1-d slice, returning it
# should the axis argument be handled below in apply_along_axis?
# i.e. not an arg to com.interpolate_1d
return com.interpolate_1d(index, x, method=method, limit=limit,
fill_value=fill_value,
bounds_error=False, **kwargs)
# interp each column independently
interp_values = np.apply_along_axis(func, axis, data)
blocks = [make_block(interp_values,
ndim=self.ndim, klass=self.__class__,
fastpath=True, placement=self.mgr_locs)]
return self._maybe_downcast(blocks, downcast)
def take_nd(self, indexer, axis, new_mgr_locs=None, fill_tuple=None):
"""
Take values according to indexer and return them as a block.
"""
if fill_tuple is None:
fill_value = self.fill_value
new_values = com.take_nd(self.get_values(), indexer, axis=axis,
allow_fill=False)
else:
fill_value = fill_tuple[0]
new_values = com.take_nd(self.get_values(), indexer, axis=axis,
allow_fill=True, fill_value=fill_value)
if new_mgr_locs is None:
if axis == 0:
slc = lib.indexer_as_slice(indexer)
if slc is not None:
new_mgr_locs = self.mgr_locs[slc]
else:
new_mgr_locs = self.mgr_locs[indexer]
else:
new_mgr_locs = self.mgr_locs
if new_values.dtype != self.dtype:
return make_block(new_values, new_mgr_locs)
else:
return self.make_block_same_class(new_values, new_mgr_locs)
def get_values(self, dtype=None):
return self.values
def diff(self, n, axis=1):
""" return block for the diff of the values """
new_values = com.diff(self.values, n, axis=axis)
return [make_block(values=new_values,
ndim=self.ndim, fastpath=True,
placement=self.mgr_locs)]
def shift(self, periods, axis=0):
""" shift the block by periods, possibly upcast """
# convert integer to float if necessary. need to do a lot more than
# that, handle boolean etc also
new_values, fill_value = com._maybe_upcast(self.values)
# make sure array sent to np.roll is c_contiguous
f_ordered = new_values.flags.f_contiguous
if f_ordered:
new_values = new_values.T
axis = new_values.ndim - axis - 1
if np.prod(new_values.shape):
new_values = np.roll(new_values, com._ensure_platform_int(periods), axis=axis)
axis_indexer = [ slice(None) ] * self.ndim
if periods > 0:
axis_indexer[axis] = slice(None,periods)
else:
axis_indexer[axis] = slice(periods,None)
new_values[tuple(axis_indexer)] = fill_value
# restore original order
if f_ordered:
new_values = new_values.T
return [make_block(new_values,
ndim=self.ndim, fastpath=True,
placement=self.mgr_locs)]
def eval(self, func, other, raise_on_error=True, try_cast=False):
"""
evaluate the block; return result block from the result
Parameters
----------
func : how to combine self, other
other : a ndarray/object
raise_on_error : if True, raise when I can't perform the function,
False by default (and just return the data that we had coming in)
Returns
-------
a new block, the result of the func
"""
values = self.values
if hasattr(other, 'reindex_axis'):
other = other.values
# make sure that we can broadcast
is_transposed = False
if hasattr(other, 'ndim') and hasattr(values, 'ndim'):
if values.ndim != other.ndim:
is_transposed = True
else:
if values.shape == other.shape[::-1]:
is_transposed = True
elif values.shape[0] == other.shape[-1]:
is_transposed = True
else:
# this is a broadcast error here
raise ValueError("cannot broadcast shape [%s] with block "
"values [%s]" % (values.T.shape,
other.shape))
transf = (lambda x: x.T) if is_transposed else (lambda x: x)
# coerce/transpose the args if needed
values, other = self._try_coerce_args(transf(values), other)
# get the result, may need to transpose the other
def get_result(other):
return self._try_coerce_result(func(values, other))
# error handler if we have an issue operating with the function
def handle_error():
if raise_on_error:
raise TypeError('Could not operate %s with block values %s'
% (repr(other), str(detail)))
else:
# return the values
result = np.empty(values.shape, dtype='O')
result.fill(np.nan)
return result
# get the result
try:
result = get_result(other)
# if we have an invalid shape/broadcast error
# GH4576, so raise instead of allowing to pass through
except ValueError as detail:
raise
except Exception as detail:
result = handle_error()
# technically a broadcast error in numpy can 'work' by returning a
# boolean False
if not isinstance(result, np.ndarray):
if not isinstance(result, np.ndarray):
# differentiate between an invalid ndarray-ndarray comparison
# and an invalid type comparison
if isinstance(values, np.ndarray) and is_list_like(other):
raise ValueError('Invalid broadcasting comparison [%s] '
'with block values' % repr(other))
raise TypeError('Could not compare [%s] with block values'
% repr(other))
# transpose if needed
result = transf(result)
# try to cast if requested
if try_cast:
result = self._try_cast_result(result)
return [make_block(result, ndim=self.ndim,
fastpath=True, placement=self.mgr_locs)]
def where(self, other, cond, align=True, raise_on_error=True,
try_cast=False):
"""
evaluate the block; return result block(s) from the result
Parameters
----------
other : a ndarray/object
cond : the condition to respect
align : boolean, perform alignment on other/cond
raise_on_error : if True, raise when I can't perform the function,
False by default (and just return the data that we had coming in)
Returns
-------
a new block(s), the result of the func
"""
values = self.values
# see if we can align other
if hasattr(other, 'reindex_axis'):
other = other.values
# make sure that we can broadcast
is_transposed = False
if hasattr(other, 'ndim') and hasattr(values, 'ndim'):
if values.ndim != other.ndim or values.shape == other.shape[::-1]:
# if its symmetric are ok, no reshaping needed (GH 7506)
if (values.shape[0] == np.array(values.shape)).all():
pass
# pseudo broadcast (it's a 2d vs 1d say and where needs it in a
# specific direction)
elif (other.ndim >= 1 and values.ndim - 1 == other.ndim and
values.shape[0] != other.shape[0]):
other = _block_shape(other).T
else:
values = values.T
is_transposed = True
# see if we can align cond
if not hasattr(cond, 'shape'):
raise ValueError(
"where must have a condition that is ndarray like")
if hasattr(cond, 'reindex_axis'):
cond = cond.values
# may need to undo transpose of values
if hasattr(values, 'ndim'):
if values.ndim != cond.ndim or values.shape == cond.shape[::-1]:
values = values.T
is_transposed = not is_transposed
other = _maybe_convert_string_to_object(other)
# our where function
def func(c, v, o):
if c.ravel().all():
return v
v, o = self._try_coerce_args(v, o)
try:
return self._try_coerce_result(
expressions.where(c, v, o, raise_on_error=True)
)
except Exception as detail:
if raise_on_error:
raise TypeError('Could not operate [%s] with block values '
'[%s]' % (repr(o), str(detail)))
else:
# return the values
result = np.empty(v.shape, dtype='float64')
result.fill(np.nan)
return result
# see if we can operate on the entire block, or need item-by-item
# or if we are a single block (ndim == 1)
result = func(cond, values, other)
if self._can_hold_na or self.ndim == 1:
if not isinstance(result, np.ndarray):
raise TypeError('Could not compare [%s] with block values'
% repr(other))
if is_transposed:
result = result.T
# try to cast if requested
if try_cast:
result = self._try_cast_result(result)
return make_block(result,
ndim=self.ndim, placement=self.mgr_locs)
# might need to separate out blocks
axis = cond.ndim - 1
cond = cond.swapaxes(axis, 0)
mask = np.array([cond[i].all() for i in range(cond.shape[0])],
dtype=bool)
result_blocks = []
for m in [mask, ~mask]:
if m.any():
r = self._try_cast_result(
result.take(m.nonzero()[0], axis=axis))
result_blocks.append(make_block(r.T,
placement=self.mgr_locs[m]))
return result_blocks
def equals(self, other):
if self.dtype != other.dtype or self.shape != other.shape: return False
return array_equivalent(self.values, other.values)
class NonConsolidatableMixIn(object):
""" hold methods for the nonconsolidatable blocks """
_can_consolidate = False
_verify_integrity = False
_validate_ndim = False
_holder = None
def __init__(self, values, placement,
ndim=None, fastpath=False,):
# Placement must be converted to BlockPlacement via property setter
# before ndim logic, because placement may be a slice which doesn't
# have a length.
self.mgr_locs = placement
# kludgetastic
if ndim is None:
if len(self.mgr_locs) != 1:
ndim = 1
else:
ndim = 2
self.ndim = ndim
if not isinstance(values, self._holder):
raise TypeError("values must be {0}".format(self._holder.__name__))
self.values = values
def get_values(self, dtype=None):
""" need to to_dense myself (and always return a ndim sized object) """
values = self.values.to_dense()
if values.ndim == self.ndim - 1:
values = values.reshape((1,) + values.shape)
return values
def iget(self, col):
if self.ndim == 2 and isinstance(col, tuple):
col, loc = col
if col != 0:
raise IndexError("{0} only contains one item".format(self))
return self.values[loc]
else:
if col != 0:
raise IndexError("{0} only contains one item".format(self))
return self.values
def should_store(self, value):
return isinstance(value, self._holder)
def set(self, locs, values, check=False):
assert locs.tolist() == [0]
self.values = values
def get(self, item):
if self.ndim == 1:
loc = self.items.get_loc(item)
return self.values[loc]
else:
return self.values
def _slice(self, slicer):
""" return a slice of my values (but densify first) """
return self.get_values()[slicer]
def _try_cast_result(self, result, dtype=None):
return result
class NumericBlock(Block):
__slots__ = ()
is_numeric = True
_can_hold_na = True
class FloatOrComplexBlock(NumericBlock):
__slots__ = ()
def equals(self, other):
if self.dtype != other.dtype or self.shape != other.shape: return False
left, right = self.values, other.values
return ((left == right) | (np.isnan(left) & np.isnan(right))).all()
class FloatBlock(FloatOrComplexBlock):
__slots__ = ()
is_float = True
_downcast_dtype = 'int64'
def _can_hold_element(self, element):
if is_list_like(element):
element = np.array(element)
tipo = element.dtype.type
return issubclass(tipo, (np.floating, np.integer)) and not issubclass(
tipo, (np.datetime64, np.timedelta64))
return isinstance(element, (float, int, np.float_, np.int_)) and not isinstance(
element, (bool, np.bool_, datetime, timedelta, np.datetime64, np.timedelta64))
def _try_cast(self, element):
try:
return float(element)
except: # pragma: no cover
return element
def to_native_types(self, slicer=None, na_rep='', float_format=None, decimal='.',
quoting=None, **kwargs):
""" convert to our native types format, slicing if desired """
values = self.values
if slicer is not None:
values = values[:, slicer]
mask = isnull(values)
formatter = None
if float_format and decimal != '.':
formatter = lambda v : (float_format % v).replace('.',decimal,1)
elif decimal != '.':
formatter = lambda v : ('%g' % v).replace('.',decimal,1)
elif float_format:
formatter = lambda v : float_format % v
if formatter is None and not quoting:
values = values.astype(str)
else:
values = np.array(values, dtype='object')
values[mask] = na_rep
if formatter:
imask = (~mask).ravel()
values.flat[imask] = np.array(
[formatter(val) for val in values.ravel()[imask]])
return values
def should_store(self, value):
# when inserting a column should not coerce integers to floats
# unnecessarily
return (issubclass(value.dtype.type, np.floating) and
value.dtype == self.dtype)
class ComplexBlock(FloatOrComplexBlock):
__slots__ = ()
is_complex = True
def _can_hold_element(self, element):
if is_list_like(element):
element = np.array(element)
return issubclass(element.dtype.type, (np.floating, np.integer, np.complexfloating))
return (isinstance(element, (float, int, complex, np.float_, np.int_)) and
not isinstance(element, (bool, np.bool_)))
def _try_cast(self, element):
try:
return complex(element)
except: # pragma: no cover
return element
def should_store(self, value):
return issubclass(value.dtype.type, np.complexfloating)
class IntBlock(NumericBlock):
__slots__ = ()
is_integer = True
_can_hold_na = False
def _can_hold_element(self, element):
if is_list_like(element):
element = np.array(element)
tipo = element.dtype.type
return issubclass(tipo, np.integer) and not issubclass(tipo, (np.datetime64, np.timedelta64))
return com.is_integer(element)
def _try_cast(self, element):
try:
return int(element)
except: # pragma: no cover
return element
def should_store(self, value):
return com.is_integer_dtype(value) and value.dtype == self.dtype
class TimeDeltaBlock(IntBlock):
__slots__ = ()
is_timedelta = True
_can_hold_na = True
is_numeric = False
@property
def fill_value(self):
return tslib.iNaT
def _try_fill(self, value):
""" if we are a NaT, return the actual fill value """
if isinstance(value, type(tslib.NaT)) or np.array(isnull(value)).all():
value = tslib.iNaT
elif isinstance(value, Timedelta):
value = value.value
elif isinstance(value, np.timedelta64):
pass
elif com.is_integer(value):
# coerce to seconds of timedelta
value = np.timedelta64(int(value * 1e9))
elif isinstance(value, timedelta):
value = np.timedelta64(value)
return value
def _try_coerce_args(self, values, other):
""" Coerce values and other to float64, with null values converted to
NaN. values is always ndarray-like, other may not be """
def masker(v):
mask = isnull(v)
v = v.astype('float64')
v[mask] = np.nan
return v
values = masker(values)
if is_null_datelike_scalar(other):
other = np.nan
elif isinstance(other, (np.timedelta64, Timedelta, timedelta)):
other = _coerce_scalar_to_timedelta_type(other, unit='s', box=False).item()
if other == tslib.iNaT:
other = np.nan
elif lib.isscalar(other):
other = np.float64(other)
else:
other = masker(other)
return values, other
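# Net effect: both sides end up as nanosecond counts (float64 with NaT mapped to
# NaN), so arithmetic and comparisons propagate missing values the way ordinary
# floats do.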
def _try_operate(self, values):
""" return a version to operate on """
return values.view('i8')
def _try_coerce_result(self, result):
""" reverse of try_coerce_args / try_operate """
if isinstance(result, np.ndarray):
mask = isnull(result)
if result.dtype.kind in ['i', 'f', 'O']:
result = result.astype('m8[ns]')
result[mask] = tslib.iNaT
elif isinstance(result, np.integer):
result = lib.Timedelta(result)
return result
def should_store(self, value):
return issubclass(value.dtype.type, np.timedelta64)
def to_native_types(self, slicer=None, na_rep=None, quoting=None, **kwargs):
""" convert to our native types format, slicing if desired """
values = self.values
if slicer is not None:
values = values[:, slicer]
mask = isnull(values)
rvalues = np.empty(values.shape, dtype=object)
if na_rep is None:
na_rep = 'NaT'
rvalues[mask] = na_rep
imask = (~mask).ravel()
#### FIXME ####
# should use the core.format.Timedelta64Formatter here
# to figure what format to pass to the Timedelta
# e.g. to not show the decimals say
rvalues.flat[imask] = np.array([Timedelta(val)._repr_base(format='all')
for val in values.ravel()[imask]],
dtype=object)
return rvalues
def get_values(self, dtype=None):
# return object dtypes as Timedelta
if dtype == object:
return lib.map_infer(self.values.ravel(), lib.Timedelta
).reshape(self.values.shape)
return self.values
class BoolBlock(NumericBlock):
__slots__ = ()
is_bool = True
_can_hold_na = False
def _can_hold_element(self, element):
if is_list_like(element):
element = np.array(element)
return issubclass(element.dtype.type, np.integer)
return isinstance(element, (int, bool))
def _try_cast(self, element):
try:
return bool(element)
except: # pragma: no cover
return element
def should_store(self, value):
return issubclass(value.dtype.type, np.bool_)
def replace(self, to_replace, value, inplace=False, filter=None,
regex=False):
to_replace_values = np.atleast_1d(to_replace)
if not np.can_cast(to_replace_values, bool):
return self
return super(BoolBlock, self).replace(to_replace, value,
inplace=inplace, filter=filter,
regex=regex)
class ObjectBlock(Block):
__slots__ = ()
is_object = True
_can_hold_na = True
def __init__(self, values, ndim=2, fastpath=False,
placement=None):
if issubclass(values.dtype.type, compat.string_types):
values = np.array(values, dtype=object)
super(ObjectBlock, self).__init__(values, ndim=ndim,
fastpath=fastpath,
placement=placement)
@property
def is_bool(self):
""" we can be a bool if we have only bool values but are of type
object
"""
return lib.is_bool_array(self.values.ravel())
def convert(self, datetime=True, numeric=True, timedelta=True, coerce=False,
copy=True, by_item=True):
""" attempt to coerce any object types to better types
return a copy of the block (if copy = True)
by definition we ARE an ObjectBlock!!!!!
can return multiple blocks!
"""
# attempt to create new type blocks
blocks = []
if by_item and not self._is_single_block:
for i, rl in enumerate(self.mgr_locs):
values = self.iget(i)
values = com._possibly_convert_objects(
values.ravel(),
datetime=datetime,
numeric=numeric,
timedelta=timedelta,
coerce=coerce,
copy=copy
).reshape(values.shape)
values = _block_shape(values, ndim=self.ndim)
newb = make_block(values,
ndim=self.ndim, placement=[rl])
blocks.append(newb)
else:
values = com._possibly_convert_objects(
self.values.ravel(),
datetime=datetime,
numeric=numeric,
timedelta=timedelta,
coerce=coerce,
copy=copy
).reshape(self.values.shape)
blocks.append(make_block(values,
ndim=self.ndim, placement=self.mgr_locs))
return blocks
def set(self, locs, values, check=False):
"""
Modify Block in-place with new item value
Returns
-------
None
"""
# GH6026
if check:
try:
if (self.values[locs] == values).all():
return
except:
pass
try:
self.values[locs] = values
except (ValueError):
# broadcasting error
# see GH6171
new_shape = list(values.shape)
new_shape[0] = len(self.items)
self.values = np.empty(tuple(new_shape),dtype=self.dtype)
self.values.fill(np.nan)
self.values[locs] = values
def _maybe_downcast(self, blocks, downcast=None):
if downcast is not None:
return blocks
# split and convert the blocks
result_blocks = []
for blk in blocks:
result_blocks.extend(blk.convert(datetime=True,
numeric=False))
return result_blocks
def _can_hold_element(self, element):
return True
def _try_cast(self, element):
return element
def should_store(self, value):
return not (issubclass(value.dtype.type,
(np.integer, np.floating, np.complexfloating,
np.datetime64, np.bool_)) or com.is_categorical_dtype(value))
def replace(self, to_replace, value, inplace=False, filter=None,
regex=False):
blk = [self]
to_rep_is_list = com.is_list_like(to_replace)
value_is_list = com.is_list_like(value)
both_lists = to_rep_is_list and value_is_list
either_list = to_rep_is_list or value_is_list
if not either_list and com.is_re(to_replace):
blk[0], = blk[0]._replace_single(to_replace, value,
inplace=inplace, filter=filter,
regex=True)
elif not (either_list or regex):
blk = super(ObjectBlock, self).replace(to_replace, value,
inplace=inplace,
filter=filter, regex=regex)
elif both_lists:
for to_rep, v in zip(to_replace, value):
blk[0], = blk[0]._replace_single(to_rep, v, inplace=inplace,
filter=filter, regex=regex)
elif to_rep_is_list and regex:
for to_rep in to_replace:
blk[0], = blk[0]._replace_single(to_rep, value,
inplace=inplace,
filter=filter, regex=regex)
else:
blk[0], = blk[0]._replace_single(to_replace, value,
inplace=inplace, filter=filter,
regex=regex)
return blk
def _replace_single(self, to_replace, value, inplace=False, filter=None,
regex=False):
# to_replace is regex compilable
to_rep_re = regex and com.is_re_compilable(to_replace)
# regex is regex compilable
regex_re = com.is_re_compilable(regex)
# only one will survive
if to_rep_re and regex_re:
raise AssertionError('only one of to_replace and regex can be '
'regex compilable')
# if regex was passed as something that can be a regex (rather than a
# boolean)
if regex_re:
to_replace = regex
regex = regex_re or to_rep_re
# try to get the pattern attribute (compiled re) or it's a string
try:
pattern = to_replace.pattern
except AttributeError:
pattern = to_replace
# if the pattern is not empty and to_replace is either a string or a
# regex
if regex and pattern:
rx = re.compile(to_replace)
else:
# if the thing to replace is not a string or compiled regex call
# the superclass method -> to_replace is some kind of object
result = super(ObjectBlock, self).replace(to_replace, value,
inplace=inplace,
filter=filter,
regex=regex)
if not isinstance(result, list):
result = [result]
return result
new_values = self.values if inplace else self.values.copy()
# deal with replacing values with objects (strings) that match but
# whose replacement is not a string (numeric, nan, object)
if isnull(value) or not isinstance(value, compat.string_types):
def re_replacer(s):
try:
return value if rx.search(s) is not None else s
except TypeError:
return s
else:
# value is guaranteed to be a string here; s can be either a string
# or null, in which case it gets returned as-is
def re_replacer(s):
try:
return rx.sub(value, s)
except TypeError:
return s
f = np.vectorize(re_replacer, otypes=[self.dtype])
if filter is None:
filt = slice(None)
else:
filt = self.mgr_locs.isin(filter).nonzero()[0]
new_values[filt] = f(new_values[filt])
return [self if inplace else
make_block(new_values,
fastpath=True, placement=self.mgr_locs)]
class CategoricalBlock(NonConsolidatableMixIn, ObjectBlock):
__slots__ = ()
is_categorical = True
_can_hold_na = True
_holder = Categorical
def __init__(self, values, placement,
fastpath=False, **kwargs):
# coerce to categorical if we can
super(CategoricalBlock, self).__init__(maybe_to_categorical(values),
fastpath=True, placement=placement,
**kwargs)
@property
def is_view(self):
""" I am never a view """
return False
def to_dense(self):
return self.values.to_dense().view()
def convert(self, copy=True, **kwargs):
return [self.copy() if copy else self]
@property
def shape(self):
return (len(self.mgr_locs), len(self.values))
@property
def array_dtype(self):
""" the dtype to return if I want to construct this block as an array """
return np.object_
def _slice(self, slicer):
""" return a slice of my values """
# slice the category
# return same dims as we currently have
return self.values._slice(slicer)
def fillna(self, value, limit=None, inplace=False, downcast=None):
# we may need to upcast our fill to match our dtype
if limit is not None:
raise NotImplementedError("specifying a limit for 'fillna' has "
"not been implemented yet")
values = self.values if inplace else self.values.copy()
return [self.make_block_same_class(values=values.fillna(value=value,
limit=limit),
placement=self.mgr_locs)]
def interpolate(self, method='pad', axis=0, inplace=False,
limit=None, fill_value=None, **kwargs):
values = self.values if inplace else self.values.copy()
return self.make_block_same_class(values=values.fillna(fill_value=fill_value,
method=method,
limit=limit),
placement=self.mgr_locs)
def shift(self, periods, axis=0):
return self.make_block_same_class(values=self.values.shift(periods),
placement=self.mgr_locs)
def take_nd(self, indexer, axis=0, new_mgr_locs=None, fill_tuple=None):
"""
        Take values according to indexer and return them as a block.
"""
if fill_tuple is None:
fill_value = None
else:
fill_value = fill_tuple[0]
# axis doesn't matter; we are really a single-dim object
        # but are passed the axis depending on the calling routine
        # if it's REALLY axis 0, then this will be a reindex and not a take
new_values = self.values.take_nd(indexer, fill_value=fill_value)
# if we are a 1-dim object, then always place at 0
if self.ndim == 1:
new_mgr_locs = [0]
else:
if new_mgr_locs is None:
new_mgr_locs = self.mgr_locs
return self.make_block_same_class(new_values, new_mgr_locs)
def putmask(self, mask, new, align=True, inplace=False):
""" putmask the data to the block; it is possible that we may create a
new dtype of block
return the resulting block(s)
Parameters
----------
mask : the condition to respect
new : a ndarray/object
align : boolean, perform alignment on other/cond, default is True
inplace : perform inplace modification, default is False
Returns
-------
a new block(s), the result of the putmask
"""
new_values = self.values if inplace else self.values.copy()
new_values[mask] = new
return [self.make_block_same_class(values=new_values, placement=self.mgr_locs)]
def _astype(self, dtype, copy=False, raise_on_error=True, values=None,
klass=None):
"""
Coerce to the new type (if copy=True, return a new copy)
        raise on an error if raise_on_error is True
"""
if self.is_categorical_astype(dtype):
values = self.values
else:
values = np.asarray(self.values).astype(dtype, copy=False)
if copy:
values = values.copy()
return make_block(values,
ndim=self.ndim,
placement=self.mgr_locs)
def to_native_types(self, slicer=None, na_rep='', quoting=None, **kwargs):
""" convert to our native types format, slicing if desired """
values = self.values
if slicer is not None:
# Categorical is always one dimension
values = values[slicer]
mask = isnull(values)
values = np.array(values, dtype='object')
values[mask] = na_rep
# we are expected to return a 2-d ndarray
        return values.reshape(1, len(values))
class DatetimeBlock(Block):
__slots__ = ()
is_datetime = True
_can_hold_na = True
def __init__(self, values, placement,
fastpath=False, **kwargs):
if values.dtype != _NS_DTYPE:
values = tslib.cast_to_nanoseconds(values)
super(DatetimeBlock, self).__init__(values,
fastpath=True, placement=placement,
**kwargs)
def _can_hold_element(self, element):
if is_list_like(element):
element = np.array(element)
return element.dtype == _NS_DTYPE or element.dtype == np.int64
return (com.is_integer(element) or
isinstance(element, datetime) or
isnull(element))
def _try_cast(self, element):
try:
return int(element)
except:
return element
def _try_operate(self, values):
""" return a version to operate on """
return values.view('i8')
def _try_coerce_args(self, values, other):
""" Coerce values and other to dtype 'i8'. NaN and NaT convert to
the smallest i8, and will correctly round-trip to NaT if converted
back in _try_coerce_result. values is always ndarray-like, other
may not be """
values = values.view('i8')
if is_null_datelike_scalar(other):
other = tslib.iNaT
elif isinstance(other, datetime):
other = lib.Timestamp(other).asm8.view('i8')
elif hasattr(other, 'dtype') and com.is_integer_dtype(other):
other = other.view('i8')
else:
other = np.array(other, dtype='i8')
return values, other
def _try_coerce_result(self, result):
""" reverse of try_coerce_args """
if isinstance(result, np.ndarray):
if result.dtype.kind in ['i', 'f', 'O']:
result = result.astype('M8[ns]')
elif isinstance(result, (np.integer, np.datetime64)):
result = lib.Timestamp(result)
return result
@property
def fill_value(self):
return tslib.iNaT
def _try_fill(self, value):
""" if we are a NaT, return the actual fill value """
if isinstance(value, type(tslib.NaT)) or np.array(isnull(value)).all():
value = tslib.iNaT
return value
def fillna(self, value, limit=None,
inplace=False, downcast=None):
# straight putmask here
values = self.values if inplace else self.values.copy()
mask = isnull(self.values)
value = self._try_fill(value)
if limit is not None:
if self.ndim > 2:
raise NotImplementedError("number of dimensions for 'fillna' "
"is currently limited to 2")
            mask[mask.cumsum(self.ndim - 1) > limit] = False
np.putmask(values, mask, value)
return [self if inplace else
make_block(values,
fastpath=True, placement=self.mgr_locs)]
def to_native_types(self, slicer=None, na_rep=None, date_format=None,
quoting=None, **kwargs):
""" convert to our native types format, slicing if desired """
values = self.values
if slicer is not None:
values = values[:, slicer]
from pandas.core.format import _get_format_datetime64_from_values
format = _get_format_datetime64_from_values(values, date_format)
result = tslib.format_array_from_datetime(values.view('i8').ravel(),
tz=None,
format=format,
na_rep=na_rep).reshape(values.shape)
return result
def should_store(self, value):
return issubclass(value.dtype.type, np.datetime64)
def set(self, locs, values, check=False):
"""
Modify Block in-place with new item value
Returns
-------
None
"""
if values.dtype != _NS_DTYPE:
# Workaround for numpy 1.6 bug
values = tslib.cast_to_nanoseconds(values)
self.values[locs] = values
def get_values(self, dtype=None):
# return object dtype as Timestamps
if dtype == object:
return lib.map_infer(self.values.ravel(), lib.Timestamp)\
.reshape(self.values.shape)
return self.values
class SparseBlock(NonConsolidatableMixIn, Block):
""" implement as a list of sparse arrays of the same dtype """
__slots__ = ()
is_sparse = True
is_numeric = True
_can_hold_na = True
_ftype = 'sparse'
_holder = SparseArray
@property
def shape(self):
return (len(self.mgr_locs), self.sp_index.length)
@property
def itemsize(self):
return self.dtype.itemsize
@property
def fill_value(self):
#return np.nan
return self.values.fill_value
@fill_value.setter
def fill_value(self, v):
# we may need to upcast our fill to match our dtype
if issubclass(self.dtype.type, np.floating):
v = float(v)
self.values.fill_value = v
@property
def sp_values(self):
return self.values.sp_values
@sp_values.setter
def sp_values(self, v):
# reset the sparse values
self.values = SparseArray(v, sparse_index=self.sp_index,
kind=self.kind, dtype=v.dtype,
fill_value=self.values.fill_value,
copy=False)
@property
def sp_index(self):
return self.values.sp_index
@property
def kind(self):
return self.values.kind
def __len__(self):
try:
return self.sp_index.length
except:
return 0
def copy(self, deep=True):
return self.make_block_same_class(values=self.values,
sparse_index=self.sp_index,
kind=self.kind, copy=deep,
placement=self.mgr_locs)
def make_block_same_class(self, values, placement,
sparse_index=None, kind=None, dtype=None,
fill_value=None, copy=False, fastpath=True):
""" return a new block """
if dtype is None:
dtype = self.dtype
if fill_value is None:
fill_value = self.values.fill_value
# if not isinstance(values, SparseArray) and values.ndim != self.ndim:
# raise ValueError("ndim mismatch")
if values.ndim == 2:
nitems = values.shape[0]
if nitems == 0:
# kludgy, but SparseBlocks cannot handle slices, where the
# output is 0-item, so let's convert it to a dense block: it
# won't take space since there's 0 items, plus it will preserve
# the dtype.
return make_block(np.empty(values.shape, dtype=dtype),
placement, fastpath=True,)
elif nitems > 1:
raise ValueError("Only 1-item 2d sparse blocks are supported")
else:
values = values.reshape(values.shape[1])
new_values = SparseArray(values, sparse_index=sparse_index,
kind=kind or self.kind, dtype=dtype,
fill_value=fill_value, copy=copy)
return make_block(new_values, ndim=self.ndim,
fastpath=fastpath, placement=placement)
def interpolate(self, method='pad', axis=0, inplace=False,
limit=None, fill_value=None, **kwargs):
values = com.interpolate_2d(
self.values.to_dense(), method, axis, limit, fill_value)
return self.make_block_same_class(values=values,
placement=self.mgr_locs)
def fillna(self, value, limit=None, inplace=False, downcast=None):
# we may need to upcast our fill to match our dtype
if limit is not None:
raise NotImplementedError("specifying a limit for 'fillna' has "
"not been implemented yet")
if issubclass(self.dtype.type, np.floating):
value = float(value)
values = self.values if inplace else self.values.copy()
return [self.make_block_same_class(values=values.get_values(value),
fill_value=value,
placement=self.mgr_locs)]
def shift(self, periods, axis=0):
""" shift the block by periods """
N = len(self.values.T)
indexer = np.zeros(N, dtype=int)
if periods > 0:
indexer[periods:] = np.arange(N - periods)
else:
indexer[:periods] = np.arange(-periods, N)
new_values = self.values.to_dense().take(indexer)
# convert integer to float if necessary. need to do a lot more than
# that, handle boolean etc also
new_values, fill_value = com._maybe_upcast(new_values)
if periods > 0:
new_values[:periods] = fill_value
else:
new_values[periods:] = fill_value
return [self.make_block_same_class(new_values, placement=self.mgr_locs)]
def reindex_axis(self, indexer, method=None, axis=1, fill_value=None,
limit=None, mask_info=None):
"""
Reindex using pre-computed indexer information
"""
if axis < 1:
raise AssertionError('axis must be at least 1, got %d' % axis)
# taking on the 0th axis always here
if fill_value is None:
fill_value = self.fill_value
return self.make_block_same_class(self.values.take(indexer),
fill_value=fill_value,
placement=self.mgr_locs)
def sparse_reindex(self, new_index):
""" sparse reindex and return a new block
current reindex only works for float64 dtype! """
values = self.values
values = values.sp_index.to_int_index().reindex(
values.sp_values.astype('float64'), values.fill_value, new_index)
return self.make_block_same_class(values, sparse_index=new_index,
placement=self.mgr_locs)
def make_block(values, placement, klass=None, ndim=None,
dtype=None, fastpath=False):
if klass is None:
dtype = dtype or values.dtype
vtype = dtype.type
if isinstance(values, SparseArray):
klass = SparseBlock
elif issubclass(vtype, np.floating):
klass = FloatBlock
elif (issubclass(vtype, np.integer) and
issubclass(vtype, np.timedelta64)):
klass = TimeDeltaBlock
elif (issubclass(vtype, np.integer) and
not issubclass(vtype, np.datetime64)):
klass = IntBlock
elif dtype == np.bool_:
klass = BoolBlock
elif issubclass(vtype, np.datetime64):
klass = DatetimeBlock
elif issubclass(vtype, np.complexfloating):
klass = ComplexBlock
elif is_categorical(values):
klass = CategoricalBlock
else:
klass = ObjectBlock
return klass(values, ndim=ndim, fastpath=fastpath,
placement=placement)
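# Illustrative note (added as a sketch, not part of the original module): make_block
# dispatches on the values' dtype to pick a Block subclass. Rough doctest-style
# examples with invented arrays and placements:
#
#   >>> make_block(np.array([[1.0, 2.0]]), placement=[0])                 # FloatBlock
#   >>> make_block(np.array([[1, 2]], dtype='int64'), placement=[0])      # IntBlock
#   >>> make_block(np.array([['a', 'b']], dtype=object), placement=[0])   # ObjectBlock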
# TODO: flexible with index=None and/or items=None
class BlockManager(PandasObject):
"""
Core internal data structure to implement DataFrame
Manage a bunch of labeled 2D mixed-type ndarrays. Essentially it's a
lightweight blocked set of labeled data to be manipulated by the DataFrame
public API class
Attributes
----------
shape
ndim
axes
values
items
Methods
-------
set_axis(axis, new_labels)
copy(deep=True)
get_dtype_counts
get_ftype_counts
get_dtypes
get_ftypes
apply(func, axes, block_filter_fn)
get_bool_data
get_numeric_data
get_slice(slice_like, axis)
get(label)
iget(loc)
get_scalar(label_tup)
take(indexer, axis)
reindex_axis(new_labels, axis)
reindex_indexer(new_labels, indexer, axis)
delete(label)
insert(loc, label, value)
set(label, value)
Parameters
----------
Notes
-----
This is *not* a public API class
"""
__slots__ = ['axes', 'blocks', '_ndim', '_shape', '_known_consolidated',
'_is_consolidated', '_blknos', '_blklocs']
def __init__(self, blocks, axes, do_integrity_check=True, fastpath=True):
self.axes = [_ensure_index(ax) for ax in axes]
self.blocks = tuple(blocks)
for block in blocks:
if block.is_sparse:
if len(block.mgr_locs) != 1:
raise AssertionError("Sparse block refers to multiple items")
else:
if self.ndim != block.ndim:
raise AssertionError(('Number of Block dimensions (%d) must '
'equal number of axes (%d)')
% (block.ndim, self.ndim))
if do_integrity_check:
self._verify_integrity()
self._consolidate_check()
self._rebuild_blknos_and_blklocs()
def make_empty(self, axes=None):
""" return an empty BlockManager with the items axis of len 0 """
if axes is None:
axes = [_ensure_index([])] + [
_ensure_index(a) for a in self.axes[1:]
]
# preserve dtype if possible
if self.ndim == 1:
blocks = np.array([], dtype=self.array_dtype)
else:
blocks = []
return self.__class__(blocks, axes)
def __nonzero__(self):
return True
# Python3 compat
__bool__ = __nonzero__
@property
def shape(self):
return tuple(len(ax) for ax in self.axes)
@property
def ndim(self):
return len(self.axes)
def set_axis(self, axis, new_labels):
new_labels = | _ensure_index(new_labels) | pandas.core.index._ensure_index |
import pandas as pd
import numpy as np
from pandas.api.types import is_numeric_dtype, is_categorical, infer_dtype
def dataset_profile(data: pd.DataFrame):
"""A simple function to get you a simple dataset variables overview
Args:
data (pd.DataFrame): the dataset to be profiled
Returns:
pd.DataFrame: containing the report
"""
report = {}
for col in data.columns:
col_dict = {}
col_dict['feature_name'] = col
col_dict['inferred_type'] = infer_dtype(data[col])
col_dict['current_type'] = data[col].dtype
col_dict['missing_values_sum'] = data[col].isna().sum()
col_dict['missing_values_perc'] = data[col].isna().mean()
if infer_dtype(data[col]) in ["string", 'categorical', 'mixed']:
col_dict['n_unique_values'] = data[col].nunique()
            # use a single value_counts so the reported labels and shares stay consistent
            value_shares = data[col].value_counts(normalize=True, dropna=False)
            col_dict['biggest_category'] = value_shares.nlargest(1).index[0]
            col_dict['biggest_category_perc'] = value_shares.nlargest(1).values[0]
            col_dict['smallest_category'] = value_shares.nsmallest(1).index[0]
            col_dict['smallest_category_perc'] = value_shares.nsmallest(1).values[0]
else:
col_dict['n_unique_values'] = np.nan
col_dict['biggest_category'] = np.nan
col_dict['biggest_category_perc'] = np.nan
col_dict['smallest_category'] = np.nan
col_dict['smallest_category_perc'] = np.nan
if infer_dtype(data[col]) in ['floating', 'integer', 'mixed-integer', 'mixed-integer-float', 'decimal']:
col_dict['mean'] = pd.to_numeric(data[col], errors='coerce').mean()
col_dict['std'] = pd.to_numeric(data[col], errors='coerce').std()
col_dict['min'] = pd.to_numeric(data[col], errors='coerce').min()
col_dict['max'] = pd.to_numeric(data[col], errors='coerce').max()
else:
col_dict['mean'] = np.nan
col_dict['std'] = np.nan
col_dict['min'] = np.nan
col_dict['max'] = np.nan
report[col] = col_dict
results = | pd.DataFrame.from_dict(report) | pandas.DataFrame.from_dict |
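# Hypothetical usage sketch (the toy frame below is an assumption, not part of the
# original code). The report dict is keyed by column name, so transposing the result
# gives one row per feature:
#
#   toy = pd.DataFrame({"city": ["NY", "LA", None], "price": [1.0, 2.5, np.nan]})
#   profile = dataset_profile(toy)
#   profile.T[["inferred_type", "missing_values_perc", "n_unique_values"]]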
from datetime import datetime, timedelta
import dateutil
import numpy as np
import pytest
import pytz
from pandas._libs.tslibs.ccalendar import DAYS, MONTHS
from pandas._libs.tslibs.period import IncompatibleFrequency
from pandas.compat import lrange, range, zip
import pandas as pd
from pandas import DataFrame, Series, Timestamp
from pandas.core.indexes.base import InvalidIndexError
from pandas.core.indexes.datetimes import date_range
from pandas.core.indexes.period import Period, PeriodIndex, period_range
from pandas.core.resample import _get_period_range_edges
import pandas.util.testing as tm
from pandas.util.testing import (
assert_almost_equal, assert_frame_equal, assert_series_equal)
import pandas.tseries.offsets as offsets
@pytest.fixture()
def _index_factory():
return period_range
@pytest.fixture
def _series_name():
return 'pi'
class TestPeriodIndex(object):
@pytest.mark.parametrize('freq', ['2D', '1H', '2H'])
@pytest.mark.parametrize('kind', ['period', None, 'timestamp'])
def test_asfreq(self, series_and_frame, freq, kind):
# GH 12884, 15944
# make sure .asfreq() returns PeriodIndex (except kind='timestamp')
obj = series_and_frame
if kind == 'timestamp':
expected = obj.to_timestamp().resample(freq).asfreq()
else:
start = obj.index[0].to_timestamp(how='start')
end = (obj.index[-1] + obj.index.freq).to_timestamp(how='start')
new_index = date_range(start=start, end=end, freq=freq,
closed='left')
expected = obj.to_timestamp().reindex(new_index).to_period(freq)
result = obj.resample(freq, kind=kind).asfreq()
assert_almost_equal(result, expected)
def test_asfreq_fill_value(self, series):
# test for fill value during resampling, issue 3715
s = series
new_index = date_range(s.index[0].to_timestamp(how='start'),
(s.index[-1]).to_timestamp(how='start'),
freq='1H')
expected = s.to_timestamp().reindex(new_index, fill_value=4.0)
result = s.resample('1H', kind='timestamp').asfreq(fill_value=4.0)
assert_series_equal(result, expected)
frame = s.to_frame('value')
new_index = date_range(frame.index[0].to_timestamp(how='start'),
(frame.index[-1]).to_timestamp(how='start'),
freq='1H')
expected = frame.to_timestamp().reindex(new_index, fill_value=3.0)
result = frame.resample('1H', kind='timestamp').asfreq(fill_value=3.0)
assert_frame_equal(result, expected)
@pytest.mark.parametrize('freq', ['H', '12H', '2D', 'W'])
@pytest.mark.parametrize('kind', [None, 'period', 'timestamp'])
@pytest.mark.parametrize('kwargs', [dict(on='date'), dict(level='d')])
def test_selection(self, index, freq, kind, kwargs):
# This is a bug, these should be implemented
# GH 14008
rng = np.arange(len(index), dtype=np.int64)
df = DataFrame({'date': index, 'a': rng},
index=pd.MultiIndex.from_arrays([rng, index],
names=['v', 'd']))
msg = ("Resampling from level= or on= selection with a PeriodIndex is"
r" not currently supported, use \.set_index\(\.\.\.\) to"
" explicitly set index")
with pytest.raises(NotImplementedError, match=msg):
df.resample(freq, kind=kind, **kwargs)
@pytest.mark.parametrize('month', MONTHS)
@pytest.mark.parametrize('meth', ['ffill', 'bfill'])
@pytest.mark.parametrize('conv', ['start', 'end'])
@pytest.mark.parametrize('targ', ['D', 'B', 'M'])
def test_annual_upsample_cases(self, targ, conv, meth, month,
simple_period_range_series):
ts = simple_period_range_series(
'1/1/1990', '12/31/1991', freq='A-%s' % month)
result = getattr(ts.resample(targ, convention=conv), meth)()
expected = result.to_timestamp(targ, how=conv)
expected = expected.asfreq(targ, meth).to_period()
assert_series_equal(result, expected)
def test_basic_downsample(self, simple_period_range_series):
ts = simple_period_range_series('1/1/1990', '6/30/1995', freq='M')
result = ts.resample('a-dec').mean()
expected = ts.groupby(ts.index.year).mean()
expected.index = period_range('1/1/1990', '6/30/1995', freq='a-dec')
assert_series_equal(result, expected)
# this is ok
assert_series_equal(ts.resample('a-dec').mean(), result)
assert_series_equal(ts.resample('a').mean(), result)
@pytest.mark.parametrize('rule,expected_error_msg', [
('a-dec', '<YearEnd: month=12>'),
('q-mar', '<QuarterEnd: startingMonth=3>'),
('M', '<MonthEnd>'),
('w-thu', '<Week: weekday=3>')
])
def test_not_subperiod(
self, simple_period_range_series, rule, expected_error_msg):
# These are incompatible period rules for resampling
ts = simple_period_range_series('1/1/1990', '6/30/1995', freq='w-wed')
msg = ("Frequency <Week: weekday=2> cannot be resampled to {}, as they"
" are not sub or super periods").format(expected_error_msg)
with pytest.raises(IncompatibleFrequency, match=msg):
ts.resample(rule).mean()
@pytest.mark.parametrize('freq', ['D', '2D'])
def test_basic_upsample(self, freq, simple_period_range_series):
ts = simple_period_range_series('1/1/1990', '6/30/1995', freq='M')
result = ts.resample('a-dec').mean()
resampled = result.resample(freq, convention='end').ffill()
expected = result.to_timestamp(freq, how='end')
expected = expected.asfreq(freq, 'ffill').to_period(freq)
assert_series_equal(resampled, expected)
def test_upsample_with_limit(self):
rng = period_range('1/1/2000', periods=5, freq='A')
ts = Series(np.random.randn(len(rng)), rng)
result = ts.resample('M', convention='end').ffill(limit=2)
expected = ts.asfreq('M').reindex(result.index, method='ffill',
limit=2)
assert_series_equal(result, expected)
def test_annual_upsample(self, simple_period_range_series):
ts = simple_period_range_series('1/1/1990', '12/31/1995', freq='A-DEC')
df = | DataFrame({'a': ts}) | pandas.DataFrame |
import pandas as pd
from matplotlib import pyplot as plt
from matplotlib import dates as mdates
from matplotlib.ticker import (AutoMinorLocator, MultipleLocator, MaxNLocator)
import matplotlib as mpl
import datetime
print("-----------")
print("Plotting Vaccination Data")
d_raw = pd.read_csv("data/csv/vax.csv")
d = d_raw.tail(720)
d = d.iloc[1:]
# fig.subplots_adjust(hspace=0.46, top=0.85, bottom=0)
fig, axs = plt.subplots(4, 1, sharex=True, figsize=(8,8))
x = d["Date"]
x = pd.to_datetime(d["Date"])
SpecPcr = d["jumlah_spesimen_pcr_tcm"]
SpecAntigen = d["jumlah_spesimen_antigen"]
SpecTotal = d["jumlah_spesimen_total"]
PplPcr = d["jumlah_orang_pcr_tcm"]
PplAntigen = d["jumlah_orang_antigen"]
PplTotal = d["jumlah_orang_total"]
SpecTotalCum = d["jumlah_spesimen_total_kum"]
PplTotalCum = d["jumlah_orang_total_kum"]
Vax1 = d["jumlah_vaksinasi_1"]
Vax2 = d["jumlah_vaksinasi_2"]
Vax1Cum = d["jumlah_jumlah_vaksinasi_1_kum"]
Vax2Cum = d["jumlah_jumlah_vaksinasi_2_kum"]
cc = ["#7eca9c","#d0af84","#b8b5ff","#8ac4d0","#4b778d","#206a5d","#91c788","#e2703a","#9e9d89","#f39189"]
df = pd.DataFrame({"Date": | pd.to_datetime(d["Date"]) | pandas.to_datetime |
""" Class for creating datasets for training a supervised translation classifier and for crosslingual information
retrieval.
"""
import pandas as pd
from tqdm import tqdm
from src.features.embedding_features import cosine_similarity_vector
from src.utils.timer import timer
class DataSet:
""" Class for creating datasets for training a supervised translation classifier and for crosslingual information
retrieval.
Attributes:
preprocessed_dataframe (dataframe): Preprocessed parallel translations dataframe.
model_subset (dataframe): Subset of preprocessed_dataframe for training a supervised translation classifier.
        retrieval_subset (dataframe): Subset of preprocessed_dataframe for testing crosslingual retrieval models.
        model_dataset (dataframe): Generated dataset for training a supervised translation classifier.
retrieval_dataset (dataframe): Dataset for testing crosslingual retrieval models.
"""
@timer
def __init__(self, preprocessed_data):
""" Initialize class by importing preprocessed data.
Args:
preprocessed_data (dataframe): Preprocessed dataframe of parallel translations.
"""
self.preprocessed_dataframe = preprocessed_data
self.model_subset = pd.DataFrame()
self.retrieval_subset = pd.DataFrame()
self.model_dataset_index = pd.DataFrame()
self.model_dataset = | pd.DataFrame() | pandas.DataFrame |
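# Hypothetical usage sketch (the file name below is an assumption): the class is
# constructed from an already preprocessed dataframe of parallel translations, and the
# model/retrieval subsets and datasets are then filled by the builder methods defined
# later in the full module:
#
#   parallel_df = pd.read_csv("data/processed/parallel_translations.csv")
#   dataset = DataSet(parallel_df)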
# -*- coding: UTF-8 -*-
# ********************************************************
# * Author : <NAME>
# * Email : <EMAIL>
# * Create time : 2021-07-26 17:03
# * Last modified : 2021-07-27 13:17
# * Filename : quant.py
# * Description :
# *********************************************************
import pandas as pd
import os
import json
from datetime import timedelta, datetime
from dplearn.tools import tick_start, tick_end
# =============================================================================
# ##### K Line Wrapping #####
# =============================================================================
def wrapKLine(data, open_c, close_c, high_c, low_c, vol_c, ts_c, ts_format, wrap):
"""
This is a function of wrapping K-Line dataframe into longer-duration one.
Input:
data: [pandas dataframe] K-Line dataframe
open_c: [string] Column name of open price
close_c: [string] Column name of close price
high_c: [string] Column name of highest price
low_c: [string] Column name of lowest price
vol_c: [string] Column name of lowest price
ts_c: [string] Column name of timestamp
ts_format: [string] Format of timestamp in input data (eg: "%Y-%m-%d %H:%M:%S")
wrap: [string] Time range that you want to wrap
Output:
Pandas dataframe
"""
tick_start("Wraping K Line data")
col_list = [open_c, close_c, high_c, low_c, vol_c, ts_c]
    df = data[col_list].copy()
df[ts_c] = pd.to_datetime(df[ts_c], format=ts_format)
# df["time_group"] = df[ts_c].dt.strftime("%Y-%m-%d@%H")
vol_new = df.groupby(pd.Grouper(key=ts_c, freq=wrap))[vol_c].agg('sum')
open_new = df.groupby(pd.Grouper(key=ts_c, freq=wrap))[open_c].first()
close_new = df.groupby( | pd.Grouper(key=ts_c, freq=wrap) | pandas.Grouper |
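# Hypothetical usage sketch for wrapKLine (the column names are assumptions): wrap
# 1-minute candles into hourly candles; "1H" is a pandas offset alias accepted by
# pd.Grouper's freq argument.
#
#   hourly = wrapKLine(minute_bars, open_c="open", close_c="close", high_c="high",
#                      low_c="low", vol_c="volume", ts_c="ts",
#                      ts_format="%Y-%m-%d %H:%M:%S", wrap="1H")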
#!Python3
#Automated batch email program (Addresses.xlsx)
#by <NAME>
import os, glob, win32com.client, logging, smtplib
import pandas as pd
from email.utils import parseaddr
from email_validator import validate_email, EmailNotValidError
emailCount = 0
logging.basicConfig(filename = 'log.txt', level=logging.DEBUG, format=' %(asctime)s - %(levelname)s - %(message)s')
templateFolder = os.getcwd()+ '\\Email template\\' #set the run folder path
attachmentFolder = os.getcwd()+ '\\Attachments\\' #set attachment folder
# TODO get email template from folder
file = pd.ExcelFile('Email addresses.xlsx') #Establishes the excel file you wish to import into Pandas
logging.debug(file.sheet_names)
#--- Validate Email addresses excel file ---
print("Do you wish to:")
print("1.Validate spreadsheet 2.Test run/draft emails 3.Send emails")
testflag= input()
try:
    testflag = int(testflag)
except ValueError:
    print("Invalid input")
    testflag = 0
print("reading spreadsheet...")
for s in file.sheet_names:
df = file.parse(s) #Uploads Sheet1 from the Excel file into a dataframe
#--- Iterate through all sheets ---
if testflag >= 1:
for index, row in df.iterrows(): #Loops through each row in the dataframe
email = (row['Email Address']) #Sets dataframe variable, 'email' to cells in column 'Email Addresss'
subject = (row['Subject']) #Sets dataframe variable, 'subject' to cells in column 'Subject'
body = (row['Email HTML Body']) #Sets dataframe variable, 'body' to cells in column 'Email HTML Body'
#--- Print warnings ---
if | pd.isnull(email) | pandas.isnull |
import unittest
from setup.settings import *
from numpy.testing import *
from pandas.util.testing import *
import numpy as np
import dolphindb_numpy as dnp
import pandas as pd
import orca
class FunctionStdTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
# connect to a DolphinDB server
orca.connect(HOST, PORT, "admin", "123456")
def test_function_math_std_scalar(self):
self.assertEqual(dnp.std(0.5), np.std(0.5))
self.assertEqual(dnp.std(1), np.std(1))
self.assertEqual(dnp.std(-1), np.std(-1))
self.assertEqual(dnp.std(0), np.std(0))
self.assertEqual(dnp.isnan(dnp.std(dnp.nan)), True)
self.assertEqual(np.isnan(np.std(np.nan)), True)
def test_function_math_std_list(self):
npa = np.std([1, 8, 27, -27, 0, 5, np.nan])
dnpa = dnp.std([1, 8, 27, -27, 0, 5, dnp.nan])
assert_array_equal(dnpa, npa)
def test_function_math_std_array(self):
npa = np.std(np.array([1, 8, 27, -27, 0, 5, np.nan]))
dnpa = dnp.std(dnp.array([1, 8, 27, -27, 0, 5, dnp.nan]))
assert_array_equal(dnpa, npa)
def test_function_math_std_series(self):
ps = | pd.Series([-1, 8, 27, -27, 0, 5, np.nan]) | pandas.Series |
# -*- coding: utf-8 -*-
"""
Created on Fri Sep 1 10:10:01 2017
@author: belinkov
"""
import sys
assert sys.version_info[0] == 3, 'Must run with Python 3'
import os
import numpy as np
import matplotlib
matplotlib.use('Agg')
from matplotlib import pyplot as plt
import matplotlib.gridspec as gridspec
#plt.style.use('ggplot')
import pandas as pd
import seaborn as sns
sns.set_style('darkgrid')
# put all result files here
RESULTS_DIR = '../results'
langs = ['ar', 'es', 'fr', 'ru', 'zh', 'en']
pairs = [['en', 'ar'], ['en', 'es'], ['en', 'fr'], ['en', 'ru'], ['en', 'zh'],
['en', 'en'],
['ar', 'en'], ['es', 'en'], ['fr', 'en'], ['ru', 'en'], ['zh', 'en']]
pairs_en = [['en', 'ar'], ['en', 'es'], ['en', 'fr'], ['en', 'ru'], ['en', 'zh'],
['en', 'en']]
pretty_lang_names = {'en': 'English', 'ar': 'Arabic', 'es': 'Spanish', 'fr': 'French', 'ru': 'Russian', 'zh': 'Chinese'}
pretty_dist_names = {'dist1': '1', 'dist2': '2', 'dist3': '3', 'dist4': '4', 'dist5': '5', 'dist6-7': '6-7', 'dist8-10': '8-10', 'dist10-': '>10' }
pretty_dist_names_list = ('1', '2', '3', '4', '5', '6-7', '8-10', '>10')
en_maj, en_mfl = 0.1332812025*100, 0.4047091533*100
ar_maj, ar_mfl = 0.2256017981*100, 0.5023926914*100
es_maj, es_mfl = 0.1499913599*100, 0.4269051322*100
fr_maj, fr_mfl = 0.1334860475*100, 0.4328404831*100
ru_maj, ru_mfl = 0.1876313145*100, 0.3099479309*100
zh_maj, zh_mfl = 0.1468902015*100, 0.3564107019*100
majs = [en_maj]*6 + [ar_maj, es_maj, fr_maj, ru_maj, zh_maj]
mfls = [en_mfl]*6 + [ar_mfl, es_mfl, fr_mfl, ru_mfl, zh_mfl]
def get_accs_from_df(df, col_pref='acc'):
accs = [df[col].values for col in df.columns if col.startswith(col_pref)]
return accs
def load_data(filename, sep='\t'):
df = pd.read_csv(filename, sep=sep)
df['mean'] = np.mean([df['acc1'], df['acc2'], df['acc3']], axis=0)
df['std'] = np.std([df['acc1'], df['acc2'], df['acc3']], axis=0)
return df
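# Assumed input layout for load_data (a sketch inferred from the columns used above):
# a tab-separated file with one row per configuration and three per-run accuracies,
# for example:
#
#   layer  source  target  acc1  acc2  acc3
#   1      en      ar      0.61  0.60  0.62
#
# load_data then appends 'mean' and 'std' columns computed across acc1..acc3.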
def load_data_by_distance(filename, sep='\t', scale=100):
df = pd.read_csv(filename, sep=sep)
dists = [col for col in df.columns if col.startswith('dist')]
for dist in dists:
df[dist] *= scale
# for source in df.source.unique():
# for target in df.target.unique():
# for layer in df.layer.unique():
# df_source_target_layer = df[(df['source'] == source) & (df['target'] == target) & (df['layer'] == layer)]
# df_mean = df_source_target_layer.mean()
# df_std = df_source_target_layer.std()
# mean_dists = [df_mean[d] for d in dists]
# std_dists = [df_std[d] for d in dists]
# series_mean = pd.Series([layer, source, target, 'mean'] + mean_dists, index=df.columns)
# series_std = pd.Series([layer, source, target, 'std'] + std_dists, index=df.columns)
# df.append(series_mean, ignore_index=True)
# df.append(series_std, ignore_index=True)
return df
def load_data_by_type(filename, sep='\t', scale=100):
df = pd.read_csv(filename, sep=sep)
types = df.columns[4:]
for t in types:
df[t] = df[t]
df[t] *= scale
return df
def load_data_by_type_all(filename, sep='\t', scale=100):
""" Load data for all language pairs by type
Convert types from column to values of a "relation" column
    Empty cells are possible; they correspond to relation types that do not exist in the language, so they will not get corresponding rows
"""
layers, sources, targets, runs, relations, accs = [], [], [], [], [], []
header, types = None, None
with open(filename) as f:
for line in f:
splt = line.strip('\n').split('\t')
            if header is None:
header = splt
types = splt[4:]
continue
layer, source, target, run = splt[:4]
for relation, acc in zip(types, splt[4:]):
if acc != '':
layers.append(layer)
sources.append(source)
targets.append(target)
runs.append(run)
relations.append(relation)
accs.append(float(acc)*scale)
df = | pd.DataFrame({'layer': layers, 'source': sources, 'target': targets, 'run': runs, 'relation': relations, 'accuracy': accs}) | pandas.DataFrame |
'''
MIT License
Copyright (c) 2020 <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
'''
import time
import pandas as pd
from model.auxiliary import parse_scenario_name, percentile
from model.constants import MODEL_NAME
from pointers import METADATA_FILE_PATH, INTERMEDIATE_RESULT_FILE_PATH, FINAL_RESULT_FILE_PATH
def main():
# local variables
intermediate_result = INTERMEDIATE_RESULT_FILE_PATH
output_path = FINAL_RESULT_FILE_PATH
scenarios_array = | pd.read_excel(METADATA_FILE_PATH, sheet_name='SCENARIOS') | pandas.read_excel |
from contextlib import contextmanager
from unittest.mock import patch
from zipfile import ZipFile
from pandas import DataFrame, read_csv
from pandas.util.testing import assert_frame_equal
from pytest import raises, fixture, warns, mark
from IPython import get_ipython
from data_vault import Vault, parse_arguments, VaultMagics
from data_vault.frames import frame_manager
@contextmanager
def file_from_storage(archive_path, file_path, pwd: str = None, mode='r'):
if pwd:
pwd = pwd.encode()
with ZipFile(archive_path) as archive:
yield archive.open(
file_path,
mode=mode,
pwd=pwd
)
ipython = get_ipython()
EXAMPLE_DATA_FRAME = DataFrame([{'a': 1, 'b': 1}, {'a': 1, 'b': 2}])
def patch_ipython_globals(dummy_globals):
return patch.object(frame_manager, 'get_ipython_globals', return_value=dummy_globals)
@fixture
def mock_key(monkeypatch):
monkeypatch.setenv('KEY', 'a_strong_password')
def test_open_vault_message():
with raises(Exception, match='Please setup the storage with %open_vault first'):
ipython.magic('vault del x')
def test_vault_security_alert(tmpdir):
# should warn if not encryption key provided
with warns(UserWarning, match='Encryption variable not set - no encryption will be used..*'):
ipython.magic(f'open_vault --path {tmpdir}/archive.zip')
# should not warn if secure explicitly toggled off
with warns(None) as record:
ipython.magic(f'open_vault --path {tmpdir}/archive.zip --secure False')
assert not record.list
# should not warn if encryption key provided
with warns(None) as record:
ipython.magic(f'open_vault --path {tmpdir}/archive.zip -e SOME_KEY')
assert not record.list
def test_usage_help(tmpdir, mock_key):
ipython.magic(f'open_vault --path {tmpdir}/archive.zip -e KEY')
x = EXAMPLE_DATA_FRAME
with patch_ipython_globals(locals()):
with raises(ValueError, match='No command matched. Did you mean:\n\t - store .*?'):
ipython.magic('vault store x')
def test_variable_not_defined(tmpdir, mock_key):
ipython.magic(f'open_vault --path {tmpdir}/archive.zip -e KEY')
with patch_ipython_globals(locals()):
with raises(ValueError, match=".*variable 'x' is not defined in the global namespace.*"):
ipython.magic('vault store x')
def test_function_not_defined(tmpdir, mock_key):
ipython.magic(f'open_vault --path {tmpdir}/archive.zip -e KEY')
x = EXAMPLE_DATA_FRAME
with patch_ipython_globals(locals()):
with raises(NameError, match="function 'pipe_delimited' is not defined in the global namespace"):
ipython.magic('vault store x in my_frames with pipe_delimited')
def test_store(tmpdir):
ipython.magic(f'open_vault --path {tmpdir}/archive.zip --secure False')
x = EXAMPLE_DATA_FRAME
with patch_ipython_globals(locals()):
ipython.magic('vault store x in my_frames')
with file_from_storage(f'{tmpdir}/archive.zip', 'my_frames/x') as f:
data = read_csv(f, sep='\t', index_col=0)
assert x.equals(data)
def test_store_with_encryption(tmpdir, mock_key):
ipython.magic(f'open_vault --path {tmpdir}/archive.zip -e KEY')
x = EXAMPLE_DATA_FRAME
with patch_ipython_globals(locals()):
ipython.magic('vault store x in my_frames')
with raises(RuntimeError, match="File 'my_frames/x' is encrypted, password required for extraction"):
with file_from_storage(f'{tmpdir}/archive.zip', 'my_frames/x') as f:
data = | read_csv(f, sep='\t', index_col=0) | pandas.read_csv |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# General imports
import numpy as np
import pandas as pd
import os, sys, gc, time, warnings, pickle, psutil, random
from math import ceil
from sklearn.preprocessing import LabelEncoder
#warnings.filterwarnings('ignore')
import json
with open('SETTINGS.json', 'r') as myfile:
datafile=myfile.read()
SETTINGS = json.loads(datafile)
data_path = SETTINGS['RAW_DATA_DIR']
save_data_path = SETTINGS['PROCESSED_DATA_DIR']
def get_memory_usage():
return np.round(psutil.Process(os.getpid()).memory_info()[0]/2.**30, 2)
def sizeof_fmt(num, suffix='B'):
for unit in ['','Ki','Mi','Gi','Ti','Pi','Ei','Zi']:
if abs(num) < 1024.0:
return "%3.1f%s%s" % (num, unit, suffix)
num /= 1024.0
return "%.1f%s%s" % (num, 'Yi', suffix)
def reduce_mem_usage(df, verbose=True):
numerics = ['int16', 'int32', 'int64', 'float16', 'float32', 'float64']
start_mem = df.memory_usage().sum() / 1024**2
for col in df.columns:
col_type = df[col].dtypes
if col_type in numerics:
c_min = df[col].min()
c_max = df[col].max()
if str(col_type)[:3] == 'int':
if c_min > np.iinfo(np.int8).min and c_max < np.iinfo(np.int8).max:
df[col] = df[col].astype(np.int8)
elif c_min > np.iinfo(np.int16).min and c_max < np.iinfo(np.int16).max:
df[col] = df[col].astype(np.int16)
elif c_min > np.iinfo(np.int32).min and c_max < np.iinfo(np.int32).max:
df[col] = df[col].astype(np.int32)
elif c_min > np.iinfo(np.int64).min and c_max < np.iinfo(np.int64).max:
df[col] = df[col].astype(np.int64)
else:
if c_min > np.finfo(np.float16).min and c_max < np.finfo(np.float16).max:
df[col] = df[col].astype(np.float16)
elif c_min > np.finfo(np.float32).min and c_max < np.finfo(np.float32).max:
df[col] = df[col].astype(np.float32)
else:
df[col] = df[col].astype(np.float64)
end_mem = df.memory_usage().sum() / 1024**2
if verbose: print('Mem. usage decreased to {:5.2f} Mb ({:.1f}% reduction)'.format(end_mem, 100 * (start_mem - end_mem) / start_mem))
return df
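# Illustrative note (hypothetical, not executed here): on a toy frame the loop above
# would shrink an int64 column holding 0..999 to int16 and a float64 column whose
# values fit float16's range to float16, which is where the reported reduction comes
# from:
#
#   demo = pd.DataFrame({"qty": np.arange(1000), "price": np.random.rand(1000)})
#   demo = reduce_mem_usage(demo)   # prints 'Mem. usage decreased to ...'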
## Merging by concat to not lose dtypes
def merge_by_concat(df1, df2, merge_on):
merged_gf = df1[merge_on]
merged_gf = merged_gf.merge(df2, on=merge_on, how='left')
new_columns = [col for col in list(merged_gf) if col not in merge_on]
df1 = pd.concat([df1, merged_gf[new_columns]], axis=1)
return df1
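# Design note (added as a sketch): a plain pd.merge on the full df1 can upcast
# downcast or categorical columns when NaNs are introduced; merging only the key
# columns and concatenating the new columns back, as merge_by_concat does, leaves the
# dtypes of df1's existing columns untouched. It is used below e.g. as
# merge_by_concat(grid_df, release_df, ['store_id', 'item_id']).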
################################### FE ###############################################################
########################### Vars
#################################################################################
TARGET = 'sales' # Our main target
END_TRAIN = 1941 #1913 + 28 # Last day in train set
MAIN_INDEX = ['id','d'] # We can identify item by these columns
########################### Load Data
#################################################################################
print('Load Main Data')
# Here we are reading all our data
# without any limitations and dtype modification
train_df = pd.read_csv(data_path+'sales_train_evaluation.csv')
# train_df = pd.read_csv(data_path+'sales_train_validation.csv')
prices_df = pd.read_csv(data_path+'sell_prices.csv')
calendar_df = pd.read_csv(data_path+'calendar.csv')
########################### Make Grid
#################################################################################
print('Create Grid')
# We can transform horizontal representation
# to vertical "view"
# Our "index" will be 'id','item_id','dept_id','cat_id','store_id','state_id'
# and labels are 'd_' coulmns
index_columns = ['id','item_id','dept_id','cat_id','store_id','state_id']
grid_df = pd.melt(train_df,
id_vars = index_columns,
var_name = 'd',
value_name = TARGET)
# If we look at train_df we see that
# we don't have a lot of training rows
# but each day can provide more train data
print('Train rows:', len(train_df), len(grid_df))
# To be able to make predictions
# we need to add "test set" to our grid
add_grid = pd.DataFrame()
for i in range(1,29):
temp_df = train_df[index_columns]
temp_df = temp_df.drop_duplicates()
temp_df['d'] = 'd_'+ str(END_TRAIN+i)
temp_df[TARGET] = np.nan
add_grid = pd.concat([add_grid,temp_df])
grid_df = pd.concat([grid_df,add_grid])
grid_df = grid_df.reset_index(drop=True)
# Remove some temporary DFs
del temp_df, add_grid
# We will not need original train_df
# anymore and can remove it
del train_df
# You don't have to use df = df construction
# you can use inplace=True instead.
# like this
# grid_df.reset_index(drop=True, inplace=True)
# Let's check our memory usage
print("{:>20}: {:>8}".format('Original grid_df',sizeof_fmt(grid_df.memory_usage(index=True).sum())))
# We can free some memory
# by converting "strings" to categorical
# it will not affect merging and
# we will not lose any valuable data
for col in index_columns:
grid_df[col] = grid_df[col].astype('category')
# Let's check again memory usage
print("{:>20}: {:>8}".format('Reduced grid_df',sizeof_fmt(grid_df.memory_usage(index=True).sum())))
########################### Product Release date
#################################################################################
print('Release week')
# It seems that leading zero values
# in each train_df item row
# are not real 0 sales but mean
# absence of the item in the store;
# we can save some memory by removing
# such zeros
# Prices are set by week,
# so we will not have a very accurate release week
release_df = prices_df.groupby(['store_id','item_id'])['wm_yr_wk'].agg(['min']).reset_index()
release_df.columns = ['store_id','item_id','release']
# Now we can merge release_df
grid_df = merge_by_concat(grid_df, release_df, ['store_id','item_id'])
del release_df
# We want to remove some "zeros" rows
# from grid_df
# to do it we need wm_yr_wk column
# let's merge partly calendar_df to have it
grid_df = merge_by_concat(grid_df, calendar_df[['wm_yr_wk','d']], ['d'])
# Now we can cut off some rows
# and save memory
grid_df = grid_df[grid_df['wm_yr_wk']>=grid_df['release']]
grid_df = grid_df.reset_index(drop=True)
# Let's check our memory usage
print("{:>20}: {:>8}".format('Original grid_df',sizeof_fmt(grid_df.memory_usage(index=True).sum())))
# Should we keep release week
# as one of the features?
# Only good CV can give the answer.
# Let's minify the release values.
# Min transformation will not help here
# as int16 -> Integer (-32768 to 32767)
# and our grid_df['release'].max() already fits in int16,
# but we now have an idea of how to transform
# other columns in case we need it
grid_df['release'] = grid_df['release'] - grid_df['release'].min()
grid_df['release'] = grid_df['release'].astype(np.int16)
# Let's check again memory usage
print("{:>20}: {:>8}".format('Reduced grid_df',sizeof_fmt(grid_df.memory_usage(index=True).sum())))
########################### Save part 1
#################################################################################
print('Save Part 1')
# We have our BASE grid ready
# and can save it as pickle file
# for future use (model training)
grid_df.to_pickle(save_data_path+'grid_part_1_eval.pkl')
print('Size:', grid_df.shape)
########################### Prices
#################################################################################
print('Prices')
# We can do some basic aggregations
prices_df['price_max'] = prices_df.groupby(['store_id','item_id'])['sell_price'].transform('max')
prices_df['price_min'] = prices_df.groupby(['store_id','item_id'])['sell_price'].transform('min')
prices_df['price_std'] = prices_df.groupby(['store_id','item_id'])['sell_price'].transform('std')
prices_df['price_mean'] = prices_df.groupby(['store_id','item_id'])['sell_price'].transform('mean')
# and do price normalization (min/max scaling)
prices_df['price_norm'] = prices_df['sell_price']/prices_df['price_max']
# Some items can be inflation dependent
# and some items are very "stable"
prices_df['price_nunique'] = prices_df.groupby(['store_id','item_id'])['sell_price'].transform('nunique')
prices_df['item_nunique'] = prices_df.groupby(['store_id','sell_price'])['item_id'].transform('nunique')
# I would like some "rolling" aggregations
# but would like months and years as "window"
calendar_prices = calendar_df[['wm_yr_wk','month','year']]
calendar_prices = calendar_prices.drop_duplicates(subset=['wm_yr_wk'])
prices_df = prices_df.merge(calendar_prices[['wm_yr_wk','month','year']], on=['wm_yr_wk'], how='left')
del calendar_prices
# Now we can add price "momentum" (some sort of)
# Shifted by week
# by month mean
# by year mean
prices_df['price_momentum'] = prices_df['sell_price']/prices_df.groupby(['store_id','item_id'])['sell_price'].transform(lambda x: x.shift(1))
prices_df['price_momentum_m'] = prices_df['sell_price']/prices_df.groupby(['store_id','item_id','month'])['sell_price'].transform('mean')
prices_df['price_momentum_y'] = prices_df['sell_price']/prices_df.groupby(['store_id','item_id','year'])['sell_price'].transform('mean')
del prices_df['month'], prices_df['year']
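# Worked example (invented numbers) for the momentum features above: if an item sells
# for 2.50 this week and sold for 2.00 the week before, price_momentum is
# 2.50 / 2.00 = 1.25; price_momentum_m and price_momentum_y are the same kind of
# ratio, taken against the item's mean price within the calendar month and year.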
########################### Merge prices and save part 2
#################################################################################
print('Merge prices and save part 2')
# Merge Prices
original_columns = list(grid_df)
grid_df = grid_df.merge(prices_df, on=['store_id','item_id','wm_yr_wk'], how='left')
keep_columns = [col for col in list(grid_df) if col not in original_columns]
grid_df = grid_df[MAIN_INDEX+keep_columns]
grid_df = reduce_mem_usage(grid_df)
# Save part 2
grid_df.to_pickle(save_data_path+'grid_part_2_eval.pkl')
print('Size:', grid_df.shape)
# We don't need prices_df anymore
del prices_df
# We can remove new columns
# or just load part_1
grid_df = pd.read_pickle(save_data_path+'grid_part_1_eval.pkl')
########################### Merge calendar
#################################################################################
grid_df = grid_df[MAIN_INDEX]
# Merge calendar partly
icols = ['date',
'd',
'event_name_1',
'event_type_1',
'event_name_2',
'event_type_2',
'snap_CA',
'snap_TX',
'snap_WI']
grid_df = grid_df.merge(calendar_df[icols], on=['d'], how='left')
# Minify data
# 'snap_' columns we can convert to bool or int8
icols = ['event_name_1',
'event_type_1',
'event_name_2',
'event_type_2',
'snap_CA',
'snap_TX',
'snap_WI']
for col in icols:
grid_df[col] = grid_df[col].astype('category')
# Convert to DateTime
grid_df['date'] = pd.to_datetime(grid_df['date'])
# Make some features from date
grid_df['tm_d'] = grid_df['date'].dt.day.astype(np.int8)
grid_df['tm_w'] = grid_df['date'].dt.week.astype(np.int8)
grid_df['tm_m'] = grid_df['date'].dt.month.astype(np.int8)
grid_df['tm_y'] = grid_df['date'].dt.year
grid_df['tm_y'] = (grid_df['tm_y'] - grid_df['tm_y'].min()).astype(np.int8)
grid_df['tm_wm'] = grid_df['tm_d'].apply(lambda x: ceil(x/7)).astype(np.int8)
grid_df['tm_dw'] = grid_df['date'].dt.dayofweek.astype(np.int8)
grid_df['tm_w_end'] = (grid_df['tm_dw']>=5).astype(np.int8)
# Remove date
del grid_df['date']
########################### Save part 3 (Dates)
#################################################################################
print('Save part 3')
# Save part 3
grid_df.to_pickle(save_data_path+'grid_part_3_eval.pkl')
print('Size:', grid_df.shape)
# We don't need calendar_df anymore
del calendar_df
del grid_df
########################### Some additional cleaning
#################################################################################
## Part 1
# Convert 'd' to int
grid_df = pd.read_pickle(save_data_path+'grid_part_1_eval.pkl')
grid_df['d'] = grid_df['d'].apply(lambda x: x[2:]).astype(np.int16)
# Remove 'wm_yr_wk'
# as test values are not in train set
del grid_df['wm_yr_wk']
grid_df.to_pickle(save_data_path+'grid_part_1_eval.pkl')
del grid_df
########################### Summary
#################################################################################
# Now we have 3 sets of features
grid_df = pd.concat([pd.read_pickle(save_data_path+'grid_part_1_eval.pkl'),
pd.read_pickle(save_data_path+'grid_part_2_eval.pkl').iloc[:,2:],
pd.read_pickle(save_data_path+'grid_part_3_eval.pkl').iloc[:,2:]],
axis=1)
# Let's check again memory usage
print("{:>20}: {:>8}".format('Full Grid',sizeof_fmt(grid_df.memory_usage(index=True).sum())))
print('Size:', grid_df.shape)
# 2.5GiB+ is still too big to train our model
# (on kaggle with its memory limits)
# and we don't have lag features yet
# But what if we can train by state_id or shop_id?
state_id = 'CA'
grid_df = grid_df[grid_df['state_id']==state_id]
print("{:>20}: {:>8}".format('Full Grid',sizeof_fmt(grid_df.memory_usage(index=True).sum())))
# Full Grid: 1.2GiB
store_id = 'CA_1'
grid_df = grid_df[grid_df['store_id']==store_id]
print("{:>20}: {:>8}".format('Full Grid',sizeof_fmt(grid_df.memory_usage(index=True).sum())))
# Full Grid: 321.2MiB
# Seems its good enough now
# In other kernel we will talk about LAGS features
# Thank you.
########################### Final list of features
#################################################################################
#grid_df.info()
######################################################################################################
################################### LAG FEATRES ######################################################
# We will need only train dataset
# to show lags concept
train_df = | pd.read_csv(data_path+'sales_train_evaluation.csv') | pandas.read_csv |
import os
from os import listdir
from os.path import isfile, join
import re
from path import Path
import numpy as np
import pandas as pd
from poor_trader import utils
from poor_trader.utils import quotes_range
from poor_trader.config import INDICATORS_OUTPUT_PATH
def _true_range(df_quotes, indices):
cur = df_quotes.iloc[indices[1]]
prev = df_quotes.iloc[indices[0]]
high, low, prev_close = cur.High, cur.Low, prev.Close
a = utils.roundn(high - low, 4)
b = utils.roundn(abs(high - prev_close), 4)
c = utils.roundn(abs(low - prev_close), 4)
return max(a, b, c)
def true_range(df_quotes):
df = pd.DataFrame(index=df_quotes.index)
df['n_index'] = range(len(df_quotes))
_trf = lambda x: _true_range(df_quotes, [int(i) for i in x])
df['true_range'] = df.n_index.rolling(2).apply(_trf)
return df.filter(like='true_range')
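# Worked example (invented quotes) for the true range above: with a previous close of
# 100, a current high of 103 and a current low of 99, the candidates are
# high - low = 4, |high - prev_close| = 3 and |low - prev_close| = 1, so the
# true_range column gets max(4, 3, 1) = 4 for that bar.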
def SMA(df_quotes, period, field='Close', symbol=None):
if symbol:
outpath = INDICATORS_OUTPUT_PATH / '{}/{}_{}_SMA_{}.pkl'.format(symbol, quotes_range(df_quotes), field, period)
if os.path.exists(outpath):
return pd.read_pickle(outpath)
df = pd.DataFrame(index=df_quotes.index)
df['SMA'] = df_quotes[field].rolling(period).mean()
df = utils.round_df(df)
if symbol:
if not os.path.exists(outpath.parent):
os.makedirs(outpath.parent)
df.to_pickle(outpath)
return df
def STDEV(df_quotes, period, field='Close', symbol=None):
if symbol:
outpath = INDICATORS_OUTPUT_PATH / '{}/{}_{}_STDEV_{}.pkl'.format(symbol, quotes_range(df_quotes), field, period)
if os.path.exists(outpath):
return pd.read_pickle(outpath)
df = | pd.DataFrame(index=df_quotes.index) | pandas.DataFrame |
from functools import partial
import json
import numpy as np
import pandas as pd
import pandas.testing as pdt
import pytest
from solarforecastarbiter.io import utils
# data for test Dataframe
TEST_DICT = {'value': [2.0, 43.9, 338.0, -199.7, 0.32],
'quality_flag': [1, 1, 9, 5, 2]}
DF_INDEX = pd.date_range(start=pd.Timestamp('2019-01-24T00:00'),
freq='1min',
periods=5,
tz='UTC', name='timestamp')
DF_INDEX.freq = None
TEST_DATA = pd.DataFrame(TEST_DICT, index=DF_INDEX)
EMPTY_SERIES = pd.Series(dtype=float)
EMPTY_TIMESERIES = pd.Series([], name='value', index=pd.DatetimeIndex(
[], name='timestamp', tz='UTC'), dtype=float)
EMPTY_DATAFRAME = pd.DataFrame(dtype=float)
EMPTY_TIME_DATAFRAME = pd.DataFrame([], index=pd.DatetimeIndex(
[], name='timestamp', tz='UTC'), dtype=float)
TEST_DATAFRAME = pd.DataFrame({
'25.0': [0.0, 1, 2, 3, 4, 5],
'50.0': [1.0, 2, 3, 4, 5, 6],
'75.0': [2.0, 3, 4, 5, 6, 7]},
index=pd.date_range(start='20190101T0600',
end='20190101T1100',
freq='1h',
tz='America/Denver',
name='timestamp')).tz_convert('UTC')
@pytest.mark.parametrize('dump_quality,default_flag,flag_value', [
(False, None, 1),
(True, 2, 2)
])
def test_obs_df_to_json(dump_quality, default_flag, flag_value):
td = TEST_DATA.copy()
if dump_quality:
del td['quality_flag']
converted = utils.observation_df_to_json_payload(td, default_flag)
converted_dict = json.loads(converted)
assert 'values' in converted_dict
values = converted_dict['values']
assert len(values) == 5
assert values[0]['timestamp'] == '2019-01-24T00:00:00Z'
assert values[0]['quality_flag'] == flag_value
assert isinstance(values[0]['value'], float)
def test_obs_df_to_json_no_quality():
td = TEST_DATA.copy()
del td['quality_flag']
with pytest.raises(KeyError):
utils.observation_df_to_json_payload(td)
def test_obs_df_to_json_no_values():
td = TEST_DATA.copy().rename(columns={'value': 'val1'})
with pytest.raises(KeyError):
utils.observation_df_to_json_payload(td)
def test_forecast_series_to_json():
series = pd.Series([0, 1, 2, 3, 4], index=pd.date_range(
start='2019-01-01T12:00Z', freq='5min', periods=5))
expected = [{'value': 0.0, 'timestamp': '2019-01-01T12:00:00Z'},
{'value': 1.0, 'timestamp': '2019-01-01T12:05:00Z'},
{'value': 2.0, 'timestamp': '2019-01-01T12:10:00Z'},
{'value': 3.0, 'timestamp': '2019-01-01T12:15:00Z'},
{'value': 4.0, 'timestamp': '2019-01-01T12:20:00Z'}]
json_out = utils.forecast_object_to_json(series)
assert json.loads(json_out)['values'] == expected
def test_json_payload_to_observation_df(observation_values,
observation_values_text):
out = utils.json_payload_to_observation_df(
json.loads(observation_values_text))
pdt.assert_frame_equal(out, observation_values)
def test_json_payload_to_forecast_series(forecast_values,
forecast_values_text):
out = utils.json_payload_to_forecast_series(
json.loads(forecast_values_text))
| pdt.assert_series_equal(out, forecast_values) | pandas.testing.assert_series_equal |
"""SDV Sampler."""
import itertools
import exrex
import numpy as np
import pandas as pd
class Sampler:
"""Sampler class.
Args:
metadata (Metadata):
Dataset Metadata.
models (dict):
Fitted table models.
model (SDVModel):
Model class to use to sample data.
model_kwargs (dict):
Additional arguments to create the ``SDVModel``.
table_sizes (dict):
            Dict indicating the sizes of the tables in the original dataset.
"""
metadata = None
models = None
primary_key = None
remaining_primary_key = None
def __init__(self, metadata, models, model, model_kwargs, table_sizes):
self.metadata = metadata
self.models = models
self.primary_key = dict()
self.remaining_primary_key = dict()
self.model = model
self.model_kwargs = model_kwargs
self.table_sizes = table_sizes
def _reset_primary_keys_generators(self):
"""Reset the primary key generators."""
self.primary_key = dict()
self.remaining_primary_key = dict()
def _finalize(self, sampled_data):
"""Do the final touches to the generated data.
This method reverts the previous transformations to go back
to values in the original space and also adds the parent
keys in case foreign key relationships exist between the tables.
Args:
sampled_data (dict):
Generated data
Return:
pandas.DataFrame:
Formatted synthesized data.
"""
final_data = dict()
for table_name, table_rows in sampled_data.items():
parents = self.metadata.get_parents(table_name)
if parents:
for parent_name in parents:
foreign_key = self.metadata.get_foreign_key(parent_name, table_name)
if foreign_key not in table_rows:
parent_ids = self._find_parent_ids(table_name, parent_name, sampled_data)
table_rows[foreign_key] = parent_ids
reversed_data = self.metadata.reverse_transform(table_name, table_rows)
fields = self.metadata.get_fields(table_name)
final_data[table_name] = reversed_data[list(fields.keys())]
return final_data
def _get_primary_keys(self, table_name, num_rows):
"""Return the primary key and amount of values for the requested table.
Args:
table_name (str):
Name of the table to get the primary keys from.
num_rows (str):
Number of ``primary_keys`` to generate.
Returns:
tuple (str, pandas.Series):
primary key name and primary key values. If the table has no primary
key, ``(None, None)`` is returned.
Raises:
ValueError:
If the ``metadata`` contains invalid types or subtypes, or if
there are not enough primary keys left on any of the generators.
NotImplementedError:
If the primary key subtype is a ``datetime``.
"""
primary_key = self.metadata.get_primary_key(table_name)
primary_key_values = None
if primary_key:
field = self.metadata.get_fields(table_name)[primary_key]
generator = self.primary_key.get(table_name)
if generator is None:
if field['type'] != 'id':
raise ValueError('Only columns with type `id` can be primary keys')
subtype = field.get('subtype', 'integer')
if subtype == 'integer':
generator = itertools.count()
remaining = np.inf
elif subtype == 'string':
regex = field.get('regex', r'^[a-zA-Z]+$')
generator = exrex.generate(regex)
remaining = exrex.count(regex)
elif subtype == 'datetime':
raise NotImplementedError('Datetime ids are not yet supported')
else:
raise ValueError('Only `integer` or `string` id columns are supported.')
self.primary_key[table_name] = generator
self.remaining_primary_key[table_name] = remaining
else:
remaining = self.remaining_primary_key[table_name]
if remaining < num_rows:
raise ValueError(
'Not enough unique values for primary key of table {}'
' to generate {} samples.'.format(table_name, num_rows)
)
self.remaining_primary_key[table_name] -= num_rows
primary_key_values = pd.Series([x for i, x in zip(range(num_rows), generator)])
return primary_key, primary_key_values
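# Standalone illustration of the two generator kinds used above (regex is assumed,
# for illustration only): integer ids come from itertools.count(), string ids are
# enumerated by exrex from the field's regex.
#
#     gen = itertools.count()
#     [x for i, x in zip(range(3), gen)]        # -> [0, 1, 2]
#     gen = exrex.generate(r'[a-z]{2}')
#     [x for i, x in zip(range(3), gen)]        # -> e.g. ['aa', 'ab', 'ac']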
def _extract_parameters(self, parent_row, table_name):
"""Get the params from a generated parent row.
Args:
parent_row (pandas.Series):
A generated parent row.
table_name (str):
Name of the table to make the model for.
"""
prefix = '__{}__'.format(table_name)
keys = [key for key in parent_row.keys() if key.startswith(prefix)]
new_keys = {key: key[len(prefix):] for key in keys}
flat_parameters = parent_row[keys]
return flat_parameters.rename(new_keys).to_dict()
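# Example of the prefix handling above (key names are illustrative): for
# parent_row = {'__sessions__child_rows': 5, '__sessions__covariance__0__0': 0.1, 'age': 31}
# a call with table_name='sessions' keeps only the '__sessions__' keys and strips
# the prefix, returning {'child_rows': 5, 'covariance__0__0': 0.1}.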
def _sample_rows(self, model, num_rows, table_name):
"""Sample ``num_rows`` from ``model``.
Args:
model (copula.multivariate.base):
Fitted model.
num_rows (int):
Number of rows to sample.
table_name (str):
Name of the table to sample from.
Returns:
pandas.DataFrame:
Sampled rows, shape (num_rows, num_columns).
"""
primary_key_name, primary_key_values = self._get_primary_keys(table_name, num_rows)
sampled = model.sample(num_rows)
if primary_key_name:
sampled[primary_key_name] = primary_key_values
return sampled
def _sample_children(self, table_name, sampled_data, table_rows=None):
if table_rows is None:
table_rows = sampled_data[table_name]
for child_name in self.metadata.get_children(table_name):
for _, row in table_rows.iterrows():
self._sample_child_rows(child_name, table_name, row, sampled_data)
def _sample_child_rows(self, table_name, parent_name, parent_row, sampled_data):
parameters = self._extract_parameters(parent_row, table_name)
model = self.model(**self.model_kwargs)
model.set_parameters(parameters)
num_rows = max(round(parameters['child_rows']), 0)
table_rows = self._sample_rows(model, num_rows, table_name)
parent_key = self.metadata.get_primary_key(parent_name)
foreign_key = self.metadata.get_foreign_key(parent_name, table_name)
table_rows[foreign_key] = parent_row[parent_key]
previous = sampled_data.get(table_name)
if previous is None:
sampled_data[table_name] = table_rows
else:
sampled_data[table_name] = pd.concat([previous, table_rows]).reset_index(drop=True)
self._sample_children(table_name, sampled_data, table_rows)
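# In words: for each sampled parent row, the flat parameters stored in that row
# are unpacked, a fresh child model is rebuilt from them, 'child_rows' rows are
# sampled, the parent's primary key is copied into the child's foreign key
# column, and the same procedure then recurses into the grandchildren.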
@staticmethod
def _find_parent_id(likelihoods, num_rows):
mean = likelihoods.mean()
if (likelihoods == 0).all():
# All rows got 0 likelihood, fallback to num_rows
likelihoods = num_rows
elif pd.isnull(mean) or mean == 0:
# Some rows got a singular matrix error and the rest were 0.
# Fallback to num_rows on the singular matrix rows and
# keep 0s on the rest.
likelihoods = likelihoods.fillna(num_rows)
else:
# at least one row got a valid likelihood, so fill the
# rows that got a singular matrix error with the mean
likelihoods = likelihoods.fillna(mean)
weights = likelihoods.values / likelihoods.sum()
return np.random.choice(likelihoods.index, p=weights)
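# Worked example of the weighting above (values are illustrative): with
# likelihoods = pd.Series({10: 0.2, 11: 0.6, 12: 0.2}) the weights become
# [0.2, 0.6, 0.2], so np.random.choice returns parent id 11 about 60% of the time.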
def _get_likelihoods(self, table_rows, parent_rows, table_name):
likelihoods = dict()
for parent_id, row in parent_rows.iterrows():
parameters = self._extract_parameters(row, table_name)
model = self.model(**self.model_kwargs)
model.set_parameters(parameters)
try:
likelihoods[parent_id] = model.model.probability_density(table_rows)
except np.linalg.LinAlgError:
likelihoods[parent_id] = None
return pd.DataFrame(likelihoods, index=table_rows.index)  # api: pandas.DataFrame
# coding=utf-8
"""
Graph - the name of the current project.
instrument.py - the name of the new file which you specify in the New File
dialog box during the file creation.
Hossein - the login name of the current user.
6 / 15 / 18 - the current system date.
8: 03 AM - the current system time.
PyCharm - the name of the IDE in which the file will be created.
"""
import tensorflow as tf
import pandas as pd
import datetime
from price_fetcher.bigquery import GoogleQuery
chunck_size = 72
n_chuncks = 10
sess = tf.Session()
# First let's load meta graph and restore weights
saver = tf.train.import_meta_graph('../ttp_model/my_model.meta')
saver.restore(sess, tf.train.latest_checkpoint('../ttp_model'))
# Now, let's access and create placeholders variables and
# create feed-dict to feed new data
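# A minimal sketch of that step (the tensor names are assumptions; they depend on
# how the graph saved in my_model.meta was originally built):
#
#     graph = tf.get_default_graph()
#     x = graph.get_tensor_by_name('x:0')
#     prediction = graph.get_tensor_by_name('prediction:0')
#     output = sess.run(prediction, feed_dict={x: new_data})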
def get_raw_data(ticker):
"""
gets the data for the list of tickers
:return: dataframe
"""
google = GoogleQuery(ticker, dataset_id='my_dataset')
query = google.query(last=3600)
query = query.sort_index()
if query.empty:
return query
dates = list(set(query.index.date))
print(ticker)
result = pd.DataFrame(columns=['Price', 'Volume'])  # api: pandas.DataFrame
"""
@brief test log(time=4s)
"""
import unittest
from logging import getLogger
import warnings
import numpy
from pandas import DataFrame
from pyquickhelper.pycode import ExtTestCase
from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split
from sklearn.svm import SVR, SVC
from mlprodict.onnx_conv import register_converters, to_onnx
from mlprodict.onnxrt import OnnxInference
from mlprodict.tools.asv_options_helper import get_ir_version_from_onnx
class TestOnnxConvSVM(ExtTestCase):
def setUp(self):
logger = getLogger('skl2onnx')
logger.disabled = True
def test_register_converters(self):
with warnings.catch_warnings():
warnings.simplefilter("ignore", ResourceWarning)
res = register_converters(True)
self.assertGreater(len(res), 2)
def onnx_test_svm_single_classreg(self, dtype, n_targets=1, debug=False,
add_noise=False, runtime='python',
target_opset=None,
kind='reg', level=1, **kwargs):
iris = load_iris()
X, y = iris.data, iris.target
if add_noise:
X += numpy.random.randn(X.shape[0], X.shape[1]) * 10
if kind == 'reg':
y = y.astype(dtype)
elif kind == 'bin':
y = (y % 2).astype(numpy.int64)
elif kind == 'mcl':
y = y.astype(numpy.int64)
else:
raise AssertionError("unknown '{}'".format(kind))
if n_targets != 1:
yn = numpy.empty((y.shape[0], n_targets), dtype=dtype)
for i in range(n_targets):
yn[:, i] = y + i
y = yn
X_train, X_test, y_train, _ = train_test_split(X, y, random_state=11)
X_test = X_test.astype(dtype)
if kind in ('bin', 'mcl'):
clr = SVC(**kwargs)
elif kind == 'reg':
clr = SVR(**kwargs)
clr.fit(X_train, y_train)
model_def = to_onnx(clr, X_train.astype(dtype),
rewrite_ops=True,
target_opset=target_opset)
if 'onnxruntime' in runtime:
model_def.ir_version = get_ir_version_from_onnx()
try:
oinf = OnnxInference(model_def, runtime=runtime)
except RuntimeError as e:
if debug:
raise RuntimeError(
"Unable to create a model\n{}".format(model_def)) from e
raise e
if debug:
y = oinf.run({'X': X_test}, verbose=level, fLOG=print)
else:
y = oinf.run({'X': X_test})
lexp = clr.predict(X_test)
if kind == 'reg':
self.assertEqual(list(sorted(y)), ['variable'])
if dtype == numpy.float32:
self.assertEqualArray(
lexp.ravel(), y['variable'].ravel(), decimal=5)
else:
self.assertEqualArray(lexp, y['variable'], decimal=5)
else:
self.assertEqual(list(sorted(y)),
['output_label', 'output_probability'])
self.assertEqualArray(lexp, y['output_label'])
lprob = clr.predict_proba(X_test)
self.assertEqualArray(
lprob, DataFrame(y['output_probability']))  # api: pandas.DataFrame
from pandas.compat import range, lrange
import numpy as np
import pandas as pd
import pandas.core.common as com
from pandas.core.frame import DataFrame
import pandas.core.nanops as nanops
from pandas.tseries.tools import isleapyear  # used by the leap-year adjustments below
def pivot_annual(series, freq=None):
"""
Group a series by years, taking leap years into account.
The output has as many rows as distinct years in the original series,
and as many columns as the length of a leap year in the units corresponding
to the original frequency (366 for daily frequency, 366*24 for hourly...).
The first column of the output corresponds to Jan. 1st, 00:00:00,
while the last column corresponds to Dec, 31st, 23:59:59.
Entries corresponding to Feb. 29th are masked for non-leap years.
For example, if the initial series has a daily frequency, the 59th column
of the output always corresponds to Feb. 28th, the 61st column to Mar. 1st,
and the 60th column is masked for non-leap years.
With an hourly initial frequency, the (59*24)th column of the output always
corresponds to Feb. 28th 23:00, the (61*24)th column to Mar. 1st, 00:00, and
the 24 columns between (59*24) and (61*24) are masked.
If the original frequency is less than daily, the output is equivalent to
``series.convert('A', func=None)``.
Parameters
----------
series : TimeSeries
freq : string or None, default None
Returns
-------
annual : DataFrame
"""
index = series.index
year = index.year
years = nanops.unique1d(year)
if freq is not None:
freq = freq.upper()
else:
freq = series.index.freq
if freq == 'D':
width = 366
offset = index.dayofyear - 1
# adjust for leap year
offset[(~isleapyear(year)) & (offset >= 59)] += 1
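# e.g. Mar. 1st of a non-leap year has dayofyear 60, i.e. offset 59, which is the
# Feb. 29th slot; shifting it (and every later day) forward by one keeps the
# Feb. 29th column empty for that year and puts Dec. 31st at offset 365.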
columns = lrange(1, 367)
# todo: strings like 1/1, 1/25, etc.?
elif freq in ('M', 'BM'):
width = 12
offset = index.month - 1
columns = lrange(1, 13)
elif freq == 'H':
width = 8784
grouped = series.groupby(series.index.year)
defaulted = grouped.apply(lambda x: x.reset_index(drop=True))
defaulted.index = defaulted.index.droplevel(0)
offset = np.asarray(defaulted.index)
offset[~isleapyear(year) & (offset >= 1416)] += 24
columns = lrange(1, 8785)
else:
raise NotImplementedError(freq)
flat_index = (year - years.min()) * width + offset
flat_index = com._ensure_platform_int(flat_index)  # api: pandas.core.common._ensure_platform_int
"""
After extracting skills by clustering skill sentences, in this script names and
examples are given to each skill.
The skills_data output is a dictionary with the following fields for each skill number:
'Skills name': The ngram closest (by cosine similarity) to the centroid of all the
sentence embeddings which were clustered to create the skill,
or the shortest skill cluster description.
'Examples': The original sentences which are closest to the centroid of the skill cluster.
'Texts': All the cleaned sentences that went into creating the skill cluster.
Usage:
python -i skills_taxonomy_v2/pipeline/skills_extraction/skills_naming.py --config_path 'skills_taxonomy_v2/config/skills_extraction/2021.11.05.yaml'
"""
from argparse import ArgumentParser
import logging
import yaml
import pandas as pd
from tqdm import tqdm
import boto3
from skills_taxonomy_v2.getters.s3_data import load_s3_data, save_to_s3, get_s3_data_paths
from skills_taxonomy_v2 import BUCKET_NAME
from skills_taxonomy_v2.pipeline.skills_extraction.skills_naming_utils import (
get_skill_info,
)
from skills_taxonomy_v2.pipeline.skills_extraction.extract_skills_utils import (
get_output_config_stamped,
)
logger = logging.getLogger(__name__)
def load_process_sentence_data(s3, reduced_embeddings_paths):
sentences_data = pd.DataFrame()
for reduced_embeddings_path in tqdm(reduced_embeddings_paths):
sentences_data_i = load_s3_data(
s3, BUCKET_NAME,
reduced_embeddings_path
)
sentences_data = pd.concat([sentences_data, pd.DataFrame(sentences_data_i)])
sentences_data.reset_index(drop=True, inplace=True)
logger.info(f"{len(sentences_data)} sentences loaded")
return sentences_data
def parse_arguments(parser):
parser.add_argument(
"--config_path",
help="Path to config file",
default="skills_taxonomy_v2/config/skills_extraction/2021.11.05.yaml",
)
return parser.parse_args()
if __name__ == "__main__":
parser = ArgumentParser()
args = parse_arguments(parser)
with open(args.config_path, "r") as f:
config = yaml.load(f, Loader=yaml.FullLoader)
FLOW_ID = "name_skills"
flow_config = config["flows"][FLOW_ID]
params = flow_config["params"]
s3 = boto3.resource("s3")
# Load data
skill_sentences = load_s3_data(s3, BUCKET_NAME, params["skill_sentences_path"])
reduced_embeddings_paths = get_s3_data_paths(
s3,
BUCKET_NAME,
params["skills_embeds_path"],
file_types=["*sentences_data_*.json"]
)
skills_embeds_df = load_process_sentence_data(s3, reduced_embeddings_paths)
sent_cluster_embeds = load_s3_data(
s3, BUCKET_NAME, params["mean_skills_embeds_path"]
)
skills = load_s3_data(s3, BUCKET_NAME, params["skills_path"])
# wrangle data in the format needed
skill_sentences_df = pd.DataFrame(skill_sentences)[
["job id", "sentence id", "Cluster number predicted"]
]
merged_sents_embeds = pd.merge(
skills_embeds_df, skill_sentences_df, on=["job id", "sentence id"], how='left'
)
merged_sents_embeds = merged_sents_embeds[
merged_sents_embeds["Cluster number predicted"] != -2
]
skills_df = pd.DataFrame(skills)  # api: pandas.DataFrame
import csv
import shutil
import io
import math
import os
import tempfile
import time
from builtins import zip
import pandas as pd
import pytest
from unittest.mock import call, MagicMock
from tests.unit.test_utils.unit_utils import StringIOContextManager
from synapseclient import client, Entity, Synapse
from synapseclient.core.exceptions import SynapseError, SynapseTimeoutError
from synapseclient.entity import split_entity_namespaces
import synapseclient.table
from synapseclient.table import Column, Schema, CsvFileTable, TableQueryResult, cast_values, \
as_table_columns, Table, build_table, RowSet, SelectColumn, EntityViewSchema, RowSetTable, Row, PartialRow, \
PartialRowset, SchemaBase, _get_view_type_mask_for_deprecated_type, EntityViewType, _get_view_type_mask, \
MAX_NUM_TABLE_COLUMNS, SubmissionViewSchema, escape_column_name, join_column_names
from synapseclient.core.utils import from_unix_epoch_time
from unittest.mock import patch
from collections import OrderedDict
def test_cast_values():
selectColumns = [{'id': '353',
'name': 'name',
'columnType': 'STRING'},
{'id': '354',
'name': 'foo',
'columnType': 'STRING'},
{'id': '355',
'name': 'x',
'columnType': 'DOUBLE'},
{'id': '356',
'name': 'n',
'columnType': 'INTEGER'},
{'id': '357',
'name': 'bonk',
'columnType': 'BOOLEAN'},
{'id': '358',
'name': 'boom',
'columnType': 'LINK'}]
row = ('Finklestein', 'bat', '3.14159', '65535', 'true', 'https://www.synapse.org/')
assert (
cast_values(row, selectColumns) ==
['Finklestein', 'bat', 3.14159, 65535, True, 'https://www.synapse.org/']
)
# group by
selectColumns = [{'name': 'bonk',
'columnType': 'BOOLEAN'},
{'name': 'COUNT(name)',
'columnType': 'INTEGER'},
{'name': 'AVG(x)',
'columnType': 'DOUBLE'},
{'name': 'SUM(n)',
'columnType': 'INTEGER'}]
row = ('true', '211', '1.61803398875', '1421365')
assert cast_values(row, selectColumns) == [True, 211, 1.61803398875, 1421365]
def test_cast_values__unknown_column_type():
selectColumns = [{'id': '353',
'name': 'name',
'columnType': 'INTEGER'},
{'id': '354',
'name': 'foo',
'columnType': 'DEFINTELY_NOT_A_EXISTING_TYPE'},
]
row = ('123', 'othervalue')
assert (
cast_values(row, selectColumns) ==
[123, 'othervalue']
)
def test_cast_values__list_type():
selectColumns = [{'id': '354',
'name': 'foo',
'columnType': 'STRING_LIST'},
{'id': '356',
'name': 'n',
'columnType': 'INTEGER_LIST'},
{'id': '357',
'name': 'bonk',
'columnType': 'BOOLEAN_LIST'},
{'id': '358',
'name': 'boom',
'columnType': 'DATE_LIST'}]
now_millis = int(round(time.time() * 1000))
row = ('["foo", "bar"]', '[1,2,3]', '[true, false]', '[' + str(now_millis) + ']')
assert (
cast_values(row, selectColumns) ==
[["foo", "bar"], [1, 2, 3], [True, False], [from_unix_epoch_time(now_millis)]]
)
def test_schema():
schema = Schema(name='My Table', parent="syn1000001")
assert not schema.has_columns()
schema.addColumn(Column(id='1', name='Name', columnType='STRING'))
assert schema.has_columns()
assert schema.properties.columnIds == ['1']
schema.removeColumn('1')
assert not schema.has_columns()
assert schema.properties.columnIds == []
schema = Schema(name='Another Table', parent="syn1000001")
schema.addColumns([
Column(name='Name', columnType='STRING'),
Column(name='Born', columnType='INTEGER'),
Column(name='Hipness', columnType='DOUBLE'),
Column(name='Living', columnType='BOOLEAN')])
assert schema.has_columns()
assert len(schema.columns_to_store) == 4
assert Column(name='Name', columnType='STRING') in schema.columns_to_store
assert Column(name='Born', columnType='INTEGER') in schema.columns_to_store
assert Column(name='Hipness', columnType='DOUBLE') in schema.columns_to_store
assert Column(name='Living', columnType='BOOLEAN') in schema.columns_to_store
schema.removeColumn(Column(name='Living', columnType='BOOLEAN'))
assert schema.has_columns()
assert len(schema.columns_to_store) == 3
assert Column(name='Living', columnType='BOOLEAN') not in schema.columns_to_store
assert Column(name='Hipness', columnType='DOUBLE') in schema.columns_to_store
def test_RowSetTable():
row_set_json = {
'etag': 'aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee',
'headers': [
{'columnType': 'STRING', 'id': '353', 'name': 'name'},
{'columnType': 'DOUBLE', 'id': '355', 'name': 'x'},
{'columnType': 'DOUBLE', 'id': '3020', 'name': 'y'},
{'columnType': 'INTEGER', 'id': '891', 'name': 'n'}],
'rows': [{
'rowId': 5,
'values': ['foo', '1.23', '2.2', '101'],
'versionNumber': 3},
{'rowId': 6,
'values': ['bar', '1.34', '2.4', '101'],
'versionNumber': 3},
{'rowId': 7,
'values': ['foo', '1.23', '2.2', '101'],
'versionNumber': 4},
{'rowId': 8,
'values': ['qux', '1.23', '2.2', '102'],
'versionNumber': 3}],
'tableId': 'syn2976298'}
row_set = RowSet.from_json(row_set_json)
assert row_set.etag == 'aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee'
assert row_set.tableId == 'syn2976298'
assert len(row_set.headers) == 4
assert len(row_set.rows) == 4
schema = Schema(id="syn2976298", name="Bogus Schema", columns=[353, 355, 3020, 891], parent="syn1000001")
table = Table(schema, row_set)
assert table.etag == 'aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee'
assert table.tableId == 'syn2976298'
assert len(table.headers) == 4
assert len(table.asRowSet().rows) == 4
df = table.asDataFrame()
assert df.shape == (4, 4)
assert list(df['name']) == ['foo', 'bar', 'foo', 'qux']
def test_as_table_columns__with_pandas_DataFrame():
df = pd.DataFrame({
'foobar': ("foo", "bar", "baz", "qux", "asdf"),
'x': tuple(math.pi*i for i in range(5)),
'n': (101, 202, 303, 404, 505),
'really': (False, True, False, True, False),
'size': ('small', 'large', 'medium', 'medium', 'large')},
columns=['foobar', 'x', 'n', 'really', 'size'])
cols = as_table_columns(df)
expected_columns = [
{'defaultValue': '',
'columnType': 'STRING',
'name': 'foobar',
'maximumSize': 30,
'concreteType': 'org.sagebionetworks.repo.model.table.ColumnModel'},
{'columnType': 'DOUBLE',
'name': 'x',
u'concreteType': 'org.sagebionetworks.repo.model.table.ColumnModel'},
{'columnType': 'INTEGER',
'name': 'n',
'concreteType': 'org.sagebionetworks.repo.model.table.ColumnModel'},
{'columnType': 'BOOLEAN',
'name': 'really',
'concreteType': 'org.sagebionetworks.repo.model.table.ColumnModel'},
{'defaultValue': '',
'columnType': 'STRING',
'name': 'size',
'maximumSize': 30,
'concreteType': 'org.sagebionetworks.repo.model.table.ColumnModel'}
]
assert expected_columns == cols
def test_as_table_columns__with_non_supported_input_type():
pytest.raises(ValueError, as_table_columns, dict(a=[1, 2, 3], b=["c", "d", "e"]))
def test_as_table_columns__with_csv_file():
string_io = StringIOContextManager(
'ROW_ID,ROW_VERSION,Name,Born,Hipness,Living\n'
'"1", "1", "<NAME>", 1926, 8.65, False\n'
'"2", "1", "<NAME>", 1926, 9.87, False'
)
cols = as_table_columns(string_io)
assert cols[0]['name'] == 'Name'
assert cols[0]['columnType'] == 'STRING'
assert cols[1]['name'] == 'Born'
assert cols[1]['columnType'] == 'INTEGER'
assert cols[2]['name'] == 'Hipness'
assert cols[2]['columnType'] == 'DOUBLE'
assert cols[3]['name'] == 'Living'
assert cols[3]['columnType'] == 'STRING'
def test_dict_to_table():
d = dict(a=[1, 2, 3], b=["c", "d", "e"])
df = pd.DataFrame(d)
schema = Schema(name="Baz", parent="syn12345", columns=as_table_columns(df))
with patch.object(CsvFileTable, "from_data_frame") as mocked_from_data_frame:
Table(schema, d)
# call_agrs is a tuple with values and name
agrs_list = mocked_from_data_frame.call_args[0]
# getting the second argument
df_agr = agrs_list[1]
assert df_agr.equals(df)
def test_pandas_to_table():
df = pd.DataFrame(dict(a=[1, 2, 3], b=["c", "d", "e"]))
schema = Schema(name="Baz", parent="syn12345", columns=as_table_columns(df))
# A dataframe with no row id and version
table = Table(schema, df)
for i, row in enumerate(table):
assert row[0] == (i + 1)
assert row[1] == ["c", "d", "e"][i]
assert len(table) == 3
# If includeRowIdAndRowVersion=True, include empty row id an versions
# ROW_ID,ROW_VERSION,a,b
# ,,1,c
# ,,2,d
# ,,3,e
table = Table(schema, df, includeRowIdAndRowVersion=True)
for i, row in enumerate(table):
assert row[0] is None
assert row[1] is None
assert row[2] == (i + 1)
# A dataframe with row id and version encoded in the index
df = pd.DataFrame(index=["1_7", "2_7", "3_8"], data=dict(a=[100, 200, 300], b=["c", "d", "e"]))
table = Table(schema, df)
for i, row in enumerate(table):
assert row[0] == ["1", "2", "3"][i]
assert row[1] == ["7", "7", "8"][i]
assert row[2] == (i + 1) * 100
assert row[3] == ["c", "d", "e"][i]
# A dataframe with row id and version in columns
df = pd.DataFrame(dict(ROW_ID=["0", "1", "2"], ROW_VERSION=["8", "9", "9"], a=[100, 200, 300], b=["c", "d", "e"]))
table = Table(schema, df)
for i, row in enumerate(table):
assert row[0] == ["0", "1", "2"][i]
assert row[1] == ["8", "9", "9"][i]
assert row[2] == (i + 1) * 100
assert row[3] == ["c", "d", "e"][i]
def test_csv_table():
# Maybe not truly a unit test, but here because it doesn't do
# network IO to synapse
data = [["1", "1", "<NAME>", 1926, 8.65, False],
["2", "1", "<NAME>", 1926, 9.87, False],
["3", "1", "<NAME>", 1929, 7.65, False],
["4", "1", "<NAME>", 1935, 5.14, False],
["5", "1", "<NAME>", 1929, 5.78, True],
["6", "1", "<NAME>", 1936, 4.21, False],
["7", "1", "<NAME>", 1930, 8.99, True],
["8", "1", "<NAME>", 1931, 4.37, True]]
filename = None
cols = [Column(id='1', name='Name', columnType='STRING'),
Column(id='2', name='Born', columnType='INTEGER'),
Column(id='3', name='Hipness', columnType='DOUBLE'),
Column(id='4', name='Living', columnType='BOOLEAN')]
schema1 = Schema(id='syn1234', name='<NAME>', columns=cols, parent="syn1000001")
# TODO: use StringIO.StringIO(data) rather than writing files
try:
# create CSV file
with tempfile.NamedTemporaryFile(delete=False) as temp:
filename = temp.name
with io.open(filename, mode='w', encoding="utf-8", newline='') as temp:
writer = csv.writer(temp, quoting=csv.QUOTE_NONNUMERIC, lineterminator=str(os.linesep))
headers = ['ROW_ID', 'ROW_VERSION'] + [col.name for col in cols]
writer.writerow(headers)
for row in data:
writer.writerow(row)
table = Table(schema1, filename)
assert isinstance(table, CsvFileTable)
# need to set column headers to read a CSV file
table.setColumnHeaders(
[SelectColumn(name="ROW_ID", columnType="STRING"),
SelectColumn(name="ROW_VERSION", columnType="STRING")] +
[SelectColumn.from_column(col) for col in cols])
# test iterator
for table_row, expected_row in zip(table, data):
assert table_row == expected_row
# test asRowSet
rowset = table.asRowSet()
for rowset_row, expected_row in zip(rowset.rows, data):
assert rowset_row['values'] == expected_row[2:]
assert rowset_row['rowId'] == expected_row[0]
assert rowset_row['versionNumber'] == expected_row[1]
df = table.asDataFrame()
assert list(df['Name']) == [row[2] for row in data]
assert list(df['Born']) == [row[3] for row in data]
assert list(df['Living']) == [row[5] for row in data]
assert list(df.index) == ['%s_%s' % tuple(row[0:2]) for row in data]
assert df.shape == (8, 4)
except Exception:
if filename:
try:
if os.path.isdir(filename):
shutil.rmtree(filename)
else:
os.remove(filename)
except Exception as ex:
print(ex)
raise
def test_list_of_rows_table():
data = [["<NAME>", 1926, 8.65, False],
["<NAME>", 1926, 9.87, False],
["<NAME>", 1929, 7.65, False],
["<NAME>", 1935, 5.14, False],
["<NAME>", 1929, 5.78, True],
["<NAME>", 1936, 4.21, False],
["<NAME>", 1930, 8.99, True],
["<NAME>", 1931, 4.37, True]]
cols = [Column(id='1', name='Name', columnType='STRING'),
Column(id='2', name='Born', columnType='INTEGER'),
Column(id='3', name='Hipness', columnType='DOUBLE'),
Column(id='4', name='Living', columnType='BOOLEAN')]
schema1 = Schema(name='Jazz Guys', columns=cols, id="syn1000002", parent="syn1000001")
# need columns to do cast_values w/o storing
table = Table(schema1, data, headers=[SelectColumn.from_column(col) for col in cols])
for table_row, expected_row in zip(table, data):
assert table_row == expected_row
rowset = table.asRowSet()
for rowset_row, expected_row in zip(rowset.rows, data):
assert rowset_row['values'] == expected_row
table.columns = cols
df = table.asDataFrame()
assert list(df['Name']) == [r[0] for r in data]
def test_aggregate_query_result_to_data_frame():
class MockSynapse(object):
def _queryTable(self, query, limit=None, offset=None, isConsistent=True, partMask=None):
return {'concreteType': 'org.sagebionetworks.repo.model.table.QueryResultBundle',
'maxRowsPerPage': 2,
'queryCount': 4,
'queryResult': {
'concreteType': 'org.sagebionetworks.repo.model.table.QueryResult',
'nextPageToken': 'aaaaaaaa',
'queryResults': {'etag': 'aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee',
'headers': [
{'columnType': 'STRING', 'name': 'State'},
{'columnType': 'INTEGER', 'name': 'MIN(Born)'},
{'columnType': 'INTEGER', 'name': 'COUNT(State)'},
{'columnType': 'DOUBLE', 'name': 'AVG(Hipness)'}],
'rows': [
{'values': ['PA', '1935', '2', '1.1']},
{'values': ['MO', '1928', '3', '2.38']}],
'tableId': 'syn2757980'}},
'selectColumns': [{
'columnType': 'STRING',
'id': '1387',
'name': 'State'}]}
def _queryTableNext(self, nextPageToken, tableId):
return {'concreteType': 'org.sagebionetworks.repo.model.table.QueryResult',
'queryResults': {'etag': 'aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee',
'headers': [
{'columnType': 'STRING', 'name': 'State'},
{'columnType': 'INTEGER', 'name': 'MIN(Born)'},
{'columnType': 'INTEGER', 'name': 'COUNT(State)'},
{'columnType': 'DOUBLE', 'name': 'AVG(Hipness)'}],
'rows': [
{'values': ['DC', '1929', '1', '3.14']},
{'values': ['NC', '1926', '1', '4.38']}],
'tableId': 'syn2757980'}}
result = TableQueryResult(synapse=MockSynapse(),
query="select State, min(Born), count(State), avg(Hipness) from syn2757980 "
"group by Living")
assert result.etag == 'aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee'
assert result.tableId == 'syn2757980'
assert len(result.headers) == 4
rs = result.asRowSet()
assert len(rs.rows) == 4
result = TableQueryResult(synapse=MockSynapse(),
query="select State, min(Born), count(State), avg(Hipness) from syn2757980"
" group by Living")
df = result.asDataFrame()
assert df.shape == (4, 4)
assert list(df['State'].values) == ['PA', 'MO', 'DC', 'NC']
# check integer, double and boolean types after PLFM-3073 is fixed
assert list(df['MIN(Born)'].values) == [1935, 1928, 1929, 1926], "Unexpected values" + str(df['MIN(Born)'].values)
assert list(df['COUNT(State)'].values) == [2, 3, 1, 1]
assert list(df['AVG(Hipness)'].values) == [1.1, 2.38, 3.14, 4.38]
def test_waitForAsync():
syn = Synapse(debug=True, skip_checks=True)
syn.table_query_timeout = 0.05
syn.table_query_max_sleep = 0.001
syn.restPOST = MagicMock(return_value={"token": "<PASSWORD>"})
# return a mocked http://docs.synapse.org/rest/org/sagebionetworks/repo/model/asynch/AsynchronousJobStatus.html
syn.restGET = MagicMock(return_value={
"jobState": "PROCESSING",
"progressMessage": "Test progress message",
"progressCurrent": 10,
"progressTotal": 100,
"errorMessage": "Totally fubared error",
"errorDetails": "Totally fubared error details"})
pytest.raises(SynapseTimeoutError, syn._waitForAsync, uri="foo/bar",
request={"foo": "bar"})
def _insert_dataframe_column_if_not_exist__setup():
df = pd.DataFrame()
column_name = "panda"
data = ["pandas", "are", "alive", ":)"]
return df, column_name, data
def test_insert_dataframe_column_if_not_exist__nonexistent_column():
df, column_name, data = _insert_dataframe_column_if_not_exist__setup()
# method under test
CsvFileTable._insert_dataframe_column_if_not_exist(df, 0, column_name, data)
# make sure the data was inserted
assert data == df[column_name].tolist()
def test_insert_dataframe_column_if_not_exist__existing_column_matching():
df, column_name, data = _insert_dataframe_column_if_not_exist__setup()
# add the same data to the DataFrame prior to calling our method
df.insert(0, column_name, data)
# method under test
CsvFileTable._insert_dataframe_column_if_not_exist(df, 0, column_name, data)
# make sure the data has not changed
assert data == df[column_name].tolist()
def test_insert_dataframe_column_if_not_exist__existing_column_not_matching():
df, column_name, data = _insert_dataframe_column_if_not_exist__setup()
# add different data to the DataFrame prior to calling our method
df.insert(0, column_name, ['mercy', 'main', 'btw'])
# make sure the data is different
assert data != df[column_name].tolist()
# method under test should raise exception
with pytest.raises(SynapseError):
CsvFileTable._insert_dataframe_column_if_not_exist(df, 0, column_name, data)
@pytest.mark.parametrize('downloadLocation', [None, '/tmp/download'])
def test_downloadTableColumns(syn, downloadLocation):
header = MagicMock()
header.name = 'id'
table = MagicMock(
tableId='abc',
headers=[header],
__iter__=MagicMock(
return_value=iter([[1], [2], [3]])
)
)
mock_async_response = {
'resultZipFileHandleId': 4,
'fileSummary': [
{
'status': 'SUCCESS',
'fileHandleId': 1,
'zipEntryName': 'entry1',
},
{
'status': 'SUCCESS',
'fileHandleId': 3,
'zipEntryName': 'entry2',
},
]
}
zip_file_path = '/tmp/foo/bar.csv'
zip_entry_file_paths = [
'/tmp/foo/entry1',
'/tmp/foo/entry2',
]
cached_paths = [
None,
'/tmp/foo',
None
]
expected_result = {
1: zip_entry_file_paths[0],
2: cached_paths[1],
3: zip_entry_file_paths[1],
}
with patch.object(syn, 'cache') as mock_cache, \
patch.object(syn, '_waitForAsync') as mock_async, \
patch.object(syn, '_ensure_download_location_is_directory') as mock_ensure_dir, \
patch.object(syn, '_downloadFileHandle') as mock_download_file_handle, \
patch.object(client, 'zipfile'), \
patch.object(client, 'extract_zip_file_to_directory') as mock_extract_zip_file_to_directory:
mock_cache.get.side_effect = cached_paths
mock_async.return_value = mock_async_response
mock_ensure_dir.return_value = mock_cache.get_cache_dir.return_value = '/tmp/download'
mock_download_file_handle.return_value = zip_file_path
mock_extract_zip_file_to_directory.side_effect = zip_entry_file_paths
result = syn.downloadTableColumns(table, ['id'], downloadLocation=downloadLocation)
if downloadLocation:
mock_ensure_dir.assert_called_once_with(downloadLocation)
else:
assert [call(1), call(3)] == mock_cache.get_cache_dir.call_args_list
assert expected_result == result
def test_build_table_download_file_handle_list__repeated_file_handles(syn):
# patch the cache so we don't look there in case FileHandle ids actually exist there
patch.object(syn.cache, "get", return_value=None)
cols = [
Column(name='Name', columnType='STRING', maximumSize=50),
Column(name='filehandle', columnType='FILEHANDLEID')]
schema = Schema(name='FileHandleTest', columns=cols, parent='syn420')
# using some large filehandle numbers so i don't accidentally collide with existing file handles
data = [["ayy lmao", 5318008],
["large numberino", 0x5f3759df],
["repeated file handle", 5318008],
["repeated file handle also", 0x5f3759df]]
# need columns to do cast_values w/o storing
table = Table(schema, data, headers=[SelectColumn.from_column(col) for col in cols])
file_handle_associations, file_handle_to_path_map = syn._build_table_download_file_handle_list(table,
['filehandle'],
None)
# verify only 2 file_handles are added (repeats were ignored)
assert 2 == len(file_handle_associations)
assert 0 == len(file_handle_to_path_map)
def test_SubmissionViewSchema__default_params():
submission_view = SubmissionViewSchema(parent="idk")
assert [] == submission_view.scopeIds
assert submission_view.addDefaultViewColumns
def test_SubmissionViewSchema__before_synapse_store(syn):
syn = Synapse(debug=True, skip_checks=True)
with patch.object(syn, '_get_default_view_columns') as mocked_get_default,\
patch.object(syn, '_get_annotation_view_columns') as mocked_get_annotations,\
patch.object(SchemaBase, "_before_synapse_store"):
submission_view = SubmissionViewSchema(scopes=['123'], parent="idk")
submission_view._before_synapse_store(syn)
mocked_get_default.assert_called_once_with("submissionview",
view_type_mask=None)
mocked_get_annotations.assert_called_once_with(['123'],
"submissionview",
view_type_mask=None)
def test_EntityViewSchema__before_synapse_store(syn):
syn = Synapse(debug=True, skip_checks=True)
with patch.object(syn, '_get_default_view_columns') as mocked_get_default,\
patch.object(syn, '_get_annotation_view_columns') as mocked_get_annotations,\
patch.object(SchemaBase, "_before_synapse_store"):
submission_view = EntityViewSchema(scopes=['syn123'], parent="idk")
submission_view._before_synapse_store(syn)
mocked_get_default.assert_called_once_with("entityview",
view_type_mask=1)
mocked_get_annotations.assert_called_once_with(['syn123'],
"entityview",
view_type_mask=1)
def test_EntityViewSchema__default_params():
entity_view = EntityViewSchema(parent="idk")
assert EntityViewType.FILE.value == entity_view.viewTypeMask
assert [] == entity_view.scopeIds
assert entity_view.addDefaultViewColumns is True
def test_entityViewSchema__specified_deprecated_type():
view_type = 'project'
entity_view = EntityViewSchema(parent="idk", type=view_type)
assert EntityViewType.PROJECT.value == entity_view.viewTypeMask
assert entity_view.get('type') is None
def test_entityViewSchema__specified_deprecated_type_in_properties():
view_type = 'project'
properties = {'type': view_type}
entity_view = EntityViewSchema(parent="idk", properties=properties)
assert EntityViewType.PROJECT.value == entity_view.viewTypeMask
assert entity_view.get('type') is None
def test_entityViewSchema__specified_viewTypeMask():
entity_view = EntityViewSchema(parent="idk", includeEntityTypes=[EntityViewType.PROJECT])
assert EntityViewType.PROJECT.value == entity_view.viewTypeMask
assert entity_view.get('type') is None
def test_entityViewSchema__specified_both_type_and_viewTypeMask():
entity_view = EntityViewSchema(parent="idk", type='folder', includeEntityTypes=[EntityViewType.PROJECT])
assert EntityViewType.PROJECT.value == entity_view.viewTypeMask
assert entity_view.get('type') is None
def test_entityViewSchema__specified_scopeId():
scopeId = ["123"]
entity_view = EntityViewSchema(parent="idk", scopeId=scopeId)
assert scopeId == entity_view.scopeId
def test_entityViewSchema__specified_add_default_columns():
entity_view = EntityViewSchema(parent="idk", addDefaultViewColumns=False)
assert not entity_view.addDefaultViewColumns
def test_entityViewSchema__add_default_columns_when_from_Synapse():
properties = {u'concreteType': u'org.sagebionetworks.repo.model.table.EntityView'}
entity_view = EntityViewSchema(parent="idk", addDefaultViewColumns=True, properties=properties)
assert not entity_view.addDefaultViewColumns
def test_entityViewSchema__add_scope():
entity_view = EntityViewSchema(parent="idk")
entity_view.add_scope(Entity(parent="also idk", id=123))
entity_view.add_scope(456)
entity_view.add_scope("789")
assert [str(x) for x in ["123", "456", "789"]] == entity_view.scopeIds
def test_Schema__max_column_check(syn):
table = Schema(name="someName", parent="idk")
table.addColumns(Column(name="colNum%s" % i, columnType="STRING")
for i in range(MAX_NUM_TABLE_COLUMNS + 1))
pytest.raises(ValueError, syn.store, table)
def test_EntityViewSchema__ignore_column_names_set_info_preserved():
"""
tests that ignoredAnnotationColumnNames will be preserved after creating a new EntityViewSchema from properties,
local_state, and annotations
"""
ignored_names = {'a', 'b', 'c'}
entity_view = EntityViewSchema("someName", parent="syn123", ignoredAnnotationColumnNames={'a', 'b', 'c'})
properties, annotations, local_state = split_entity_namespaces(entity_view)
entity_view_copy = Entity.create(properties, annotations, local_state)
assert ignored_names == entity_view.ignoredAnnotationColumnNames
assert ignored_names == entity_view_copy.ignoredAnnotationColumnNames
def test_EntityViewSchema__ignore_annotation_column_names(syn):
syn = Synapse(debug=True, skip_checks=True)
scopeIds = ['123']
entity_view = EntityViewSchema("someName", scopes=scopeIds, parent="syn123", ignoredAnnotationColumnNames={'long1'},
addDefaultViewColumns=False, addAnnotationColumns=True)
mocked_annotation_result1 = [Column(name='long1', columnType='INTEGER'), Column(name='long2',
columnType='INTEGER')]
with patch.object(syn, '_get_annotation_view_columns', return_value=mocked_annotation_result1)\
as mocked_get_annotations,\
patch.object(syn, 'getColumns') as mocked_get_columns,\
patch.object(SchemaBase, "_before_synapse_store"):
entity_view._before_synapse_store(syn)
mocked_get_columns.assert_called_once_with([])
mocked_get_annotations.assert_called_once_with(scopeIds, "entityview",
view_type_mask=EntityViewType.FILE.value)
assert [Column(name='long2', columnType='INTEGER')] == entity_view.columns_to_store
def test_EntityViewSchema__repeated_columnName_different_type(syn):
syn = Synapse(debug=True, skip_checks=True)
scopeIds = ['123']
entity_view = EntityViewSchema("someName", scopes=scopeIds, parent="syn123")
columns = [Column(name='annoName', columnType='INTEGER'),
Column(name='annoName', columnType='DOUBLE')]
with patch.object(syn, 'getColumns') as mocked_get_columns:
filtered_results = entity_view._filter_duplicate_columns(syn, columns)
mocked_get_columns.assert_called_once_with([])
assert 2 == len(filtered_results)
assert columns == filtered_results
def test_EntityViewSchema__repeated_columnName_same_type(syn):
syn = Synapse(debug=True, skip_checks=True)
entity_view = EntityViewSchema("someName", parent="syn123")
columns = [Column(name='annoName', columnType='INTEGER'),
Column(name='annoName', columnType='INTEGER')]
with patch.object(syn, 'getColumns') as mocked_get_columns:
filtered_results = entity_view._filter_duplicate_columns(syn, columns)
mocked_get_columns.assert_called_once_with([])
assert 1 == len(filtered_results)
assert Column(name='annoName', columnType='INTEGER') == filtered_results[0]
def test_rowset_asDataFrame__with_ROW_ETAG_column(syn):
query_result = {
'concreteType': 'org.sagebionetworks.repo.model.table.QueryResultBundle',
'maxRowsPerPage': 6990,
'selectColumns': [
{'id': '61770', 'columnType': 'STRING', 'name': 'annotationColumn1'},
{'id': '61771', 'columnType': 'STRING', 'name': 'annotationColumn2'}
],
'queryCount': 1,
'queryResult': {
'concreteType': 'org.sagebionetworks.repo.model.table.QueryResult',
'nextPageToken': 'sometoken',
'queryResults': {
'headers': [
{'id': '61770', 'columnType': 'STRING', 'name': 'annotationColumn1'},
{'id': '61771', 'columnType': 'STRING', 'name': 'annotationColumn2'}],
'concreteType': 'org.sagebionetworks.repo.model.table.RowSet',
'etag': 'DEFAULT',
'tableId': 'syn11363411',
'rows': [{'values': ['initial_value1', 'initial_value2'],
'etag': '7de0f326-9ef7-4fde-9e4a-ac0babca73f6',
'rowId': 123,
'versionNumber':456}]
}
}
}
query_result_next_page = {'concreteType': 'org.sagebionetworks.repo.model.table.QueryResult',
'queryResults': {
'etag': 'DEFAULT',
'headers': [
{'id': '61770', 'columnType': 'STRING', 'name': 'annotationColumn1'},
{'id': '61771', 'columnType': 'STRING', 'name': 'annotationColumn2'}],
'rows': [{'values': ['initial_value3', 'initial_value4'],
'etag': '7de0f326-9ef7-4fde-9e4a-ac0babca73f7',
'rowId': 789,
'versionNumber': 101112}],
'tableId': 'syn11363411'}}
with patch.object(syn, "_queryTable", return_value=query_result),\
patch.object(syn, "_queryTableNext", return_value=query_result_next_page):
table = syn.tableQuery("select something from syn123", resultsAs='rowset')
dataframe = table.asDataFrame()
assert "ROW_ETAG" not in dataframe.columns
expected_indices = ['123_456_7de0f326-9ef7-4fde-9e4a-ac0babca73f6',
'789_101112_7de0f326-9ef7-4fde-9e4a-ac0babca73f7']
assert expected_indices == dataframe.index.values.tolist()
def test_RowSetTable_len():
schema = Schema(parentId="syn123", id='syn456', columns=[Column(name='column_name', id='123')])
rowset = RowSet(schema=schema, rows=[Row(['first row']), Row(['second row'])])
row_set_table = RowSetTable(schema, rowset)
assert 2 == len(row_set_table)
def test_build_table__with_pandas_DataFrame():
df = pd.DataFrame(dict(a=[1, 2, 3], b=["c", "d", "e"]))
table = build_table("test", "syn123", df)
for i, row in enumerate(table):
assert row[0] == (i + 1)
assert row[1] == ["c", "d", "e"][i]
assert len(table) == 3
headers = [
{'name': 'a', 'columnType': 'INTEGER'},
{'name': 'b', 'columnType': 'STRING'}
]
assert headers == table.headers
def test_build_table__with_csv():
string_io = StringIOContextManager('a,b\n'
'1,c\n'
'2,d\n'
'3,e')
with patch.object(synapseclient.table, "as_table_columns",
return_value=[Column(name="a", columnType="INTEGER"),
Column(name="b", columnType="STRING")]),\
patch.object(io, "open", return_value=string_io):
table = build_table("test", "syn123", "some_file_name")
for col, row in enumerate(table):
assert row[0] == (col + 1)
assert row[1] == ["c", "d", "e"][col]
assert len(table) == 3
headers = [
{'name': 'a', 'columnType': 'INTEGER'},
{'name': 'b', 'columnType': 'STRING'}
]
assert headers == table.headers
def test_build_table__with_dict():
pytest.raises(ValueError, build_table, "test", "syn123", dict(a=[1, 2, 3], b=["c", "d", "e"]))
class TestTableQueryResult:
@pytest.fixture(autouse=True, scope='function')
def init_syn(self, syn):
self.syn = syn
def setup(self):
self.rows = [{'rowId': 1, 'versionNumber': 2, 'values': ['first_row']},
{'rowId': 5, 'versionNumber': 1, 'values': ['second_row']}]
self.query_result_dict = {'queryResult': {
'queryResults': {
'headers': [
{'columnType': 'STRING', 'name': 'col_name'}],
'rows': self.rows,
'tableId': 'syn123'}},
'selectColumns': [{
'columnType': 'STRING',
'id': '1337',
'name': 'col_name'}]}
self.query_string = "SELECT whatever FROM some_table WHERE sky=blue"
def test_len(self):
with patch.object(self.syn, "_queryTable", return_value=self.query_result_dict) as mocked_table_query:
query_result_table = TableQueryResult(self.syn, self.query_string)
args, kwargs = mocked_table_query.call_args
assert self.query_string == kwargs['query']
assert 2 == len(query_result_table)
def test_iter_metadata__no_etag(self):
with patch.object(self.syn, "_queryTable", return_value=self.query_result_dict):
query_result_table = TableQueryResult(self.syn, self.query_string)
metadata = [x for x in query_result_table.iter_row_metadata()]
assert 2 == len(metadata)
assert (1, 2, None) == metadata[0]
assert (5, 1, None) == metadata[1]
def test_iter_metadata__has_etag(self):
self.rows[0].update({'etag': 'etag1'})
self.rows[1].update({'etag': 'etag2'})
with patch.object(self.syn, "_queryTable", return_value=self.query_result_dict):
query_result_table = TableQueryResult(self.syn, self.query_string)
metadata = [x for x in query_result_table.iter_row_metadata()]
assert 2 == len(metadata)
assert (1, 2, 'etag1') == metadata[0]
assert (5, 1, 'etag2') == metadata[1]
class TestPartialRow:
"""
Testing PartialRow class
"""
def test_constructor__value_not_dict(self):
with pytest.raises(ValueError):
PartialRow([], 123)
def test_constructor__row_id_string_not_castable_to_int(self):
with pytest.raises(ValueError):
PartialRow({}, "fourty-two")
def test_constructor__row_id_is_int_castable_string(self):
partial_row = PartialRow({}, "350")
assert [] == partial_row.values
assert 350 == partial_row.rowId
assert 'etag' not in partial_row
def test_constructor__values_translation(self):
values = OrderedDict([("12345", "rowValue"),
("09876", "otherValue")])
partial_row = PartialRow(values, 711)
expected_values = [{"key": "12345", "value": "rowValue"}, {"key": "09876", "value": "otherValue"}]
assert expected_values == partial_row.values
assert 711 == partial_row.rowId
assert 'etag' not in partial_row
def test_constructor__with_etag(self):
partial_row = PartialRow({}, 420, "my etag")
assert [] == partial_row.values
assert 420 == partial_row.rowId
assert "my etag" == partial_row.etag
def test_constructor__name_to_col_id(self):
values = OrderedDict([("row1", "rowValue"),
("row2", "otherValue")])
names_to_col_id = {"row1": "12345", "row2": "09876"}
partial_row = PartialRow(values, 711, nameToColumnId=names_to_col_id)
expected_values = [{"key": "12345", "value": "rowValue"}, {"key": "09876", "value": "otherValue"}]
assert expected_values == partial_row.values
assert 711 == partial_row.rowId
class TestPartialRowSet:
def test_constructor__not_all_rows_of_type_PartialRow(self):
rows = [PartialRow({}, 123), "some string instead"]
with pytest.raises(ValueError):
PartialRowset("syn123", rows)
def test_constructor__single_PartialRow(self):
partial_row = PartialRow({}, 123)
partial_rowset = PartialRowset("syn123", partial_row)
assert [partial_row] == partial_rowset.rows
class TestCsvFileTable:
def test_iter_metadata__has_etag(self):
string_io = StringIOContextManager("ROW_ID,ROW_VERSION,ROW_ETAG,asdf\n"
"1,2,etag1,\"I like trains\"\n"
"5,1,etag2,\"weeeeeeeeeeee\"\n")
with patch.object(io, "open", return_value=string_io):
csv_file_table = CsvFileTable("syn123", "/fake/file/path")
metadata = [x for x in csv_file_table.iter_row_metadata()]
assert 2 == len(metadata)
assert (1, 2, "etag1") == metadata[0]
assert (5, 1, "etag2") == metadata[1]
def test_iter_metadata__no_etag(self):
string_io = StringIOContextManager("ROW_ID,ROW_VERSION,asdf\n"
"1,2,\"I like trains\"\n"
"5,1,\"weeeeeeeeeeee\"\n")
with patch.object(io, "open", return_value=string_io):
csv_file_table = CsvFileTable("syn123", "/fake/file/path")
metadata = [x for x in csv_file_table.iter_row_metadata()]
assert 2 == len(metadata)
assert (1, 2, None) == metadata[0]
assert (5, 1, None) == metadata[1]
# test __iter__
def test_iter_with_no_headers(self):
# self.headers is None
string_io = StringIOContextManager("ROW_ID,ROW_VERSION,ROW_ETAG,col\n"
"1,2,etag1,\"I like trains\"\n"
"5,1,etag2,\"weeeeeeeeeeee\"\n")
with patch.object(io, "open", return_value=string_io):
table = CsvFileTable("syn123", "/fake/file/path")
iter = table.__iter__()
pytest.raises(ValueError, next, iter)
def test_iter_with_no_headers_in_csv(self):
# csv file does not have headers
string_io = StringIOContextManager("1,2,etag1,\"I like trains\"\n"
"5,1,etag2,\"weeeeeeeeeeee\"\n")
with patch.object(io, "open", return_value=string_io):
table = CsvFileTable("syn123", "/fake/file/path", header=False)
iter = table.__iter__()
pytest.raises(ValueError, next, iter)
def test_iter_row_metadata_mismatch_in_headers(self):
# csv file does not contain row metadata, self.headers does
data = "col1,col2\n" \
"1,2\n" \
"2,1\n"
cols = as_table_columns(StringIOContextManager(data))
headers = [SelectColumn(name="ROW_ID", columnType="STRING"),
SelectColumn(name="ROW_VERSION", columnType="STRING")] + \
[SelectColumn.from_column(col) for col in cols]
with patch.object(io, "open", return_value=StringIOContextManager(data)):
table = CsvFileTable("syn123", "/fake/file/path", headers=headers)
iter = table.__iter__()
pytest.raises(ValueError, next, iter)
def test_iter_with_table_row_metadata(self):
# csv file has row metadata, self.headers does not
data = "ROW_ID,ROW_VERSION,col\n" \
"1,2,\"I like trains\"\n" \
"5,1,\"weeeeeeeeeeee\"\n"
cols = as_table_columns(StringIOContextManager(data))
headers = [SelectColumn.from_column(col) for col in cols]
with patch.object(io, "open", return_value=StringIOContextManager(data)):
table = CsvFileTable("syn123", "/fake/file/path", headers=headers)
expected_rows = [["I like trains"], ["weeeeeeeeeeee"]]
for expected_row, table_row in zip(expected_rows, table):
assert expected_row == table_row
def test_iter_with_mismatch_row_metadata(self):
# self.headers and csv file headers contains mismatch row metadata
data = "ROW_ID,ROW_VERSION,ROW_ETAG,col\n" \
"1,2,etag1,\"I like trains\"\n" \
"5,1,etag2,\"weeeeeeeeeeee\"\n"
cols = as_table_columns(StringIOContextManager(data))
headers = [SelectColumn(name="ROW_ID", columnType="STRING"),
SelectColumn(name="ROW_VERSION", columnType="STRING")] + \
[SelectColumn.from_column(col) for col in cols]
with patch.object(io, "open", return_value=StringIOContextManager(data)):
table = CsvFileTable("syn123", "/fake/file/path", headers=headers)
iter = table.__iter__()
pytest.raises(ValueError, next, iter)
def test_iter_no_row_metadata(self):
# both csv headers and self.headers do not contains row metadata
data = "col1,col2\n" \
"1,2\n" \
"2,1\n"
cols = as_table_columns(StringIOContextManager(data))
headers = [SelectColumn.from_column(col) for col in cols]
with patch.object(io, "open", return_value=StringIOContextManager(data)):
table = CsvFileTable("syn123", "/fake/file/path", headers=headers)
expected_rows = [[1, 2], [2, 1]]
for expected_row, table_row in zip(expected_rows, table):
assert expected_row == table_row
def test_iter_with_file_view_row_metadata(self):
# csv file and self.headers contain matching row metadata
data = "ROW_ID,ROW_VERSION,ROW_ETAG,col\n" \
"1,2,etag1,\"I like trains\"\n" \
"5,1,etag2,\"weeeeeeeeeeee\"\n"
cols = as_table_columns(StringIOContextManager(data))
headers = [SelectColumn(name="ROW_ID", columnType="STRING"),
SelectColumn(name="ROW_VERSION", columnType="STRING"),
SelectColumn(name="ROW_ETAG", columnType="STRING")] + \
[SelectColumn.from_column(col) for col in cols]
with patch.object(io, "open", return_value=StringIOContextManager(data)):
table = CsvFileTable("syn123", "/fake/file/path", headers=headers)
expected_rows = [['1', '2', "etag1", "I like trains"],
['5', '1', "etag2", "weeeeeeeeeeee"]]
for expected_row, table_row in zip(expected_rows, table):
assert expected_row == table_row
def test_as_data_frame__no_headers(self):
"""Verify we don't assume a schema has defined headers when converting to a Pandas data frame"""
data = {
'ROW_ID': ['1', '5'],
'ROW_VERSION': ['2', '1'],
'ROW_ETAG': ['etag1', 'etag2'],
'col': ['I like trains', 'weeeeeeeeeeee']
}
pd_df = pd.DataFrame(data)
expected_df = pd.DataFrame(
index=['1_2_etag1', '5_1_etag2'],
columns=['col'],
data=data['col'],
)
with patch.object(pd, 'read_csv') as mock_read_csv:
mock_read_csv.return_value = pd_df
table = CsvFileTable('syn123', '/fake/file/path')
df = table.asDataFrame()
pd.testing.assert_frame_equal(expected_df, df)
def test_as_data_frame__list_columns(self):
"""Verify list columns are represented as expected when converted to a Pandas dataframe"""
data = {
'string_list': ['["foo", "bar"]', '["wizzle", "wozzle"]'],
'integer_list': ['[1, 5]', '[2, 1]'],
'boolean_list': ['[true, false]', '[false, true]'],
'fill': [None, None],
}
pd_df = pd.DataFrame(data)  # api: pandas.DataFrame
# -*- coding: utf-8 -*-
from datetime import timedelta
from distutils.version import LooseVersion
import numpy as np
import pytest
import pandas as pd
import pandas.util.testing as tm
from pandas import (
DatetimeIndex, Int64Index, Series, Timedelta, TimedeltaIndex, Timestamp,
date_range, timedelta_range
)
from pandas.errors import NullFrequencyError
@pytest.fixture(params=[pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)],
ids=str)
def delta(request):
# Several ways of representing two hours
return request.param
@pytest.fixture(params=['B', 'D'])
def freq(request):
return request.param
class TestTimedeltaIndexArithmetic(object):
# Addition and Subtraction Operations
# -------------------------------------------------------------
# TimedeltaIndex.shift is used by __add__/__sub__
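# e.g. for an hourly-freq index, `tdi + 1` behaves like `tdi.shift(1, freq='H')`,
# which is why the integer-addition tests below mirror these shift tests.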
def test_tdi_shift_empty(self):
# GH#9903
idx = pd.TimedeltaIndex([], name='xxx')
tm.assert_index_equal(idx.shift(0, freq='H'), idx)
tm.assert_index_equal(idx.shift(3, freq='H'), idx)
def test_tdi_shift_hours(self):
# GH#9903
idx = pd.TimedeltaIndex(['5 hours', '6 hours', '9 hours'], name='xxx')
tm.assert_index_equal(idx.shift(0, freq='H'), idx)
exp = pd.TimedeltaIndex(['8 hours', '9 hours', '12 hours'], name='xxx')
tm.assert_index_equal(idx.shift(3, freq='H'), exp)
exp = pd.TimedeltaIndex(['2 hours', '3 hours', '6 hours'], name='xxx')
tm.assert_index_equal(idx.shift(-3, freq='H'), exp)
def test_tdi_shift_minutes(self):
# GH#9903
idx = pd.TimedeltaIndex(['5 hours', '6 hours', '9 hours'], name='xxx')
tm.assert_index_equal(idx.shift(0, freq='T'), idx)
exp = pd.TimedeltaIndex(['05:03:00', '06:03:00', '9:03:00'],
name='xxx')
tm.assert_index_equal(idx.shift(3, freq='T'), exp)
exp = pd.TimedeltaIndex(['04:57:00', '05:57:00', '8:57:00'],
name='xxx')
tm.assert_index_equal(idx.shift(-3, freq='T'), exp)
def test_tdi_shift_int(self):
# GH#8083
trange = pd.to_timedelta(range(5), unit='d') + pd.offsets.Hour(1)
result = trange.shift(1)
expected = TimedeltaIndex(['1 days 01:00:00', '2 days 01:00:00',
'3 days 01:00:00',
'4 days 01:00:00', '5 days 01:00:00'],
freq='D')
tm.assert_index_equal(result, expected)
def test_tdi_shift_nonstandard_freq(self):
# GH#8083
trange = pd.to_timedelta(range(5), unit='d') + pd.offsets.Hour(1)
result = trange.shift(3, freq='2D 1s')
expected = TimedeltaIndex(['6 days 01:00:03', '7 days 01:00:03',
'8 days 01:00:03', '9 days 01:00:03',
'10 days 01:00:03'], freq='D')
tm.assert_index_equal(result, expected)
def test_shift_no_freq(self):
# GH#19147
tdi = TimedeltaIndex(['1 days 01:00:00', '2 days 01:00:00'], freq=None)
with pytest.raises(NullFrequencyError):
tdi.shift(2)
# -------------------------------------------------------------
def test_ufunc_coercions(self):
# normal ops are also tested in tseries/test_timedeltas.py
idx = TimedeltaIndex(['2H', '4H', '6H', '8H', '10H'],
freq='2H', name='x')
for result in [idx * 2, np.multiply(idx, 2)]:
assert isinstance(result, TimedeltaIndex)
exp = TimedeltaIndex(['4H', '8H', '12H', '16H', '20H'],
freq='4H', name='x')
tm.assert_index_equal(result, exp)
assert result.freq == '4H'
for result in [idx / 2, np.divide(idx, 2)]:
assert isinstance(result, TimedeltaIndex)
exp = TimedeltaIndex(['1H', '2H', '3H', '4H', '5H'],
freq='H', name='x')
tm.assert_index_equal(result, exp)
assert result.freq == 'H'
idx = TimedeltaIndex(['2H', '4H', '6H', '8H', '10H'],
freq='2H', name='x')
for result in [-idx, np.negative(idx)]:
assert isinstance(result, TimedeltaIndex)
exp = TimedeltaIndex(['-2H', '-4H', '-6H', '-8H', '-10H'],
freq='-2H', name='x')
tm.assert_index_equal(result, exp)
assert result.freq == '-2H'
idx = TimedeltaIndex(['-2H', '-1H', '0H', '1H', '2H'],
freq='H', name='x')
for result in [abs(idx), np.absolute(idx)]:
assert isinstance(result, TimedeltaIndex)
exp = TimedeltaIndex(['2H', '1H', '0H', '1H', '2H'],
freq=None, name='x')
tm.assert_index_equal(result, exp)
assert result.freq is None
# -------------------------------------------------------------
# Binary operations TimedeltaIndex and integer
def test_tdi_add_int(self, one):
# Variants of `one` for #19012
rng = timedelta_range('1 days 09:00:00', freq='H', periods=10)
result = rng + one
expected = timedelta_range('1 days 10:00:00', freq='H', periods=10)
tm.assert_index_equal(result, expected)
def test_tdi_iadd_int(self, one):
rng = timedelta_range('1 days 09:00:00', freq='H', periods=10)
expected = timedelta_range('1 days 10:00:00', freq='H', periods=10)
rng += one
tm.assert_index_equal(rng, expected)
def test_tdi_sub_int(self, one):
rng = timedelta_range('1 days 09:00:00', freq='H', periods=10)
result = rng - one
expected = timedelta_range('1 days 08:00:00', freq='H', periods=10)
tm.assert_index_equal(result, expected)
def test_tdi_isub_int(self, one):
rng = timedelta_range('1 days 09:00:00', freq='H', periods=10)
expected = timedelta_range('1 days 08:00:00', freq='H', periods=10)
rng -= one
tm.assert_index_equal(rng, expected)
# -------------------------------------------------------------
# __add__/__sub__ with integer arrays
@pytest.mark.parametrize('box', [np.array, pd.Index])
def test_tdi_add_integer_array(self, box):
# GH#19959
rng = timedelta_range('1 days 09:00:00', freq='H', periods=3)
other = box([4, 3, 2])
expected = TimedeltaIndex(['1 day 13:00:00'] * 3)
result = rng + other
tm.assert_index_equal(result, expected)
result = other + rng
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize('box', [np.array, pd.Index])
def test_tdi_sub_integer_array(self, box):
# GH#19959
rng = timedelta_range('9H', freq='H', periods=3)
other = box([4, 3, 2])
expected = TimedeltaIndex(['5H', '7H', '9H'])
result = rng - other
tm.assert_index_equal(result, expected)
result = other - rng
tm.assert_index_equal(result, -expected)
@pytest.mark.parametrize('box', [np.array, pd.Index])
def test_tdi_addsub_integer_array_no_freq(self, box):
# GH#19959
tdi = TimedeltaIndex(['1 Day', 'NaT', '3 Hours'])
other = box([14, -1, 16])
with pytest.raises(NullFrequencyError):
tdi + other
with pytest.raises(NullFrequencyError):
other + tdi
with pytest.raises(NullFrequencyError):
tdi - other
with pytest.raises(NullFrequencyError):
other - tdi
# -------------------------------------------------------------
# Binary operations TimedeltaIndex and timedelta-like
# Note: add and sub are tested in tests.test_arithmetic
def test_tdi_iadd_timedeltalike(self, delta):
# only test adding/sub offsets as + is now numeric
rng = timedelta_range('1 days', '10 days')
expected = timedelta_range('1 days 02:00:00', '10 days 02:00:00',
freq='D')
rng += delta
tm.assert_index_equal(rng, expected)
def test_tdi_isub_timedeltalike(self, delta):
# only test adding/sub offsets as - is now numeric
rng = timedelta_range('1 days', '10 days')
expected = timedelta_range('0 days 22:00:00', '9 days 22:00:00')
rng -= delta
tm.assert_index_equal(rng, expected)
# -------------------------------------------------------------
def test_subtraction_ops(self):
# with datetimes/timedelta and tdi/dti
tdi = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
dti = date_range('20130101', periods=3, name='bar')
td = Timedelta('1 days')
dt = Timestamp('20130101')
pytest.raises(TypeError, lambda: tdi - dt)
pytest.raises(TypeError, lambda: tdi - dti)
pytest.raises(TypeError, lambda: td - dt)
pytest.raises(TypeError, lambda: td - dti)
result = dt - dti
expected = TimedeltaIndex(['0 days', '-1 days', '-2 days'], name='bar')
tm.assert_index_equal(result, expected)
result = dti - dt
expected = TimedeltaIndex(['0 days', '1 days', '2 days'], name='bar')
tm.assert_index_equal(result, expected)
result = tdi - td
expected = TimedeltaIndex(['0 days', pd.NaT, '1 days'], name='foo')
tm.assert_index_equal(result, expected, check_names=False)
result = td - tdi
expected = TimedeltaIndex(['0 days', pd.NaT, '-1 days'], name='foo')
tm.assert_index_equal(result, expected, check_names=False)
result = dti - td
expected = DatetimeIndex(
['20121231', '20130101', '20130102'], name='bar')
tm.assert_index_equal(result, expected, check_names=False)
result = dt - tdi
expected = DatetimeIndex(['20121231', pd.NaT, '20121230'], name='foo')
tm.assert_index_equal(result, expected)
def test_subtraction_ops_with_tz(self):
# check that dt/dti subtraction ops with tz are validated
dti = date_range('20130101', periods=3)
ts = Timestamp('20130101')
dt = ts.to_pydatetime()
dti_tz = date_range('20130101', periods=3).tz_localize('US/Eastern')
ts_tz = Timestamp('20130101').tz_localize('US/Eastern')
ts_tz2 = Timestamp('20130101').tz_localize('CET')
dt_tz = ts_tz.to_pydatetime()
td = Timedelta('1 days')
def _check(result, expected):
assert result == expected
assert isinstance(result, Timedelta)
# scalars
result = ts - ts
expected = Timedelta('0 days')
_check(result, expected)
result = dt_tz - ts_tz
expected = Timedelta('0 days')
_check(result, expected)
result = ts_tz - dt_tz
expected = Timedelta('0 days')
_check(result, expected)
# tz mismatches
pytest.raises(TypeError, lambda: dt_tz - ts)
pytest.raises(TypeError, lambda: dt_tz - dt)
pytest.raises(TypeError, lambda: dt_tz - ts_tz2)
pytest.raises(TypeError, lambda: dt - dt_tz)
pytest.raises(TypeError, lambda: ts - dt_tz)
pytest.raises(TypeError, lambda: ts_tz2 - ts)
pytest.raises(TypeError, lambda: ts_tz2 - dt)
pytest.raises(TypeError, lambda: ts_tz - ts_tz2)
# with dti
pytest.raises(TypeError, lambda: dti - ts_tz)
pytest.raises(TypeError, lambda: dti_tz - ts)
pytest.raises(TypeError, lambda: dti_tz - ts_tz2)
result = dti_tz - dt_tz
expected = TimedeltaIndex(['0 days', '1 days', '2 days'])
tm.assert_index_equal(result, expected)
result = dt_tz - dti_tz
expected = TimedeltaIndex(['0 days', '-1 days', '-2 days'])
tm.assert_index_equal(result, expected)
result = dti_tz - ts_tz
expected = TimedeltaIndex(['0 days', '1 days', '2 days'])
tm.assert_index_equal(result, expected)
result = ts_tz - dti_tz
expected = TimedeltaIndex(['0 days', '-1 days', '-2 days'])
tm.assert_index_equal(result, expected)
result = td - td
expected = Timedelta('0 days')
_check(result, expected)
result = dti_tz - td
expected = DatetimeIndex(
['20121231', '20130101', '20130102'], tz='US/Eastern')
tm.assert_index_equal(result, expected)
def test_dti_tdi_numeric_ops(self):
# These are normally union/diff set-like ops
tdi = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
dti = date_range('20130101', periods=3, name='bar')
# TODO(wesm): unused?
# td = Timedelta('1 days')
# dt = Timestamp('20130101')
result = tdi - tdi
expected = TimedeltaIndex(['0 days', pd.NaT, '0 days'], name='foo')
tm.assert_index_equal(result, expected)
result = tdi + tdi
expected = TimedeltaIndex(['2 days', pd.NaT, '4 days'], name='foo')
tm.assert_index_equal(result, expected)
result = dti - tdi # name will be reset
expected = DatetimeIndex(['20121231', pd.NaT, '20130101'])
tm.assert_index_equal(result, expected)
def test_addition_ops(self):
# with datetimes/timedelta and tdi/dti
tdi = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
dti = date_range('20130101', periods=3, name='bar')
td = Timedelta('1 days')
dt = Timestamp('20130101')
result = tdi + dt
expected = DatetimeIndex(['20130102', pd.NaT, '20130103'], name='foo')
tm.assert_index_equal(result, expected)
result = dt + tdi
expected = DatetimeIndex(['20130102', pd.NaT, '20130103'], name='foo')
tm.assert_index_equal(result, expected)
result = td + tdi
expected = TimedeltaIndex(['2 days', pd.NaT, '3 days'], name='foo')
tm.assert_index_equal(result, expected)
result = tdi + td
expected = TimedeltaIndex(['2 days', pd.NaT, '3 days'], name='foo')
tm.assert_index_equal(result, expected)
# unequal length
pytest.raises(ValueError, lambda: tdi + dti[0:1])
pytest.raises(ValueError, lambda: tdi[0:1] + dti)
# random indexes
pytest.raises(NullFrequencyError, lambda: tdi + Int64Index([1, 2, 3]))
# this is a union!
# pytest.raises(TypeError, lambda : Int64Index([1,2,3]) + tdi)
result = tdi + dti # name will be reset
expected = DatetimeIndex(['20130102', pd.NaT, '20130105'])
tm.assert_index_equal(result, expected)
result = dti + tdi # name will be reset
expected = DatetimeIndex(['20130102', pd.NaT, '20130105'])
tm.assert_index_equal(result, expected)
result = dt + td
expected = Timestamp('20130102')
assert result == expected
result = td + dt
expected = Timestamp('20130102')
assert result == expected
def test_ops_ndarray(self):
td = Timedelta('1 day')
# timedelta, timedelta
other = pd.to_timedelta(['1 day']).values
expected = pd.to_timedelta(['2 days']).values
tm.assert_numpy_array_equal(td + other, expected)
if LooseVersion(np.__version__) >= LooseVersion('1.8'):
tm.assert_numpy_array_equal(other + td, expected)
pytest.raises(TypeError, lambda: td + np.array([1]))
pytest.raises(TypeError, lambda: np.array([1]) + td)
expected = pd.to_timedelta(['0 days']).values
tm.assert_numpy_array_equal(td - other, expected)
if LooseVersion(np.__version__) >= LooseVersion('1.8'):
tm.assert_numpy_array_equal(-other + td, expected)
pytest.raises(TypeError, lambda: td - np.array([1]))
pytest.raises(TypeError, lambda: np.array([1]) - td)
expected = pd.to_timedelta(['2 days']).values
tm.assert_numpy_array_equal(td * np.array([2]), expected)
tm.assert_numpy_array_equal(np.array([2]) * td, expected)
pytest.raises(TypeError, lambda: td * other)
pytest.raises(TypeError, lambda: other * td)
tm.assert_numpy_array_equal(td / other,
np.array([1], dtype=np.float64))
if LooseVersion(np.__version__) >= LooseVersion('1.8'):
tm.assert_numpy_array_equal(other / td,
np.array([1], dtype=np.float64))
# timedelta, datetime
other = pd.to_datetime(['2000-01-01']).values
expected = pd.to_datetime(['2000-01-02']).values
tm.assert_numpy_array_equal(td + other, expected)
if LooseVersion(np.__version__) >= LooseVersion('1.8'):
tm.assert_numpy_array_equal(other + td, expected)
expected = pd.to_datetime(['1999-12-31']).values
tm.assert_numpy_array_equal(-td + other, expected)
if LooseVersion(np.__version__) >= LooseVersion('1.8'):
tm.assert_numpy_array_equal(other - td, expected)
def test_timedelta_ops_with_missing_values(self):
# setup
s1 = pd.to_timedelta(Series(['00:00:01']))
s2 = pd.to_timedelta(Series(['00:00:02']))
sn = pd.to_timedelta(Series([pd.NaT]))
df1 = pd.DataFrame(['00:00:01']).apply(pd.to_timedelta)
df2 = pd.DataFrame(['00:00:02']).apply(pd.to_timedelta)
dfn = pd.DataFrame([pd.NaT]).apply(pd.to_timedelta)
scalar1 = pd.to_timedelta('00:00:01')
scalar2 = pd.to_timedelta('00:00:02')
timedelta_NaT = pd.to_timedelta('NaT')
actual = scalar1 + scalar1
assert actual == scalar2
actual = scalar2 - scalar1
assert actual == scalar1
actual = s1 + s1
tm.assert_series_equal(actual, s2)
actual = s2 - s1
tm.assert_series_equal(actual, s1)
actual = s1 + scalar1
tm.assert_series_equal(actual, s2)
actual = scalar1 + s1
tm.assert_series_equal(actual, s2)
actual = s2 - scalar1
tm.assert_series_equal(actual, s1)
actual = -scalar1 + s2
tm.assert_series_equal(actual, s1)
actual = s1 + timedelta_NaT
tm.assert_series_equal(actual, sn)
actual = timedelta_NaT + s1
tm.assert_series_equal(actual, sn)
actual = s1 - timedelta_NaT
tm.assert_series_equal(actual, sn)
actual = -timedelta_NaT + s1
tm.assert_series_equal(actual, sn)
with pytest.raises(TypeError):
s1 + np.nan
with pytest.raises(TypeError):
np.nan + s1
with pytest.raises(TypeError):
s1 - np.nan
with pytest.raises(TypeError):
-np.nan + s1
actual = s1 + pd.NaT
tm.assert_series_equal(actual, sn)
actual = s2 - pd.NaT
tm.assert_series_equal(actual, sn)
actual = s1 + df1
tm.assert_frame_equal(actual, df2)
actual = s2 - df1
tm.assert_frame_equal(actual, df1)
actual = df1 + s1
tm.assert_frame_equal(actual, df2)
actual = df2 - s1
tm.assert_frame_equal(actual, df1)
actual = df1 + df1
tm.assert_frame_equal(actual, df2)
actual = df2 - df1
tm.assert_frame_equal(actual, df1)
actual = df1 + scalar1
tm.assert_frame_equal(actual, df2)
actual = df2 - scalar1
tm.assert_frame_equal(actual, df1)
actual = df1 + timedelta_NaT
tm.assert_frame_equal(actual, dfn)
actual = df1 - timedelta_NaT
tm.assert_frame_equal(actual, dfn)
with pytest.raises(TypeError):
df1 + np.nan
with pytest.raises(TypeError):
df1 - np.nan
actual = df1 + pd.NaT # NaT is datetime, not timedelta
tm.assert_frame_equal(actual, dfn)
actual = df1 - pd.NaT
tm.assert_frame_equal(actual, dfn)
def test_tdi_ops_attributes(self):
rng = timedelta_range('2 days', periods=5, freq='2D', name='x')
result = rng + 1
exp = timedelta_range('4 days', periods=5, freq='2D', name='x')
tm.assert_index_equal(result, exp)
assert result.freq == '2D'
result = rng - 2
exp = timedelta_range('-2 days', periods=5, freq='2D', name='x')
tm.assert_index_equal(result, exp)
assert result.freq == '2D'
result = rng * 2
exp = timedelta_range('4 days', periods=5, freq='4D', name='x')
tm.assert_index_equal(result, exp)
assert result.freq == '4D'
result = rng / 2
exp = timedelta_range('1 days', periods=5, freq='D', name='x')
tm.assert_index_equal(result, exp)
assert result.freq == 'D'
result = -rng
exp = timedelta_range('-2 days', periods=5, freq='-2D', name='x')
#Import all packages needed
import pandas as pd
import numpy as np
from config import username, password
from progressbar import progressbar
from webull import webull
# https://github.com/tedchou12/webull/wiki/MFA
#login to webull-API/authenticate with API
wb = webull()
wb.login(username, password)
#Grab all active stocks on webull-API
data = wb.get_active_gainer_loser('active', 9999)
#Convert to dataframe
data = pd.DataFrame(data)
#
# Copyright 2018 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
from datetime import time
from os.path import abspath, dirname, join
from unittest import TestCase
import typing
import re
import functools
import itertools
import pathlib
from collections import abc
import pytest
import numpy as np
import pandas as pd
import pandas.testing as tm
from pandas import Timedelta, read_csv
from parameterized import parameterized
import pytz
from pytz import UTC
from toolz import concat
from exchange_calendars import get_calendar
from exchange_calendars.calendar_utils import (
ExchangeCalendarDispatcher,
_default_calendar_aliases,
_default_calendar_factories,
)
from exchange_calendars.errors import (
CalendarNameCollision,
InvalidCalendarName,
NoSessionsError,
)
from exchange_calendars.exchange_calendar import ExchangeCalendar, days_at_time
from .test_utils import T
class FakeCalendar(ExchangeCalendar):
name = "DMY"
tz = "Asia/Ulaanbaatar"
open_times = ((None, time(11, 13)),)
close_times = ((None, time(11, 49)),)
class CalendarRegistrationTestCase(TestCase):
def setup_method(self, method):
self.dummy_cal_type = FakeCalendar
self.dispatcher = ExchangeCalendarDispatcher({}, {}, {})
def teardown_method(self, method):
self.dispatcher.clear_calendars()
def test_register_calendar(self):
# Build a fake calendar
dummy_cal = self.dummy_cal_type()
# Try to register and retrieve the calendar
self.dispatcher.register_calendar("DMY", dummy_cal)
retr_cal = self.dispatcher.get_calendar("DMY")
self.assertEqual(dummy_cal, retr_cal)
# Try to register again, expecting a name collision
with self.assertRaises(CalendarNameCollision):
self.dispatcher.register_calendar("DMY", dummy_cal)
# Deregister the calendar and ensure that it is removed
self.dispatcher.deregister_calendar("DMY")
with self.assertRaises(InvalidCalendarName):
self.dispatcher.get_calendar("DMY")
def test_register_calendar_type(self):
self.dispatcher.register_calendar_type("DMY", self.dummy_cal_type)
retr_cal = self.dispatcher.get_calendar("DMY")
self.assertEqual(self.dummy_cal_type, type(retr_cal))
def test_both_places_are_checked(self):
dummy_cal = self.dummy_cal_type()
# if instance is registered, can't register type with same name
self.dispatcher.register_calendar("DMY", dummy_cal)
with self.assertRaises(CalendarNameCollision):
self.dispatcher.register_calendar_type("DMY", type(dummy_cal))
self.dispatcher.deregister_calendar("DMY")
# if type is registered, can't register instance with same name
self.dispatcher.register_calendar_type("DMY", type(dummy_cal))
with self.assertRaises(CalendarNameCollision):
self.dispatcher.register_calendar("DMY", dummy_cal)
def test_force_registration(self):
self.dispatcher.register_calendar("DMY", self.dummy_cal_type())
first_dummy = self.dispatcher.get_calendar("DMY")
# force-register a new instance
self.dispatcher.register_calendar("DMY", self.dummy_cal_type(), force=True)
second_dummy = self.dispatcher.get_calendar("DMY")
self.assertNotEqual(first_dummy, second_dummy)
class DefaultsTestCase(TestCase):
def test_default_calendars(self):
dispatcher = ExchangeCalendarDispatcher(
calendars={},
calendar_factories=_default_calendar_factories,
aliases=_default_calendar_aliases,
)
# These are ordered aliases first, so that we can deregister the
# canonical factories when we're done with them, and we'll be done with
# them after they've been used by all aliases and by canonical name.
for name in concat([_default_calendar_aliases, _default_calendar_factories]):
self.assertIsNotNone(
dispatcher.get_calendar(name), "get_calendar(%r) returned None" % name
)
dispatcher.deregister_calendar(name)
class DaysAtTimeTestCase(TestCase):
@parameterized.expand(
[
# NYSE standard day
(
"2016-07-19",
0,
time(9, 31),
pytz.timezone("America/New_York"),
"2016-07-19 9:31",
),
# CME standard day
(
"2016-07-19",
-1,
time(17, 1),
pytz.timezone("America/Chicago"),
"2016-07-18 17:01",
),
# CME day after DST start
(
"2004-04-05",
-1,
time(17, 1),
pytz.timezone("America/Chicago"),
"2004-04-04 17:01",
),
# ICE day after DST start
(
"1990-04-02",
-1,
time(19, 1),
pytz.timezone("America/Chicago"),
"1990-04-01 19:01",
),
]
)
def test_days_at_time(self, day, day_offset, time_offset, tz, expected):
days = pd.DatetimeIndex([pd.Timestamp(day, tz=tz)])
result = days_at_time(days, time_offset, tz, day_offset)[0]
expected = pd.Timestamp(expected, tz=tz).tz_convert(UTC)
self.assertEqual(result, expected)
class ExchangeCalendarTestBase(object):
# Override in subclasses.
answer_key_filename = None
calendar_class = None
# Affects test_start_bound. Should be set to earliest date for which
# calendar can be instantiated, or None if no start bound.
START_BOUND: pd.Timestamp | None = None
# Affects test_end_bound. Should be set to latest date for which
# calendar can be instantiated, or None if no end bound.
END_BOUND: pd.Timestamp | None = None
# Affects tests that care about the empty periods between sessions. Should
# be set to False for 24/7 calendars.
GAPS_BETWEEN_SESSIONS = True
# Affects tests that care about early closes. Should be set to False for
# calendars that don't have any early closes.
HAVE_EARLY_CLOSES = True
# Affects tests that care about late opens. Since most do not, defaulting
# to False.
HAVE_LATE_OPENS = False
# Affects test_for_breaks. True if one or more calendar sessions has a
# break.
HAVE_BREAKS = False
# Affects test_session_has_break.
SESSION_WITH_BREAK = None # None if no session has a break
SESSION_WITHOUT_BREAK = T("2011-06-15") # None if all sessions have breaks
# Affects test_sanity_check_session_lengths. Should be set to the largest
# number of hours that ever appear in a single session.
MAX_SESSION_HOURS = 0
# Affects test_minute_index_to_session_labels.
# Change these if the start/end dates of your test suite don't contain the
# defaults.
MINUTE_INDEX_TO_SESSION_LABELS_START = pd.Timestamp("2011-01-04", tz=UTC)
MINUTE_INDEX_TO_SESSION_LABELS_END = pd.Timestamp("2011-04-04", tz=UTC)
# Affects tests around daylight savings. If possible, should contain two
# dates that are not both in the same daylight savings regime.
DAYLIGHT_SAVINGS_DATES = ["2004-04-05", "2004-11-01"]
# Affects test_start_end. Change these if your calendar start/end
# dates between 2010-01-03 and 2010-01-10 don't match the defaults.
TEST_START_END_FIRST = pd.Timestamp("2010-01-03", tz=UTC)
TEST_START_END_LAST = pd.Timestamp("2010-01-10", tz=UTC)
TEST_START_END_EXPECTED_FIRST = pd.Timestamp("2010-01-04", tz=UTC)
TEST_START_END_EXPECTED_LAST = pd.Timestamp("2010-01-08", tz=UTC)
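# A minimal, purely illustrative sketch of a concrete subclass. The calendar name,
# resource file name and session length below are assumptions for demonstration,
# not values taken from this test suite:
#
# class XNYSCalendarTestCase(ExchangeCalendarTestBase, TestCase):
#     answer_key_filename = "xnys"          # expects tests/resources/xnys.csv
#     calendar_class = type(get_calendar("XNYS"))
#     MAX_SESSION_HOURS = 6.5
#     HAVE_EARLY_CLOSES = True
#     GAPS_BETWEEN_SESSIONS = True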
@staticmethod
def load_answer_key(filename):
"""
Load a CSV from tests/resources/{filename}.csv
"""
fullpath = join(
dirname(abspath(__file__)),
"./resources",
filename + ".csv",
)
return read_csv(
fullpath,
index_col=0,
# NOTE: Merely passing parse_dates=True doesn't cause pandas to set
# the dtype correctly, and passing all reasonable inputs to the
# dtype kwarg cause read_csv to barf.
parse_dates=[0, 1, 2],
date_parser=lambda x: pd.Timestamp(x, tz=UTC),
)
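# The answer-key CSV is expected to provide a session-date index plus market_open and
# market_close columns (parsed above as UTC timestamps); tests that iterate rows with
# itertuples also unpack two further columns per row.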
@classmethod
def setup_class(cls):
cls.answers = cls.load_answer_key(cls.answer_key_filename)
cls.start_date = cls.answers.index[0]
cls.end_date = cls.answers.index[-1]
cls.calendar = cls.calendar_class(cls.start_date, cls.end_date)
cls.one_minute = pd.Timedelta(1, "T")
cls.one_hour = pd.Timedelta(1, "H")
cls.one_day = pd.Timedelta(1, "D")
cls.today = pd.Timestamp.now(tz="UTC").floor("D")
@classmethod
def teardown_class(cls):
cls.calendar = None
cls.answers = None
def test_bound_start(self):
if self.START_BOUND is not None:
cal = self.calendar_class(self.START_BOUND, self.today)
self.assertIsInstance(cal, ExchangeCalendar)
start = self.START_BOUND - pd.DateOffset(days=1)
with pytest.raises(ValueError, match=re.escape(f"{start}")):
self.calendar_class(start, self.today)
else:
# verify no bound imposed
cal = self.calendar_class(pd.Timestamp("1902-01-01", tz="UTC"), self.today)
self.assertIsInstance(cal, ExchangeCalendar)
def test_bound_end(self):
if self.END_BOUND is not None:
cal = self.calendar_class(self.today, self.END_BOUND)
self.assertIsInstance(cal, ExchangeCalendar)
end = self.END_BOUND + pd.DateOffset(days=1)
with pytest.raises(ValueError, match=re.escape(f"{end}")):
self.calendar_class(self.today, end)
else:
# verify no bound imposed
cal = self.calendar_class(self.today, pd.Timestamp("2050-01-01", tz="UTC"))
self.assertIsInstance(cal, ExchangeCalendar)
def test_sanity_check_session_lengths(self):
# make sure that no session is longer than self.MAX_SESSION_HOURS hours
for session in self.calendar.all_sessions:
o, c = self.calendar.open_and_close_for_session(session)
delta = c - o
self.assertLessEqual(delta.seconds / 3600, self.MAX_SESSION_HOURS)
def test_calculated_against_csv(self):
tm.assert_index_equal(self.calendar.schedule.index, self.answers.index)
def test_adhoc_holidays_specification(self):
"""adhoc holidays should be tz-naive (#33, #39)."""
dti = pd.DatetimeIndex(self.calendar.adhoc_holidays)
assert dti.tz is None
def test_is_open_on_minute(self):
one_minute = pd.Timedelta(minutes=1)
m = self.calendar.is_open_on_minute
for market_minute in self.answers.market_open[1:]:
market_minute_utc = market_minute
# The exchange should be classified as open on its first minute
self.assertTrue(m(market_minute_utc, _parse=False))
if self.GAPS_BETWEEN_SESSIONS:
# Decrement minute by one, to minute where the market was not
# open
pre_market = market_minute_utc - one_minute
self.assertFalse(m(pre_market, _parse=False))
for market_minute in self.answers.market_close[:-1]:
close_minute_utc = market_minute
# should be open on its last minute
self.assertTrue(m(close_minute_utc, _parse=False))
if self.GAPS_BETWEEN_SESSIONS:
# increment minute by one minute, should be closed
post_market = close_minute_utc + one_minute
self.assertFalse(m(post_market, _parse=False))
def _verify_minute(
self,
calendar,
minute,
next_open_answer,
prev_open_answer,
next_close_answer,
prev_close_answer,
):
next_open = calendar.next_open(minute, _parse=False)
self.assertEqual(next_open, next_open_answer)
prev_open = self.calendar.previous_open(minute, _parse=False)
self.assertEqual(prev_open, prev_open_answer)
next_close = self.calendar.next_close(minute, _parse=False)
self.assertEqual(next_close, next_close_answer)
prev_close = self.calendar.previous_close(minute, _parse=False)
self.assertEqual(prev_close, prev_close_answer)
def test_next_prev_open_close(self):
# for each session, check:
# - the minute before the open (if gaps exist between sessions)
# - the first minute of the session
# - the second minute of the session
# - the minute before the close
# - the last minute of the session
# - the first minute after the close (if gaps exist between sessions)
opens = self.answers.market_open.iloc[1:-2]
closes = self.answers.market_close.iloc[1:-2]
previous_opens = self.answers.market_open.iloc[:-1]
previous_closes = self.answers.market_close.iloc[:-1]
next_opens = self.answers.market_open.iloc[2:]
next_closes = self.answers.market_close.iloc[2:]
for (
open_minute,
close_minute,
previous_open,
previous_close,
next_open,
next_close,
) in zip(
opens, closes, previous_opens, previous_closes, next_opens, next_closes
):
minute_before_open = open_minute - self.one_minute
# minute before open
if self.GAPS_BETWEEN_SESSIONS:
self._verify_minute(
self.calendar,
minute_before_open,
open_minute,
previous_open,
close_minute,
previous_close,
)
# open minute
self._verify_minute(
self.calendar,
open_minute,
next_open,
previous_open,
close_minute,
previous_close,
)
# second minute of session
self._verify_minute(
self.calendar,
open_minute + self.one_minute,
next_open,
open_minute,
close_minute,
previous_close,
)
# minute before the close
self._verify_minute(
self.calendar,
close_minute - self.one_minute,
next_open,
open_minute,
close_minute,
previous_close,
)
# the close
self._verify_minute(
self.calendar,
close_minute,
next_open,
open_minute,
next_close,
previous_close,
)
# minute after the close
if self.GAPS_BETWEEN_SESSIONS:
self._verify_minute(
self.calendar,
close_minute + self.one_minute,
next_open,
open_minute,
next_close,
close_minute,
)
def test_next_prev_minute(self):
all_minutes = self.calendar.all_minutes
# test 20,000 minutes because it takes too long to do the rest.
for idx, minute in enumerate(all_minutes[1:20000]):
self.assertEqual(
all_minutes[idx + 2], self.calendar.next_minute(minute, _parse=False)
)
self.assertEqual(
all_minutes[idx], self.calendar.previous_minute(minute, _parse=False)
)
# test a couple of non-market minutes
if self.GAPS_BETWEEN_SESSIONS:
for open_minute in self.answers.market_open[1:]:
hour_before_open = open_minute - self.one_hour
self.assertEqual(
open_minute,
self.calendar.next_minute(hour_before_open, _parse=False),
)
for close_minute in self.answers.market_close[1:]:
hour_after_close = close_minute + self.one_hour
self.assertEqual(
close_minute,
self.calendar.previous_minute(hour_after_close, _parse=False),
)
def test_date_to_session_label(self):
m = self.calendar.date_to_session_label
sessions = self.answers.index[:30] # first 30 sessions
# test for error if request session prior to first calendar session.
date = self.answers.index[0] - self.one_day
error_msg = (
"Cannot get a session label prior to the first calendar"
f" session ('{self.answers.index[0]}'). Consider passing"
" `direction` as 'next'."
)
with pytest.raises(ValueError, match=re.escape(error_msg)):
m(date, "previous", _parse=False)
# direction as "previous"
dates = pd.date_range(sessions[0], sessions[-1], freq="D")
last_session = None
for date in dates:
session_label = m(date, "previous", _parse=False)
if date in sessions:
assert session_label == date
last_session = session_label
else:
assert session_label == last_session
# direction as "next"
last_session = None
for date in dates.sort_values(ascending=False):
session_label = m(date, "next", _parse=False)
if date in sessions:
assert session_label == date
last_session = session_label
else:
assert session_label == last_session
# test for error if request session after last calendar session.
date = self.answers.index[-1] + self.one_day
error_msg = (
"Cannot get a session label later than the last calendar"
f" session ('{self.answers.index[-1]}'). Consider passing"
" `direction` as 'previous'."
)
with pytest.raises(ValueError, match=re.escape(error_msg)):
m(date, "next", _parse=False)
if self.GAPS_BETWEEN_SESSIONS:
not_sessions = dates[~dates.isin(sessions)][:5]
for not_session in not_sessions:
error_msg = (
f"`date` '{not_session}' does not represent a session. Consider"
" passing a `direction`."
)
with pytest.raises(ValueError, match=re.escape(error_msg)):
m(not_session, "none", _parse=False)
# test default behaviour
with pytest.raises(ValueError, match=re.escape(error_msg)):
m(not_session, _parse=False)
# non-valid direction (can only be thrown if gaps between sessions)
error_msg = (
"'not a direction' is not a valid `direction`. Valid `direction`"
' values are "next", "previous" and "none".'
)
with pytest.raises(ValueError, match=re.escape(error_msg)):
m(not_session, "not a direction", _parse=False)
def test_minute_to_session_label(self):
m = self.calendar.minute_to_session_label
# minute is prior to first session's open
minute_before_first_open = self.answers.iloc[0].market_open - self.one_minute
session_label = self.answers.index[0]
minutes_that_resolve_to_this_session = [
m(minute_before_first_open, _parse=False),
m(minute_before_first_open, direction="next", _parse=False),
]
unique_session_labels = set(minutes_that_resolve_to_this_session)
self.assertTrue(len(unique_session_labels) == 1)
self.assertIn(session_label, unique_session_labels)
with self.assertRaises(ValueError):
m(minute_before_first_open, direction="previous", _parse=False)
with self.assertRaises(ValueError):
m(minute_before_first_open, direction="none", _parse=False)
# minute is between first session's open and last session's close
for idx, (session_label, open_minute, close_minute, _, _) in enumerate(
self.answers.iloc[1:-2].itertuples(name=None)
):
hour_into_session = open_minute + self.one_hour
minute_before_session = open_minute - self.one_minute
minute_after_session = close_minute + self.one_minute
next_session_label = self.answers.index[idx + 2]
previous_session_label = self.answers.index[idx]
# verify that minutes inside a session resolve correctly
minutes_that_resolve_to_this_session = [
m(open_minute, _parse=False),
m(open_minute, direction="next", _parse=False),
m(open_minute, direction="previous", _parse=False),
m(open_minute, direction="none", _parse=False),
m(hour_into_session, _parse=False),
m(hour_into_session, direction="next", _parse=False),
m(hour_into_session, direction="previous", _parse=False),
m(hour_into_session, direction="none", _parse=False),
m(close_minute),
m(close_minute, direction="next", _parse=False),
m(close_minute, direction="previous", _parse=False),
m(close_minute, direction="none", _parse=False),
session_label,
]
if self.GAPS_BETWEEN_SESSIONS:
minutes_that_resolve_to_this_session.append(
m(minute_before_session, _parse=False)
)
minutes_that_resolve_to_this_session.append(
m(minute_before_session, direction="next", _parse=False)
)
minutes_that_resolve_to_this_session.append(
m(minute_after_session, direction="previous", _parse=False)
)
self.assertTrue(
all(
x == minutes_that_resolve_to_this_session[0]
for x in minutes_that_resolve_to_this_session
)
)
minutes_that_resolve_to_next_session = [
m(minute_after_session, _parse=False),
m(minute_after_session, direction="next", _parse=False),
next_session_label,
]
self.assertTrue(
all(
x == minutes_that_resolve_to_next_session[0]
for x in minutes_that_resolve_to_next_session
)
)
self.assertEqual(
m(minute_before_session, direction="previous", _parse=False),
previous_session_label,
)
if self.GAPS_BETWEEN_SESSIONS:
# Make sure we use the cache correctly
minutes_that_resolve_to_different_sessions = [
m(minute_after_session, direction="next", _parse=False),
m(minute_after_session, direction="previous", _parse=False),
m(minute_after_session, direction="next", _parse=False),
]
self.assertEqual(
minutes_that_resolve_to_different_sessions,
[next_session_label, session_label, next_session_label],
)
# make sure that exceptions are raised at the right time
with self.assertRaises(ValueError):
m(open_minute, "asdf", _parse=False)
if self.GAPS_BETWEEN_SESSIONS:
with self.assertRaises(ValueError):
m(minute_before_session, direction="none", _parse=False)
# minute is later than last session's close
minute_after_last_close = self.answers.iloc[-1].market_close + self.one_minute
session_label = self.answers.index[-1]
minute_that_resolves_to_session_label = m(
minute_after_last_close, direction="previous", _parse=False
)
self.assertEqual(session_label, minute_that_resolves_to_session_label)
with self.assertRaises(ValueError):
m(minute_after_last_close, _parse=False)
with self.assertRaises(ValueError):
m(minute_after_last_close, direction="next", _parse=False)
with self.assertRaises(ValueError):
m(minute_after_last_close, direction="none", _parse=False)
@parameterized.expand(
[
(1, 0),
(2, 0),
(2, 1),
]
)
def test_minute_index_to_session_labels(self, interval, offset):
minutes = self.calendar.minutes_for_sessions_in_range(
self.MINUTE_INDEX_TO_SESSION_LABELS_START,
self.MINUTE_INDEX_TO_SESSION_LABELS_END,
)
minutes = minutes[range(offset, len(minutes), interval)]
np.testing.assert_array_equal(
pd.DatetimeIndex(minutes.map(self.calendar.minute_to_session_label)),
self.calendar.minute_index_to_session_labels(minutes),
)
def test_next_prev_session(self):
session_labels = self.answers.index[1:-2]
max_idx = len(session_labels) - 1
# the very first session
first_session_label = self.answers.index[0]
with self.assertRaises(ValueError):
self.calendar.previous_session_label(first_session_label, _parse=False)
# all the sessions in the middle
for idx, session_label in enumerate(session_labels):
if idx < max_idx:
self.assertEqual(
self.calendar.next_session_label(session_label, _parse=False),
session_labels[idx + 1],
)
if idx > 0:
self.assertEqual(
self.calendar.previous_session_label(session_label, _parse=False),
session_labels[idx - 1],
)
# the very last session
last_session_label = self.answers.index[-1]
with self.assertRaises(ValueError):
self.calendar.next_session_label(last_session_label, _parse=False)
@staticmethod
def _find_full_session(calendar):
for session_label in calendar.schedule.index:
if session_label not in calendar.early_closes:
return session_label
return None
def test_minutes_for_period(self):
# full session
# find a session that isn't an early close. start from the first
# session, should be quick.
full_session_label = self._find_full_session(self.calendar)
if full_session_label is None:
raise ValueError("Cannot find a full session to test!")
minutes = self.calendar.minutes_for_session(full_session_label)
_open, _close = self.calendar.open_and_close_for_session(full_session_label)
_break_start, _break_end = self.calendar.break_start_and_end_for_session(
full_session_label
)
if not pd.isnull(_break_start):
constructed_minutes = np.concatenate(
[
pd.date_range(start=_open, end=_break_start, freq="min"),
pd.date_range(start=_break_end, end=_close, freq="min"),
import pytest
from pysqlgui import core_database
from pysqlgui.core_table import Table
import pandas as pd
def test_init_no_parameters():
db = core_database.Database()
assert hasattr(db, "connection")
assert hasattr(db, "cursor")
assert hasattr(db, "name")
assert hasattr(db, "tables")
assert db.name is None
assert db.tables == []
def test_init_with_name_parameter_only():
db = core_database.Database(None, None, "name_of_db")
assert db.name == "name_of_db"
assert isinstance(db.name, str)
assert db.tables == []
def test_get_table_check_table_type():
db = core_database.Database([pd.DataFrame([['tom', 10], ['bob', 15], ['juli', 14]], columns=['name', 'age'])],['example_table'])
assert isinstance(db.get_table('example_table'), Table)
def test_get_table_on_non_existent_table():
db = core_database.Database()
with pytest.raises(ValueError):
db.get_table('some_table_name_that_doesnt_exist')
def test_remove_on_non_existent_table():
db = core_database.Database()
with pytest.raises(ValueError):
db.remove(Table(pd.DataFrame(), 'some_table_name_that_doesnt_exist'))
def test_remove_on_existent_table():
db = core_database.Database([pd.DataFrame([['tom', 10], ['bob', 15], ['juli', 14]], columns=['name', 'age'])],['example_table'])
t = db.get_table('example_table')
assert isinstance(t, Table)
assert db.remove(t) is None
assert len(db.tables) == 0
def test_summary_on_existent_table():
example_df = [pd.DataFrame([['tom', 10], ['bob', 15], ['juli', 14]], columns=['name', 'age'])]
db = core_database.Database(example_df, ['example_table'])
df = db.summary()
assert isinstance(df, pd.DataFrame)
assert not df.empty
assert list(df.columns.values) == ['Table Name', 'Rows', 'Columns']
assert any(df['Table Name'] == 'example_table')
assert df[df['Table Name'] == 'example_table']['Rows'].values[0] == 3
assert df[df['Table Name'] == 'example_table']['Columns'].values[0] == 2
def test_info_on_existent_table_but_called_with_wrong_name():
db = core_database.Database([pd.DataFrame([['tom', 10], ['bob', 15], ['juli', 14]], columns=['name', 'age'])],['example_table'])
t = db.get_table('example_table')
assert isinstance(t, Table)
with pytest.raises(ValueError):
db.info('table_does_not_exist')
def test_info_on_existent_table():
db = core_database.Database([pd.DataFrame([['tom', 10], ['bob', 15], ['juli', 14]], columns=['name', 'age'])],['example_table'])
t = db.get_table('example_table')
assert isinstance(t, Table)
df = db.info('example_table')
assert isinstance(df, pd.DataFrame)
assert set(list(df.columns.values)) == {'Column ID', 'Column Name', 'Type', 'Not NULL?', 'Default Value', 'Primary Key?'}
assert all(df[df['Column Name'] == 'Primary Key?'])
assert any(df[df['Column Name'] == 'age'])
def test_run_query_simple_select():
db = core_database.Database([pd.DataFrame([['tom', 10], ['bob', 15], ['juli', 14]], columns=['name', 'age'])],['example_table'])
assert isinstance(db.run_query('SELECT * FROM example_table'), pd.DataFrame)
assert not db.run_query('SELECT * FROM example_table').empty
def test_run_query_with_pragma():
db = core_database.Database([pd.DataFrame([['tom', 10], ['bob', 15], ['juli', 14]], columns=['name', 'age'])],['example_table'])
assert isinstance(db.run_query('''PRAGMA TABLE_INFO('example_table')'''), pd.DataFrame)
assert not db.run_query('''PRAGMA TABLE_INFO('example_table')''').empty
def test_run_query_wrong_syntax():
db = core_database.Database([pd.DataFrame([['tom', 10], ['bob', 15], ['juli', 14]], columns=['name', 'age'])],['example_table'])
with pytest.raises(ValueError):
db.run_query('SELECT * FROMMMMM example_table')
def test_select_simple_query():
db = core_database.Database([pd.DataFrame([['tom', 10], ['bob', 15], ['juli', 14]], columns=['name', 'age'])],['example_table'])
assert not db.select('''SELECT * FROM example_table''').empty
def test_select_wrong_syntax():
db = core_database.Database([pd.DataFrame([['tom', 10], ['bob', 15], ['juli', 14]], columns=['name', 'age'])],['example_table'])
with pytest.raises(ValueError):
db.select('SELECT * FROMMMMM example_table')
def test_add_table_valid_data_in_list_but_no_table_name():
df = pd.DataFrame([['tom', 10], ['bob', 15], ['juli', 14]], columns=['name', 'age'])
db = core_database.Database()
with pytest.raises(ValueError):
db.add_table([df])
def test_add_table_empty_data_no_table_name():
db = core_database.Database()
num_of_tables = len(db.tables)
db.add_table(None, None)
assert num_of_tables == len(db.tables)
def test_add_table_empty_data_with_table_name():
db = core_database.Database()
num_of_tables = len(db.tables)
db.add_table(None, ['example_table'])
assert num_of_tables == len(db.tables)
def test_add_table_but_wrong_agrument_types_1():
df = pd.DataFrame([['tom', 10], ['bob', 15], ['juli', 14]], columns=['name', 'age'])
db = core_database.Database()
with pytest.raises(TypeError):
db.add_table([df], 'example_table')
def test_add_table_but_wrong_agrument_types_2():
df = pd.DataFrame([['tom', 10], ['bob', 15], ['juli', 14]], columns=['name', 'age'])
db = core_database.Database()
with pytest.raises(TypeError):
db.add_table(df, 'example_table')
def test_add_table_but_wrong_agrument_types_3():
df = pd.DataFrame([['tom', 10], ['bob', 15], ['juli', 14]], columns=['name', 'age'])
db = core_database.Database()
with pytest.raises(TypeError):
db.add_table(df, ['example_table'])
def test_add_table_valid_data_in_list_and_table_name_in_list():
df = pd.DataFrame([['tom', 10], ['bob', 15], ['juli', 14]], columns=['name', 'age'])
db = core_database.Database()
num_of_tables = len(db.tables)
db.add_table([df], ['example_table'])
assert num_of_tables == len(db.tables) - 1
def test_add_table_valid_data_in_dict():
df = pd.DataFrame([['tom', 10], ['bob', 15], ['juli', 14]], columns=['name', 'age'])
db = core_database.Database()
num_of_tables = len(db.tables)
db.add_table({'example_table': df})
assert num_of_tables == len(db.tables) - 1
def test_add_table_valid_data_in_dict_but_wrong_key_value_order():
df = pd.DataFrame([['tom', 10], ['bob', 15], ['juli', 14]], columns=['name', 'age'])
db = core_database.Database()
with pytest.raises(TypeError):
db.add_table({df: 'example_table'})
def test_rename_table():
df = pd.DataFrame([['tom', 10], ['bob', 15], ['juli', 14]], columns=['name', 'age'])
db = core_database.Database([df], ['table_1'])
assert db.rename_table('table_1', 'table_1_new_name') is None
assert db.get_table('table_1_new_name').name == 'table_1_new_name'
def test_rename_table_empty_string():
df = pd.DataFrame([['tom', 10], ['bob', 15], ['juli', 14]], columns=['name', 'age'])
db = core_database.Database([df], ['table_1'])
with pytest.raises(ValueError):
db.rename_table('table_1', '')
def test_rename_table_not_a_string():
df = pd.DataFrame([['tom', 10], ['bob', 15], ['juli', 14]], columns=['name', 'age'])
db = core_database.Database([df], ['table_1'])
with pytest.raises(TypeError):
db.rename_table('table_1', ['a_list'])
def test_rename_table_which_doesnt_exist():
df = pd.DataFrame([['tom', 10], ['bob', 15], ['juli', 14]], columns=['name', 'age'])
db = core_database.Database([df], ['table_1'])
with pytest.raises(ValueError):
db.rename_table('some_table_name_that_doesnt_exist', 'new_name')
def test_drop_table_when_no_tables_exist():
db = core_database.Database()
with pytest.raises(ValueError):
db.drop_table("this_table_doesnt_exist")
def test_drop_table_when_no_tables_exist_empty_string():
db = core_database.Database()
with pytest.raises(ValueError):
db.drop_table("")
def test_drop_table():
df = pd.DataFrame([['tom', 10], ['bob', 15], ['juli', 14]], columns=['name', 'age'])
db = core_database.Database([df], ['table_1'])
num_of_tables = len(db.tables)
db.drop_table('table_1')
assert num_of_tables == len(db.tables) + 1
with pytest.raises(ValueError):
db.get_table('table_1')
def test_create_table_empty_string():
db = core_database.Database()
with pytest.raises(ValueError):
db.create_table('', {'user_id': 'INT'})
def test_create_table_incorrect_table_name_format():
db = core_database.Database()
with pytest.raises(ValueError):
db.create_table(['not_a_string'], {'user_id': 'INT'})
def test_create_table_incorrect_column_data_format():
db = core_database.Database()
with pytest.raises(ValueError):
db.create_table('not_a_string', [{'user_id': 'INT'}])
def test_create_table():
db = core_database.Database()
num_of_tables = len(db.tables)
db.create_table('example_table', {'user_id': 'INTEGER',
'first_name': 'TEXT',
'join_date': 'DATE',
'score': 'FLOAT'
})
assert num_of_tables == len(db.tables) - 1
assert db.get_table('example_table').name == 'example_table'
assert db.info().shape[0] == 1
def test_create_table_with_primary_key():
db = core_database.Database()
num_of_tables = len(db.tables)
db.create_table('example_table', {'user_id': 'INTEGER PRIMARY KEY AUTOINCREMENT',
'first_name': 'TEXT',
'join_date': 'DATE',
'score': 'FLOAT'
})
assert num_of_tables == len(db.tables) - 1
assert db.get_table('example_table').name == 'example_table'
assert db.info().shape[0] == 1
def test_create_table_with_primary_key_and_foreign_key():
db = core_database.Database()
num_of_tables = len(db.tables)
db.create_table('example_table_1', {'user_id': 'INTEGER PRIMARY KEY AUTOINCREMENT',
'first_name': 'TEXT',
'join_date': 'DATE',
'score': 'FLOAT'
})
db.create_table('example_table_2', {'food_id': 'INTEGER',
'user_id': 'INTEGER REFERENCES example_table_1(user_id)'
})
assert num_of_tables == len(db.tables) - 2
assert db.info().shape[0] == 2
def test_insert_data_pandas_dataframe():
my_db = core_database.Database([pd.DataFrame({'name': ['John', 'Mary'], 'age': [32, 18]})],
['USERS'])
more_data = pd.DataFrame({'name': ['Bob', 'Simram'], 'age': [22, 5]})
my_db.insert_data('USERS', more_data)
assert my_db.show('USERS').shape[0] == 4
def test_insert_data_dict():
my_db = core_database.Database([pd.DataFrame({'name': ['John', 'Mary'], 'age': [32, 18]})],
['USERS'])
my_db.insert_data('USERS', {'name': 'Bob', 'age': 22})
assert my_db.show('USERS').shape[0] == 3
def test_insert_data_wrong_data_type():
my_db = core_database.Database([pd.DataFrame({'name': ['John', 'Mary'], 'age': [32, 18]})], ['USERS'])
"""Parse Tecan files, group lists and fit titrations.
(Titrations are described in a list.pH or list.cl file.
Builds 96 titrations and exports them to txt files. In the case of 2 labelblocks
it performs a global fit, saving a png and printing the fitting results.)
:ref:`prtecan parse`:
* Labelblock
* Tecanfile
:ref:`prtecan group`:
* LabelblocksGroup
* TecanfilesGroup
* Titration
* TitrationAnalysis
Functions
---------
.. autofunction:: fit_titration
.. autofunction:: fz_Kd_singlesite
.. autofunction:: fz_pK_singlesite
.. autofunction:: extract_metadata
.. autofunction:: strip_lines
"""
import copy
import hashlib
import itertools
import os
import warnings
from typing import Any, Dict, List, Optional, Tuple, Union # , overload
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import scipy
import scipy.stats
import seaborn as sb
from matplotlib.backends.backend_pdf import PdfPages
list_of_lines = List[List]
# bug xdoctest-3.7 #import numpy.typing as npt
def strip_lines(lines: list_of_lines) -> list_of_lines:
"""Remove empty fields/cells from lines read from a csv file.
([a,,b,,,]-->[a,b])
Parameters
----------
lines
Lines that are a list of fields, typically from a csv/xls file.
Returns
-------
Lines removed from blank cells.
"""
stripped_lines = []
for line in lines:
sl = [line[i] for i in range(len(line)) if line[i] != '']
stripped_lines.append(sl)
return stripped_lines
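# Illustrative behaviour on hand-written input (values are made up, not real Tecan data):
# >>> strip_lines([['Label: Label1', '', '', ''], ['', 'Mode', '', 'Fluorescence Top Reading', '']])
# [['Label: Label1'], ['Mode', 'Fluorescence Top Reading']]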
# def extract_metadata(lines: list_of_lines) -> Dict[str, Union[str, float, List[Any]]]:
def extract_metadata(lines: list_of_lines) -> Dict[str, Any]:
"""Extract metadata from a list of stripped lines.
First field is the *key*, remaining fields goes into a list of values::
['', 'key', '', '', 'value1', '', ..., 'valueN', ''] -->
{key: [value1, ..., valueN]}
*Label* and *Temperature* are two exceptions::
['Label: labelXX', '', '', '', '', '', '', '']
['', 'Temperature: XX °C', '', '', '', '', '', '']
Parameters
----------
lines
Lines that are a list of fields, typically from a csv/xls file.
Returns
-------
Metadata for Tecanfile or Labelblock.
"""
stripped_lines = strip_lines(lines)
temp = {
'Temperature': float(line[0].split(':')[1].split('°C')[0])
for line in stripped_lines
if len(line) == 1 and 'Temperature' in line[0]
}
labl = {
'Label': line[0].split(':')[1].strip()
for line in stripped_lines
if len(line) == 1 and 'Label' in line[0]
}
m1 = {
line[0]: line[0]
for line in stripped_lines
if len(line) == 1 and 'Label' not in line[0] and 'Temperature' not in line[0]
}
m2: Dict[str, Union[str, float, List[str]]] = {
line[0]: line[1:] for line in stripped_lines if len(line) > 1
}
m2.update(m1)
m2.update(temp)
m2.update(labl)
return m2
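# Illustrative call with hand-written lines (metadata values are assumptions):
# >>> extract_metadata([['Label: Label1', '', ''],
# ...                   ['', 'Temperature: 25.5 °C', ''],
# ...                   ['', 'Shaking (Linear) Duration:', '50', 's', '']])
# {'Shaking (Linear) Duration:': ['50', 's'], 'Temperature': 25.5, 'Label': 'Label1'}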
def fz_Kd_singlesite(K: float, p: np.ndarray, x: np.ndarray) -> np.ndarray:
"""Fit function for Cl titration."""
return (p[0] + p[1] * x / K) / (1 + x / K)
def fz_pK_singlesite(K: float, p: np.ndarray, x: np.ndarray) -> np.ndarray:
"""Fit function for pH titration."""
return (p[1] + p[0] * 10 ** (K - x)) / (1 + 10 ** (K - x))
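# Quick sanity check of the two single-site models with hypothetical parameters,
# where p = [SA, SB] (the two plateaus):
# fz_Kd_singlesite(10.0, np.array([1.0, 0.0]), np.array([0.0, 10.0, 1e6]))
# -> approximately [1.0, 0.5, 0.0]   (half-way point at x == Kd)
# fz_pK_singlesite(7.0, np.array([1.0, 0.0]), np.array([5.0, 7.0, 9.0]))
# -> approximately [0.99, 0.5, 0.01] (half-way point at pH == pK)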
def fit_titration(
kind: str,
x: np.ndarray,
y: np.ndarray,
y2: Optional[np.ndarray] = None,
residue: Optional[np.ndarray] = None,
residue2: Optional[np.ndarray] = None,
tval_conf: float = 0.95,
) -> pd.DataFrame:
"""Fit pH or Cl titration using a single-site binding model.
Return the confidence interval (default=0.95) for the fitted parameters (cov*tval),
rather than the standard error of the fit. Uses scipy.optimize.leastsq to determine
3 fitting parameters:
- binding constant *K*
- and 2 plateau *SA* and *SB*.
Parameters
----------
kind
Titration type {'pH'|'Cl'}
x, y
Main dataset.
y2
Second dataset (share x with main dataset).
residue
Residues for main dataset.
residue2
Residues for second dataset.
tval_conf
Confidence level (default 0.95) for parameter estimations.
Returns
-------
Fitting results.
Raises
------
NameError
When kind is different than "pH" or "Cl".
Examples
--------
>>> fit_titration("Cl", [1, 10, 30, 100, 200], [10, 8, 5, 1, 0.1])[["K", "sK"]]
K sK
0 38.955406 30.201929
"""
if kind == 'pH':
fz = fz_pK_singlesite
elif kind == 'Cl':
fz = fz_Kd_singlesite
else:
raise NameError('kind= pH or Cl')
def compute_p0(x: np.ndarray, y: np.ndarray) -> np.ndarray:
df = pd.DataFrame({'x': x, 'y': y})
SA = df.y[df.x == min(df.x)].values[0]
SB = df.y[df.x == max(df.x)].values[0]
K = np.average([max(y), min(y)])
try:
x1, y1 = df[df['y'] >= K].values[0]
except IndexError:
x1 = np.nan
y1 = np.nan
try:
x2, y2 = df[df['y'] <= K].values[0]
except IndexError:
x2 = np.nan
y2 = np.nan
K = (x2 - x1) / (y2 - y1) * (K - y1) + x1
return np.r_[K, SA, SB]
x = np.array(x)
y = np.array(y)
if y2 is None:
def ssq1(p: np.ndarray, x: np.ndarray, y1: np.ndarray) -> np.ndarray:
return np.r_[y1 - fz(p[0], p[1:3], x)]
p0 = compute_p0(x, y)
p, cov, info, msg, success = scipy.optimize.leastsq(
ssq1, p0, args=(x, y), full_output=True, xtol=1e-11
)
else:
def ssq2(
p: np.ndarray,
x: np.ndarray,
y1: np.ndarray,
y2: np.ndarray,
rd1: np.ndarray,
rd2: np.ndarray,
) -> np.ndarray:
return np.r_[
(y1 - fz(p[0], p[1:3], x)) / rd1**2,
(y2 - fz(p[0], p[3:5], x)) / rd2**2,
]
p1 = compute_p0(x, y)
p2 = compute_p0(x, y2)
ave = np.average([p1[0], p2[0]])
p0 = np.r_[ave, p1[1], p1[2], p2[1], p2[2]]
tmp = scipy.optimize.leastsq(
ssq2, p0, full_output=True, xtol=1e-11, args=(x, y, y2, residue, residue2)
)
p, cov, info, msg, success = tmp
res = pd.DataFrame({'ss': [success]})
res['msg'] = msg
if 1 <= success <= 4:
try:
tval = (tval_conf + 1) / 2
chisq = sum(info['fvec'] * info['fvec'])
res['df'] = len(y) - len(p)
res['tval'] = scipy.stats.distributions.t.ppf(tval, res.df)
res['chisqr'] = chisq / res.df
res['K'] = p[0]
res['SA'] = p[1]
res['SB'] = p[2]
if y2 is not None:
res['df'] += len(y2)
res['tval'] = scipy.stats.distributions.t.ppf(tval, res.df)
res['chisqr'] = chisq / res.df
res['SA2'] = p[3]
res['SB2'] = p[4]
res['sSA2'] = np.sqrt(cov[3][3] * res.chisqr) * res.tval
res['sSB2'] = np.sqrt(cov[4][4] * res.chisqr) * res.tval
res['sK'] = np.sqrt(cov[0][0] * res.chisqr) * res.tval
res['sSA'] = np.sqrt(cov[1][1] * res.chisqr) * res.tval
res['sSB'] = np.sqrt(cov[2][2] * res.chisqr) * res.tval
except TypeError:
pass # if some params are not successfully determined.
return res
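# Hypothetical global-fit call for two labelblocks sharing the same pH points
# (x, y1, y2 and the residues below are placeholders, not data from this module):
# fit_titration('pH', x, y1, y2=y2,
#               residue=np.ones_like(y1), residue2=np.ones_like(y2))
# On success it returns a single-row DataFrame with K, SA, SB, SA2, SB2 and their
# confidence intervals (sK, sSA, ...).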
class Labelblock:
"""Parse a label block within a Tecan file.
Parameters
----------
tecanfile :
Object containing (has-a) this Labelblock.
lines :
Lines for this Labelblock.
Attributes
----------
tecanfile
metadata : dict
Metadata specific for this Labelblock.
data : Dict[str, float]
The 96 data values as {'well_name': value}.
Raises
------
Exception
When data do not correspond to a complete 96-well plate.
Warns
-----
Warning
When it replaces "OVER" with ``np.nan`` for any saturated value.
"""
def __init__(
self,
tecanfile: Optional['Tecanfile'],
lines: list_of_lines,
) -> None:
try:
assert lines[14][0] == '<>' and lines[23] == lines[24] == [
'',
'',
'',
'',
'',
'',
'',
'',
'',
'',
'',
'',
'',
]
except AssertionError as err:
raise Exception('Cannot build Labelblock: not 96 wells?') from err
stripped = strip_lines(lines)
stripped[14:23] = []
self.tecanfile = tecanfile
self.metadata = extract_metadata(stripped)
self.data = self._extract_data(lines[15:23])
def _extract_data(self, lines: list_of_lines) -> Dict[str, float]:
"""Convert data into a dictionary.
{'A01' : value}
:
{'H12' : value}
Parameters
----------
lines
xls file read into lines.
Returns
-------
dict
Data from a label block.
Raises
------
Exception
When something went wrong. Possibly because not 96-well.
Warns
-----
When a cell contains saturated signal (converted into np.nan).
"""
rownames = tuple('ABCDEFGH')
data = {}
try:
assert len(lines) == 8
for i, row in enumerate(rownames):
assert lines[i][0] == row # e.g. "A" == "A"
for col in range(1, 13):
try:
data[row + "{0:0>2}".format(col)] = float(lines[i][col])
except ValueError:
data[row + "{0:0>2}".format(col)] = np.nan
path = self.tecanfile.path if self.tecanfile else ""
warnings.warn(
"OVER value in {0}{1:0>2} well for {2} of tecanfile: {3}".format(
row, col, self.metadata['Label'], path
)
)
except AssertionError as err:
raise Exception("Cannot extract data in Labelblock: not 96 wells?") from err
return data
KEYS = [
'Emission Bandwidth',
'Emission Wavelength',
'Excitation Bandwidth',
'Excitation Wavelength',
'Integration Time',
'Mode',
'Number of Flashes',
]
def __eq__(self, other: object) -> bool:
"""Two labelblocks are equal when metadata KEYS are identical."""
# Identical labelblocks can be grouped safely into the same titration; otherwise
# some kind of normalization (# of flashes, gain, etc.) would be
# necessary.
if not isinstance(other, Labelblock):
return NotImplemented
eq: bool = True
for k in Labelblock.KEYS:
eq *= self.metadata[k] == other.metadata[k]
# e.g. 'Gain': [81.0, 'Manual'] and 'Gain': [81.0, 'Optimal'] are considered equal
eq *= self.metadata['Gain'][0] == other.metadata['Gain'][0]
# annotation error: Value of type "Union[str, float, List[str]]" is not indexable
return eq
class Tecanfile:
"""Parse a .xls file as exported from Tecan.
Parameters
----------
path
Name of the xls file.
Attributes
----------
path
metadata : dict
General metadata for Tecanfile e.g. 'Date:' or 'Shaking Duration:'.
labelblocks : List[Labelblock]
All labelblocks contained in the file.
Methods
-------
read_xls(path) :
Read xls file at path.
lookup_csv_lines(csvl, pattern='Label: Label', col=0) :
Return row index for pattern found at col.
Raises
------
FileNotFoundError
When path does not exist.
Exception
When no Labelblock is found.
"""
def __init__(self, path: str) -> None:
csvl = Tecanfile.read_xls(path)
idxs = Tecanfile.lookup_csv_lines(csvl, pattern='Label: Label', col=0)
if len(idxs) == 0:
raise Exception('No Labelblock found.')
# path
self.path = path
# metadata
self.metadata = extract_metadata(csvl[: idxs[0]])
# labelblocks
labelblocks = []
n_labelblocks = len(idxs)
idxs.append(len(csvl))
for i in range(n_labelblocks):
labelblocks.append(Labelblock(self, csvl[idxs[i] : idxs[i + 1]]))
self.labelblocks = labelblocks
def __eq__(self, other: object) -> bool:
"""Two Tecanfile are equal if their attributes are."""
# never used thus far.
# https://izziswift.com/compare-object-instances-for-equality-by-their-attributes/
return self.__dict__ == other.__dict__
def __hash__(self) -> int:
"""Define hash (related to __eq__) using self.path."""
return hash(self.path)
@classmethod
def read_xls(cls, path: str) -> list_of_lines:
"""Read first sheet of an xls file.
Parameters
----------
path
Path to .xls file.
Returns
-------
Lines.
"""
df = pd.read_excel(path)
n0 = pd.DataFrame([[np.nan] * len(df.columns)], columns=df.columns)
df = pd.concat([n0, df], ignore_index=True)
df.fillna('', inplace=True)
return df.values.tolist()
@classmethod
def lookup_csv_lines(
cls,
csvl: list_of_lines,
pattern: str = 'Label: Label',
col: int = 0,
) -> List[int]:
"""Lookup the line number where given pattern occurs.
If nothing found return empty list.
Parameters
----------
csvl
Lines of a csv/xls file.
pattern
Pattern to be searched for., default="Label: Label"
col
Column to search (line-by-line).
Returns
-------
Row/line index for all occurrences of pattern.
"""
idxs = []
for i, line in enumerate(csvl):
if pattern in line[col]:
idxs.append(i)
return idxs
class LabelblocksGroup:
"""Group of labelblocks with 'equal' metadata.
Parameters
----------
labelblocks
List of labelblocks with 'equal' metadata.
Attributes
----------
metadata : dict
The common metadata.
temperatures : List[float]
The temperature value for each Labelblock.
data : Dict[str, List[float]]
The usual dict for data (see Labelblock) with well name as key but with
list of values as value.
Raises
------
Exception
If metadata are not all 'equal'.
"""
buffer: Dict[str, List[float]]
data: Dict[str, List[float]]
def __init__(self, labelblocks: List[Labelblock]) -> None:
try:
for lb in labelblocks[1:]:
assert labelblocks[0] == lb
except AssertionError as err:
raise AssertionError('Creation of labelblock group failed.') from err
# build common metadata only
metadata = {}
for k in Labelblock.KEYS:
metadata[k] = labelblocks[0].metadata[k]
# keep only the first Gain element; the Manual/Optimal flag is ignored
metadata['Gain'] = [labelblocks[0].metadata['Gain'][0]]
self.metadata = metadata
# temperatures
temperatures = []
for lb in labelblocks:
temperatures.append(lb.metadata['Temperature'])
self.temperatures = temperatures
# data
datagrp: Dict[str, List[float]] = {}
for key in labelblocks[0].data.keys():
datagrp[key] = []
for lb in labelblocks:
datagrp[key].append(lb.data[key])
self.data = datagrp
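# After grouping, each well maps to one value per Tecan file (numbers are illustrative):
# grp.data['A01'] -> [12345.0, 11876.0, 11210.0]
# grp.temperatures -> [25.1, 25.0, 25.2]
# grp.metadata['Gain'] -> [81.0]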
class TecanfilesGroup:
"""Group of Tecanfiles containing at least one common Labelblock.
Parameters
----------
filenames
List of xls (paths) filenames.
Attributes
----------
labelblocksgroups : List[LabelblocksGroup]
Each group contains its own data like a titration.
Raises
------
Exception
When it is not possible to build any LabelblocksGroup because the
files (listed in filenames) have nothing in common.
Warns
-----
Warning
The Tecanfiles listed in *filenames* are supposed to contain the
"same" list (of length N) of Labelblocks, so normally N
labelblocksgroups are created. A warning is raised if not all
Tecanfiles contain the same number of Labelblocks ('equal' and
mergeable) in the same order; in that case a number M < N of
groups can still be built.
"""
def __init__(self, filenames: List[str]) -> None:
tecanfiles = []
for f in filenames:
tecanfiles.append(Tecanfile(f))
tf0 = tecanfiles[0]
grps = []
if all([tf0.labelblocks == tf.labelblocks for tf in tecanfiles[1:]]):
# expected behaviour
for i, _lb in enumerate(tf0.labelblocks):
gr = LabelblocksGroup([tf.labelblocks[i] for tf in tecanfiles])
grps.append(gr)
else:
# Try to creates as many as possible groups of labelblocks
# with length=len(tecanfiles).
# Not for 'equal' labelblocks within the same tecanfile.
n_tecanfiles = len(tecanfiles)
nmax_labelblocks = max([len(tf.labelblocks) for tf in tecanfiles])
for idx in itertools.product(range(nmax_labelblocks), repeat=n_tecanfiles):
try:
for i, tf in enumerate(tecanfiles):
tf.labelblocks[idx[i]]
except IndexError:
continue
# if all labelblocks exist
else:
try:
gr = LabelblocksGroup(
[tf.labelblocks[idx[i]] for i, tf in enumerate(tecanfiles)]
)
except AssertionError:
continue
# if labelblocks are all 'equal'
else:
grps.append(gr)
if len(grps) == 0:
raise Exception('No common labelblock in filenames' + str(filenames))
else:
warnings.warn(
                'Different LabelblocksGroup among filenames: ' + str(filenames)
)
self.metadata = tecanfiles[0].metadata
self.labelblocksgroups = grps
class Titration(TecanfilesGroup):
"""Group tecanfiles into a Titration as indicated by a listfile.
    The class works from any directory: the listfile (e.g. list.pH) lists the
    Tecan filenames relative to its own location.
Parameters
----------
listfile
File path to the listfile ([tecan_file_path conc]).
Attributes
----------
conc : List[float]
Concentration values common to all 96 titrations.
labelblocksgroups: List[LabelblocksGroup]
List of labelblocksgroups.
"""
def __init__(self, listfile: str) -> None:
try:
df = pd.read_table(listfile, names=['filenames', 'conc'])
except FileNotFoundError as err:
raise FileNotFoundError('Cannot find: ' + listfile) from err
try:
assert df["filenames"].count() == df["conc"].count()
except AssertionError as err:
msg = 'Check format [filenames conc] for listfile: '
raise Exception(msg + listfile) from err
self.conc = df["conc"].tolist()
dirname = os.path.dirname(listfile)
filenames = [os.path.join(dirname, fn) for fn in df["filenames"]]
super().__init__(filenames)
def export_dat(self, path: str) -> None:
"""Export dat files [x,y1,..,yN] from labelblocksgroups.
Parameters
----------
path
Path to output folder.
"""
if not os.path.isdir(path):
os.makedirs(path)
for key, dy1 in self.labelblocksgroups[0].data.items():
df = pd.DataFrame({'x': self.conc, 'y1': dy1})
for n, lb in enumerate(self.labelblocksgroups[1:], start=2):
dy = lb.data[key]
df['y' + str(n)] = dy
df.to_csv(os.path.join(path, key + '.dat'), index=False)
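# --- Illustrative usage sketch (not part of the original module) ---
# A minimal example of the classes above, assuming a listfile named
# "list.pH" (format: [tecan_file_path conc]) and the listed xls files exist.
def _demo_titration_export(listfile: str = "list.pH", out_dir: str = "dat_out") -> None:
    """Parse a titration listfile and export one .dat file per well."""
    tit = Titration(listfile)   # groups the Tecan files into labelblocksgroups
    print(tit.conc)             # concentration values read from the listfile
    tit.export_dat(out_dir)     # writes x,y1..yN columns for every well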
class TitrationAnalysis(Titration):
"""Perform analysis of a titration.
Parameters
----------
titration
Titration object.
schemefile
File path to the schemefile (e.g. {"C01: 'V224Q'"}).
Attributes
----------
scheme : pd.DataFrame or pd.Series FIXME
e.g. {'buffer': ['H12']}
conc : List[float]
Concentration values common to all 96 titrations.
labelblocksgroups : List[LabelblocksGroup]
Deepcopy from titration.
Methods
-------
subtract_bg
dilution_correction
metadata_normalization
calculate_conc
fit
"""
def __init__(self, titration: Titration, schemefile: Optional[str] = None) -> None:
if schemefile is None:
self.scheme = pd.Series({'well': []})
else:
            df = pd.read_table(schemefile)
# -*- coding: utf-8 -*-
"""
Created on Tue Nov 2 22:43:12 2021
@author: obnit
"""
import pandas as pd , matplotlib.pyplot as plt , numpy as np
df = pd.read_csv('Export/tested.csv')
df.Date = pd.to_datetime(df.Date)
df.set_index('Date', inplace=True)
df = df.resample('MS').sum()
df['Percent'] = ((df['Positive']/df['Negative'])*100)
df1 = pd.read_csv('Export/reported.csv')
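# --- Illustrative continuation (a sketch, not part of the original script) ---
# The columns of 'Export/reported.csv' are not shown here, so only the monthly
# positivity rate already computed in `df` is plotted.
fig, ax = plt.subplots(figsize=(8, 4))
df['Percent'].plot(ax=ax, marker='o')        # positive/negative ratio in percent
ax.set_xlabel('Month')
ax.set_ylabel('Positive / Negative (%)')
fig.tight_layout()
plt.show()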
#! /usr/bin/env python
# -*- coding: utf-8 -*-
"""
@version:
@author: zzh
@file: factor_earning_expectation.py
@time: 2019-9-19
"""
import pandas as pd
class FactorEarningExpectation():
"""
盈利预期
"""
def __init__(self):
__str__ = 'factor_earning_expectation'
self.name = '盈利预测'
self.factor_type1 = '盈利预测'
self.factor_type2 = '盈利预测'
self.description = '个股盈利预测因子'
@staticmethod
def NPFY1(tp_earning, factor_earning_expect, trade_date, dependencies=['net_profit_fy1']):
"""
:name: 一致预期净利润(FY1)
:desc: 一致预期净利润的未来第一年度的预测
:unit: 元
:view_dimension: 10000
"""
earning_expect = tp_earning[tp_earning['publish_date'] == trade_date].loc[:, dependencies]
earning_expect.rename(columns={'net_profit_fy1': 'NPFY1'}, inplace=True)
factor_earning_expect = pd.merge(factor_earning_expect, earning_expect, on='security_code')
return factor_earning_expect
@staticmethod
def NPFY2(tp_earning, factor_earning_expect, trade_date, dependencies=['net_profit_fy2']):
"""
:name: 一致预期净利润(FY2)
:desc: 一致预期净利润的未来第二年度的预测
:unit: 元
:view_dimension: 10000
"""
earning_expect = tp_earning[tp_earning['publish_date'] == trade_date].loc[:, dependencies]
earning_expect.rename(columns={'net_profit_fy2': 'NPFY2'}, inplace=True)
factor_earning_expect = pd.merge(factor_earning_expect, earning_expect, on='security_code')
return factor_earning_expect
@staticmethod
def EPSFY1(tp_earning, factor_earning_expect, trade_date, dependencies=['eps_fy1']):
"""
:name: 一致预期每股收益(FY1)
:desc: 一致预期每股收益未来第一年度的预测均值
:unit: 元
:view_dimension: 1
"""
earning_expect = tp_earning[tp_earning['publish_date'] == trade_date].loc[:, dependencies]
earning_expect.rename(columns={'eps_fy1': 'EPSFY1'}, inplace=True)
factor_earning_expect = pd.merge(factor_earning_expect, earning_expect, on='security_code')
return factor_earning_expect
@staticmethod
def EPSFY2(tp_earning, factor_earning_expect, trade_date, dependencies=['eps_fy2']):
"""
:name: 一致预期每股收益(FY2)
:desc: 一致预期每股收益未来第二年度的预测均值
:unit: 元
:view_dimension: 1
"""
earning_expect = tp_earning[tp_earning['publish_date'] == trade_date].loc[:, dependencies]
earning_expect.rename(columns={'eps_fy2': 'EPSFY2'}, inplace=True)
factor_earning_expect = pd.merge(factor_earning_expect, earning_expect, on='security_code')
return factor_earning_expect
@staticmethod
def OptIncFY1(tp_earning, factor_earning_expect, trade_date, dependencies=['operating_revenue_fy1']):
"""
:name: 一致预期营业收入(FY1)
:desc: 一致预期营业收入未来第一年度的预测均值
:unit: 元
:view_dimension: 10000
"""
earning_expect = tp_earning[tp_earning['publish_date'] == trade_date].loc[:, dependencies]
earning_expect.rename(columns={'operating_revenue_fy1': 'OptIncFY1'}, inplace=True)
factor_earning_expect = pd.merge(factor_earning_expect, earning_expect, on='security_code')
return factor_earning_expect
@staticmethod
def OptIncFY2(tp_earning, factor_earning_expect, trade_date, dependencies=['operating_revenue_fy2']):
"""
:name: 一致预期营业收入(FY2)
:desc: 一致预期营业收入未来第二年度的预测均值
:unit: 元
:view_dimension: 10000
"""
earning_expect = tp_earning[tp_earning['publish_date'] == trade_date].loc[:, dependencies]
earning_expect.rename(columns={'operating_revenue_fy2': 'OptIncFY2'}, inplace=True)
factor_earning_expect = pd.merge(factor_earning_expect, earning_expect, on='security_code')
return factor_earning_expect
@staticmethod
def CEPEFY1(tp_earning, factor_earning_expect, trade_date, dependencies=['pe_fy1']):
"""
:name: 一致预期市盈率(PE)(FY1)
:desc: 一致预期市盈率未来第一年度的预测均值
:unit: 倍
:view_dimension: 1
"""
earning_expect = tp_earning[tp_earning['publish_date'] == trade_date].loc[:, dependencies]
earning_expect.rename(columns={'pe_fy1': 'CEPEFY1'}, inplace=True)
factor_earning_expect = pd.merge(factor_earning_expect, earning_expect, on='security_code')
return factor_earning_expect
@staticmethod
def CEPEFY2(tp_earning, factor_earning_expect, trade_date, dependencies=['pe_fy2']):
"""
:name: 一致预期市盈率(PE)(FY2)
:desc: 一致预期市盈率未来第二年度的预测均值
:unit: 倍
:view_dimension: 1
"""
earning_expect = tp_earning[tp_earning['publish_date'] == trade_date].loc[:, dependencies]
earning_expect.rename(columns={'pe_fy2': 'CEPEFY2'}, inplace=True)
factor_earning_expect = pd.merge(factor_earning_expect, earning_expect, on='security_code')
return factor_earning_expect
@staticmethod
def CEPBFY1(tp_earning, factor_earning_expect, trade_date, dependencies=['pb_fy1']):
"""
:name: 一致预期市净率(PB)(FY1)
:desc: 一致预期市净率未来第一年度的预测均值
:unit: 倍
:view_dimension: 1
"""
earning_expect = tp_earning[tp_earning['publish_date'] == trade_date].loc[:, dependencies]
earning_expect.rename(columns={'pb_fy1': 'CEPBFY1'}, inplace=True)
factor_earning_expect = pd.merge(factor_earning_expect, earning_expect, on='security_code')
return factor_earning_expect
@staticmethod
def CEPBFY2(tp_earning, factor_earning_expect, trade_date, dependencies=['pb_fy2']):
"""
:name: 一致预期市净率(PB)(FY2)
:desc: 一致预期市净率未来第二年度的预测均值
:unit: 倍
:view_dimension: 1
"""
earning_expect = tp_earning[tp_earning['publish_date'] == trade_date].loc[:, dependencies]
earning_expect.rename(columns={'pb_fy2': 'CEPBFY2'}, inplace=True)
factor_earning_expect = pd.merge(factor_earning_expect, earning_expect, on='security_code')
return factor_earning_expect
@staticmethod
def CEPEGFY1(tp_earning, factor_earning_expect, trade_date, dependencies=['peg_fy1']):
"""
:name: 市盈率相对盈利增长比率(FY1)
:desc: 未来第一年度市盈率相对盈利增长比率
:unit:
:view_dimension: 0.01
"""
earning_expect = tp_earning[tp_earning['publish_date'] == trade_date].loc[:, dependencies]
earning_expect.rename(columns={'peg_fy1': 'CEPEGFY1'}, inplace=True)
factor_earning_expect = pd.merge(factor_earning_expect, earning_expect, on='security_code')
return factor_earning_expect
@staticmethod
def CEPEGFY2(tp_earning, factor_earning_expect, trade_date, dependencies=['peg_fy2']):
"""
:name: 市盈率相对盈利增长比率(FY2)
:desc: 未来第二年度市盈率相对盈利增长比率
:unit:
:view_dimension: 0.01
"""
earning_expect = tp_earning[tp_earning['publish_date'] == trade_date].loc[:, dependencies]
earning_expect.rename(columns={'peg_fy2': 'CEPEGFY2'}, inplace=True)
factor_earning_expect = pd.merge(factor_earning_expect, earning_expect, on='security_code')
return factor_earning_expect
@staticmethod
    def _change_rate(tp_earning, trade_date, pre_trade_date, column, factor_name):
        earning_expect = tp_earning[tp_earning['publish_date'] == trade_date].loc[:, column]
        earning_expect_pre = tp_earning[tp_earning['publish_date'] == pre_trade_date].loc[:, column]
        earning_expect = pd.merge(earning_expect, earning_expect_pre, on='security_code', how='left')
        earning_expect[factor_name] = (earning_expect[column + '_x'] - earning_expect[column + '_y']) / \
                                      earning_expect[column + '_y']
        earning_expect.drop(columns=[column + '_x', column + '_y'], inplace=True)
        return earning_expect
    @staticmethod
    def _change_value(tp_earning, trade_date, pre_trade_date, column, factor_name):
        earning_expect = tp_earning[tp_earning['publish_date'] == trade_date].loc[:, column]
        earning_expect_pre = tp_earning[tp_earning['publish_date'] == pre_trade_date].loc[:, column]
        earning_expect = pd.merge(earning_expect, earning_expect_pre, on='security_code', how='left')
        earning_expect[factor_name] = earning_expect[column + '_x'] - earning_expect[column + '_y']
        earning_expect.drop(columns=[column + '_x', column + '_y'], inplace=True)
        return earning_expect
@staticmethod
def NPFY11WRT(tp_earning, factor_earning_expect, trade_date):
"""
:name: 一致预测净利润(FY1)变化率_一周
:desc: 未来第一年度一致预测净利润一周内预测值变化率
:unit:
:view_dimension: 0.01
"""
trade_dates = sorted(set(tp_earning['publish_date']), reverse=True)
if len(trade_dates) >= 2:
earning_expect = FactorEarningExpectation._change_rate(tp_earning, trade_date, trade_dates[1],
'net_profit_fy1',
'NPFY11WRT')
factor_earning_expect = pd.merge(factor_earning_expect, earning_expect, on='security_code')
return factor_earning_expect
@staticmethod
def NPFY11MRT(tp_earning, factor_earning_expect, trade_date):
"""
:name: 一致预测净利润(FY1)变化率_一月
:desc: 未来第一年度一致预测净利润一月内预测值变化率
:unit:
:view_dimension: 0.01
"""
trade_dates = sorted(set(tp_earning['publish_date']), reverse=True)
if len(trade_dates) >= 3:
earning_expect = FactorEarningExpectation._change_rate(tp_earning, trade_date, trade_dates[2],
'net_profit_fy1',
'NPFY11MRT')
factor_earning_expect = pd.merge(factor_earning_expect, earning_expect, on='security_code')
return factor_earning_expect
@staticmethod
def NPFY13MRT(tp_earning, factor_earning_expect, trade_date):
"""
:name: 一致预测净利润(FY1)变化率_三月
:desc: 未来第一年度一致预测净利润三月内预测值变化率
:unit:
:view_dimension: 0.01
"""
trade_dates = sorted(set(tp_earning['publish_date']), reverse=True)
if len(trade_dates) >= 4:
earning_expect = FactorEarningExpectation._change_rate(tp_earning, trade_date, trade_dates[3],
'net_profit_fy1',
'NPFY13MRT')
factor_earning_expect = pd.merge(factor_earning_expect, earning_expect, on='security_code')
return factor_earning_expect
@staticmethod
def NPFY16MRT(tp_earning, factor_earning_expect, trade_date):
"""
:name: 一致预测净利润(FY1)变化率_六月
:desc: 未来第一年度一致预测净利润六月内预测值变化率
:unit:
:view_dimension: 0.01
"""
trade_dates = sorted(set(tp_earning['publish_date']), reverse=True)
if len(trade_dates) >= 5:
earning_expect = FactorEarningExpectation._change_rate(tp_earning, trade_date, trade_dates[4],
'net_profit_fy1',
'NPFY16MRT')
factor_earning_expect = pd.merge(factor_earning_expect, earning_expect, on='security_code')
return factor_earning_expect
@staticmethod
def EPSFY11WChg(tp_earning, factor_earning_expect, trade_date):
"""
:name: 一致预测每股收益(FY1)变化_一周
:desc: 未来第一年度一致预测每股收益一周内预测值变化
:unit: 元
:view_dimension: 1
"""
trade_dates = sorted(set(tp_earning['publish_date']), reverse=True)
if len(trade_dates) >= 2:
earning_expect = FactorEarningExpectation._change_value(tp_earning, trade_date, trade_dates[1],
'eps_fy1',
'EPSFY11WChg')
factor_earning_expect = pd.merge(factor_earning_expect, earning_expect, on='security_code')
return factor_earning_expect
@staticmethod
def EPSFY11MChg(tp_earning, factor_earning_expect, trade_date):
"""
:name: 一致预测每股收益(FY1)变化_一月
:desc: 未来第一年度一致预测每股收益一月内预测值变化
:unit: 元
:view_dimension: 1
"""
trade_dates = sorted(set(tp_earning['publish_date']), reverse=True)
if len(trade_dates) >= 3:
earning_expect = FactorEarningExpectation._change_value(tp_earning, trade_date, trade_dates[2],
'eps_fy1',
'EPSFY11MChg')
factor_earning_expect = pd.merge(factor_earning_expect, earning_expect, on='security_code')
return factor_earning_expect
@staticmethod
def EPSFY13MChg(tp_earning, factor_earning_expect, trade_date):
"""
:name: 一致预测每股收益(FY1)变化_三月
:desc: 未来第一年度一致预测每股收益三月内预测值变化
:unit: 元
:view_dimension: 1
"""
trade_dates = sorted(set(tp_earning['publish_date']), reverse=True)
if len(trade_dates) >= 4:
earning_expect = FactorEarningExpectation._change_value(tp_earning, trade_date, trade_dates[3],
'eps_fy1',
'EPSFY13MChg')
factor_earning_expect = pd.merge(factor_earning_expect, earning_expect, on='security_code')
return factor_earning_expect
@staticmethod
def EPSFY16MChg(tp_earning, factor_earning_expect, trade_date):
"""
:name: 一致预测每股收益(FY1)变化_六月
:desc: 未来第一年度一致预测每股收益六月内预测值变化
:unit: 元
:view_dimension: 1
"""
trade_dates = sorted(set(tp_earning['publish_date']), reverse=True)
if len(trade_dates) >= 5:
earning_expect = FactorEarningExpectation._change_value(tp_earning, trade_date, trade_dates[4],
'eps_fy1',
'EPSFY16MChg')
factor_earning_expect = pd.merge(factor_earning_expect, earning_expect, on='security_code')
return factor_earning_expect
@staticmethod
def EPSFY11WRT(tp_earning, factor_earning_expect, trade_date):
"""
:name: 一致预测每股收益(FY1)变化率_一周
:desc: 未来第一年度一致预测每股收益一周内预测值变化率
:unit:
:view_dimension: 0.01
"""
trade_dates = sorted(set(tp_earning['publish_date']), reverse=True)
if len(trade_dates) >= 2:
            earning_expect = FactorEarningExpectation._change_rate(tp_earning, trade_date, trade_dates[1],
'eps_fy1',
'EPSFY11WRT')
factor_earning_expect = pd.merge(factor_earning_expect, earning_expect, on='security_code')
return factor_earning_expect
@staticmethod
def EPSFY11MRT(tp_earning, factor_earning_expect, trade_date):
"""
:name: 一致预测每股收益(FY1)变化率_一月
:desc: 未来第一年度一致预测每股收益一月内预测值变化率
:unit:
:view_dimension: 0.01
"""
trade_dates = sorted(set(tp_earning['publish_date']), reverse=True)
if len(trade_dates) >= 3:
earning_expect = FactorEarningExpectation._change_rate(tp_earning, trade_date, trade_dates[2],
'eps_fy1',
'EPSFY11MRT')
factor_earning_expect = pd.merge(factor_earning_expect, earning_expect, on='security_code')
return factor_earning_expect
@staticmethod
def EPSFY13MRT(tp_earning, factor_earning_expect, trade_date):
"""
:name: 一致预测每股收益(FY1)变化率_三月
:desc: 未来第一年度一致预测每股收益三月内预测值变化率
:unit:
:view_dimension: 0.01
"""
trade_dates = sorted(set(tp_earning['publish_date']), reverse=True)
if len(trade_dates) >= 4:
earning_expect = FactorEarningExpectation._change_rate(tp_earning, trade_date, trade_dates[3],
'eps_fy1',
'EPSFY13MRT')
factor_earning_expect = pd.merge(factor_earning_expect, earning_expect, on='security_code')
return factor_earning_expect
@staticmethod
def EPSFY16MRT(tp_earning, factor_earning_expect, trade_date):
"""
:name: 一致预测每股收益(FY1)变化率_六月
:desc: 未来第一年度一致预测每股收益六月内预测值变化率
:unit:
:view_dimension: 0.01
"""
trade_dates = sorted(set(tp_earning['publish_date']), reverse=True)
if len(trade_dates) >= 5:
earning_expect = FactorEarningExpectation._change_rate(tp_earning, trade_date, trade_dates[4],
'eps_fy1',
'EPSFY16MRT')
factor_earning_expect = pd.merge(factor_earning_expect, earning_expect, on='security_code')
return factor_earning_expect
@staticmethod
def NPFY11WChg(tp_earning, factor_earning_expect, trade_date):
"""
:name: 一致预测净利润(FY1)变化_一周
:desc: 未来第一年度一致预测净利润一周内预测值变化
:unit: 元
:view_dimension: 10000
"""
trade_dates = sorted(set(tp_earning['publish_date']), reverse=True)
if len(trade_dates) >= 2:
earning_expect = FactorEarningExpectation._change_value(tp_earning, trade_date, trade_dates[1],
'net_profit_fy1',
'NPFY11WChg')
factor_earning_expect = pd.merge(factor_earning_expect, earning_expect, on='security_code')
return factor_earning_expect
@staticmethod
def NPFY11MChg(tp_earning, factor_earning_expect, trade_date):
"""
:name: 一致预测净利润(FY1)变化_一月
:desc: 未来第一年度一致预测净利润一月内预测值变化
:unit: 元
:view_dimension: 10000
"""
trade_dates = sorted(set(tp_earning['publish_date']), reverse=True)
if len(trade_dates) >= 3:
earning_expect = FactorEarningExpectation._change_value(tp_earning, trade_date, trade_dates[2],
'net_profit_fy1',
'NPFY11MChg')
factor_earning_expect = pd.merge(factor_earning_expect, earning_expect, on='security_code')
return factor_earning_expect
@staticmethod
def NPFY13MChg(tp_earning, factor_earning_expect, trade_date):
"""
:name: 一致预测净利润(FY1)变化_三月
:desc: 未来第一年度一致预测净利润三月内预测值变化
:unit: 元
:view_dimension: 10000
"""
trade_dates = sorted(set(tp_earning['publish_date']), reverse=True)
if len(trade_dates) >= 4:
earning_expect = FactorEarningExpectation._change_value(tp_earning, trade_date, trade_dates[3],
'net_profit_fy1',
'NPFY13MChg')
factor_earning_expect = pd.merge(factor_earning_expect, earning_expect, on='security_code')
return factor_earning_expect
@staticmethod
def NPFY16MChg(tp_earning, factor_earning_expect, trade_date):
"""
:name: 一致预测净利润(FY1)变化_六月
:desc: 未来第一年度一致预测净利润六月内预测值变化
:unit: 元
:view_dimension: 10000
"""
trade_dates = sorted(set(tp_earning['publish_date']), reverse=True)
if len(trade_dates) >= 5:
earning_expect = FactorEarningExpectation._change_value(tp_earning, trade_date, trade_dates[4],
'net_profit_fy1',
'NPFY16MChg')
factor_earning_expect = pd.merge(factor_earning_expect, earning_expect, on='security_code')
return factor_earning_expect
@staticmethod
def ChgNPFY1FY2(tp_earning, factor_earning_expect, trade_date):
"""
:name: 一致预测净利润(FY2)与一致预期净利润(FY1)的变化率
:desc: 未来第二年度一致预测净利润与未来第一年度一致预测净利润变化率
:unit:
:view_dimension: 0.01
"""
        factor_earning_expect['ChgNPFY1FY2'] = (factor_earning_expect['NPFY2'] - factor_earning_expect['NPFY1']) / abs(
            factor_earning_expect['NPFY1']) * 100
return factor_earning_expect
@staticmethod
def ChgEPSFY1FY2(tp_earning, factor_earning_expect, trade_date):
"""
:name: 一致预测每股收益(FY2)与一致预期每股收益(FY1)的变化率
:desc: 未来第二年度一致预测每股收益与未来第一年度一致预测每股收益变化率
:unit:
:view_dimension: 0.01
"""
        factor_earning_expect['ChgEPSFY1FY2'] = (factor_earning_expect['EPSFY2'] - factor_earning_expect['EPSFY1']) / abs(
            factor_earning_expect['EPSFY1']) * 100
return factor_earning_expect
@staticmethod
def OptIncFY11WRT(tp_earning, factor_earning_expect, trade_date):
"""
:name: 一致预测营业收入(FY1)变化_一周
:desc: 未来第一年度一致预测营业收入一周内预测值变化
:unit: 元
:view_dimension: 10000
"""
trade_dates = sorted(set(tp_earning['publish_date']), reverse=True)
if len(trade_dates) >= 2:
earning_expect = FactorEarningExpectation._change_value(tp_earning, trade_date, trade_dates[1],
'operating_revenue_fy1',
'OptIncFY11WRT')
factor_earning_expect = pd.merge(factor_earning_expect, earning_expect, on='security_code')
return factor_earning_expect
@staticmethod
def OptIncFY11MRT(tp_earning, factor_earning_expect, trade_date):
"""
:name: 一致预测营业收入(FY1)变化_一月
:desc: 未来第一年度一致预测营业收入一月内预测值变化
:unit: 元
:view_dimension: 10000
"""
trade_dates = sorted(set(tp_earning['publish_date']), reverse=True)
if len(trade_dates) >= 3:
earning_expect = FactorEarningExpectation._change_value(tp_earning, trade_date, trade_dates[2],
'operating_revenue_fy1',
'OptIncFY11MRT')
factor_earning_expect = pd.merge(factor_earning_expect, earning_expect, on='security_code')
return factor_earning_expect
@staticmethod
def OptIncFY13MRT(tp_earning, factor_earning_expect, trade_date):
"""
:name: 一致预测营业收入(FY1)变化_三月
:desc: 未来第一年度一致预测营业收入三月内预测值变化
:unit: 元
:view_dimension: 10000
"""
trade_dates = sorted(set(tp_earning['publish_date']), reverse=True)
if len(trade_dates) >= 4:
earning_expect = FactorEarningExpectation._change_value(tp_earning, trade_date, trade_dates[3],
'operating_revenue_fy1',
'OptIncFY13MRT')
factor_earning_expect = pd.merge(factor_earning_expect, earning_expect, on='security_code')
return factor_earning_expect
@staticmethod
def OptIncFY16MRT(tp_earning, factor_earning_expect, trade_date):
"""
:name: 一致预测营业收入(FY1)变化_六月
:desc: 未来第一年度一致预测营业收入六月内预测值变化
:unit: 元
:view_dimension: 10000
"""
trade_dates = sorted(set(tp_earning['publish_date']), reverse=True)
if len(trade_dates) >= 5:
earning_expect = FactorEarningExpectation._change_value(tp_earning, trade_date, trade_dates[4],
'operating_revenue_fy1',
'OptIncFY16MRT')
factor_earning_expect = pd.merge(factor_earning_expect, earning_expect, on='security_code')
return factor_earning_expect
@staticmethod
def OptIncFY11WChg(tp_earning, factor_earning_expect, trade_date):
"""
:name: 一致预测营业收入(FY1)变化率_一周
:desc: 未来第一年度一致预测营业收入一周内预测值变化率
:unit:
:view_dimension: 0.01
"""
trade_dates = sorted(set(tp_earning['publish_date']), reverse=True)
if len(trade_dates) >= 2:
earning_expect = FactorEarningExpectation._change_rate(tp_earning, trade_date, trade_dates[1],
'operating_revenue_fy1',
'OptIncFY11WChg')
factor_earning_expect = pd.merge(factor_earning_expect, earning_expect, on='security_code')
return factor_earning_expect
@staticmethod
def OptIncFY11MChg(tp_earning, factor_earning_expect, trade_date):
"""
:name: 一致预测营业收入(FY1)变化率_一月
:desc: 未来第一年度一致预测营业收入一月内预测值变化率
:unit:
:view_dimension: 0.01
"""
trade_dates = sorted(set(tp_earning['publish_date']), reverse=True)
if len(trade_dates) >= 3:
earning_expect = FactorEarningExpectation._change_rate(tp_earning, trade_date, trade_dates[2],
'operating_revenue_fy1',
'OptIncFY11MChg')
factor_earning_expect = pd.merge(factor_earning_expect, earning_expect, on='security_code')
return factor_earning_expect
@staticmethod
def OptIncFY13MChg(tp_earning, factor_earning_expect, trade_date):
"""
:name: 一致预测营业收入(FY1)变化率_三月
:desc: 未来第一年度一致预测营业收入三月内预测值变化率
:unit:
:view_dimension: 0.01
"""
trade_dates = sorted(set(tp_earning['publish_date']), reverse=True)
if len(trade_dates) >= 4:
earning_expect = FactorEarningExpectation._change_rate(tp_earning, trade_date, trade_dates[3],
'operating_revenue_fy1',
'OptIncFY13MChg')
factor_earning_expect = pd.merge(factor_earning_expect, earning_expect, on='security_code')
return factor_earning_expect
@staticmethod
def OptIncFY16MChg(tp_earning, factor_earning_expect, trade_date):
"""
:name: 一致预测营业收入(FY1)变化率_六月
:desc: 未来第一年度一致预测营业收入六月内预测值变化率
:unit:
:view_dimension: 0.01
"""
trade_dates = sorted(set(tp_earning['publish_date']), reverse=True)
if len(trade_dates) >= 5:
earning_expect = FactorEarningExpectation._change_rate(tp_earning, trade_date, trade_dates[4],
'operating_revenue_fy1',
'OptIncFY16MChg')
            factor_earning_expect = pd.merge(factor_earning_expect, earning_expect, on='security_code')
        return factor_earning_expect
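# --- Illustrative usage sketch (hypothetical data, not part of the original module) ---
# The merge on 'security_code' inside every method implies that the input frames
# carry 'security_code' on the index, so these stand-in frames do the same.
def _demo_npfy1():
    idx = pd.Index(['000001.XSHE', '000002.XSHE'], name='security_code')
    tp_earning = pd.DataFrame({'publish_date': ['2019-09-19', '2019-09-19'],
                               'net_profit_fy1': [1.2e9, 3.4e9]}, index=idx)
    factor = pd.DataFrame(index=idx)
    return FactorEarningExpectation.NPFY1(tp_earning, factor, '2019-09-19')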
import logging
import pandas as pd
import core.artificial_signal_generators as carsigen
import helpers.hprint as hprint
import helpers.hunit_test as hunitest
_LOG = logging.getLogger(__name__)
class TestArmaProcess(hunitest.TestCase):
def test1(self) -> None:
ar_params = [0.75, -0.25]
ma_params = [0.65, 0.35]
arma_process = carsigen.ArmaProcess(ar_params, ma_params)
realization = arma_process.generate_sample(
{"start": "2000-01-01", "periods": 40, "freq": "B"},
scale=1,
burnin=10,
)
self.check_string(
hunitest.convert_df_to_string(
realization, title=realization.name, index=True
)
)
def test2(self) -> None:
ar_params = [0.5]
ma_params = [-0.5]
arma_process = carsigen.ArmaProcess(ar_params, ma_params)
realization = arma_process.generate_sample(
{"start": "2000-01-01", "periods": 40, "freq": "B"},
scale=1,
burnin=5,
)
self.check_string(
hunitest.convert_df_to_string(
realization, title=realization.name, index=True
)
)
def test3(self) -> None:
ar_params = []
ma_params = []
arma_process = carsigen.ArmaProcess(ar_params, ma_params)
realization = arma_process.generate_sample(
{"start": "2000-01-01", "periods": 40, "freq": "B"},
scale=1,
burnin=5,
)
self.check_string(
hunitest.convert_df_to_string(
realization, title=realization.name, index=True
)
)
class TestMultivariateNormalProcess(hunitest.TestCase):
def test1(self) -> None:
mn_process = carsigen.MultivariateNormalProcess()
mn_process.set_cov_from_inv_wishart_draw(dim=4, seed=0)
realization = mn_process.generate_sample(
{"start": "2000-01-01", "periods": 40, "freq": "B"}, seed=0
)
self.check_string(hunitest.convert_df_to_string(realization, index=True))
def test2(self) -> None:
        mean = pd.Series([1, 2])
import pandas
import sqlite3
database_file_name = "data.db"
filename = "LIWC2015 Results (users (52235 files)).csv"
def number(n):
return float(n.replace(",", "."))
def put_personality_to_database(row, cursor):
sql_query = f'INSERT INTO personality (author, WC, C, A, O, E, N) values ("{row.Filename}", {row.WC}, {row.C}, {row.A}, {row.O}, {row.E}, {row.N})'
cursor.execute(sql_query)
with open(filename) as f:
data = f.readlines()
data_split = [v.replace(",", ";", 2).replace("\n", "").split(";") for v in data]
    pandas_data = pandas.DataFrame(data_split[1:], columns=data_split[0])
"""Survival dataset preview or pre-processing functionality.
"""
import pandas as pd
from sklearn.model_selection import ShuffleSplit
from .vision import plot_km_survf
from .simulator import SimulatedData
def survival_stats(data, t_col="t", e_col="e", plot=False):
"""
Print statistics of survival data.
Parameters
----------
data: pandas.DataFrame
Survival data you specified.
t_col: str
Column name of data indicating time.
e_col: str
Column name of data indicating events or status.
plot: boolean
        Whether to plot the survival curve.
"""
print("--------------- Survival Data Statistics ---------------")
N = len(data)
print("# Rows:", N)
print("# Columns: %d + %s + %s" % (len(data.columns) - 2, e_col, t_col))
print("# Event Percentage: %.2f%%" % (100.0 * data[e_col].sum() / N))
print("# Min Time:", data[t_col].min())
print("# Max Time:", data[t_col].max())
print("")
if plot:
plot_km_survf(data, t_col=t_col, e_col=e_col)
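# --- Illustrative usage sketch (toy data) ---
def _demo_survival_stats():
    toy = pd.DataFrame({'x_0': [0.1, 0.4, 0.3, 0.9],
                        't': [5.0, 12.0, 3.5, 8.0],   # observed time
                        'e': [1, 0, 1, 1]})           # 1 = event, 0 = right censored
    survival_stats(toy, t_col='t', e_col='e', plot=False)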
def survival_df(data, t_col="t", e_col="e", label_col="Y", exclude_col=[]):
"""
Transform original DataFrame to survival dataframe that would be used in model
training or predicting.
Parameters
----------
data: DataFrame
Survival data to be transformed.
t_col: str
Column name of data indicating time.
e_col: str
Column name of data indicating events or status.
label_col: str
Name of new label in transformed survival data.
exclude_col: list
Columns to be excluded.
Returns
-------
DataFrame:
        Transformed survival data. Negative values in the label are taken as right censored.
"""
x_cols = [c for c in data.columns if c not in [t_col, e_col] + exclude_col]
    # Negative values are taken as right censored
data.loc[:, label_col] = data.loc[:, t_col]
data.loc[data[e_col] == 0, label_col] = - data.loc[data[e_col] == 0, label_col]
return data[x_cols + [label_col]]
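# --- Illustrative usage sketch (toy data) ---
# Censored rows (e == 0) come back with a negative label, as documented above.
def _demo_survival_df():
    toy = pd.DataFrame({'x_0': [0.1, 0.4], 't': [5.0, 12.0], 'e': [1, 0]})
    return survival_df(toy, t_col='t', e_col='e', label_col='Y')   # Y == [5.0, -12.0]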
def load_simulated_data(hr_ratio,
N=1000, num_features=10, num_var=2,
average_death=5, end_time=15,
method="gaussian",
gaussian_config={},
seed=42):
"""
    Load simulated data generated from the exponential distribution.
Parameters
----------
hr_ratio: int or float
`lambda_max` hazard ratio.
N: int
The number of observations.
average_death: int or float
Average death time that is the mean of the Exponentional distribution.
end_time: int or float
Censoring time that represents an 'end of study'. Any death
time greater than end_time will be censored.
num_features: int
Size of observation vector. Default: 10.
num_var: int
Number of varaibles simulated data depends on. Default: 2.
method: string
The type of simulated data. 'linear' or 'gaussian'.
gaussian_config: dict
Dictionary of additional parameters for gaussian simulation.
seed: int
Random state.
Returns
-------
pandas.DataFrame
A simulated survival dataset following the given args.
Notes
-----
<NAME>. Generating survival times to simulate cox proportional
hazards models with time-varying covariates. Statistics in medicine,
31(29):3946-3958, 2012.
"""
generator = SimulatedData(hr_ratio, average_death=average_death, end_time=end_time,
num_features=num_features, num_var=num_var)
raw_data = generator.generate_data(N, method=method, gaussian_config=gaussian_config, seed=seed)
# Transform to DataFrame
df = pd.DataFrame(raw_data['x'], columns=['x_' + str(i) for i in range(num_features)])
df['e'] = raw_data['e']
df['t'] = raw_data['t']
return df
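# --- Illustrative usage sketch ---
def _demo_load_simulated_data():
    df = load_simulated_data(hr_ratio=2.0, N=500, num_features=10, num_var=2,
                             average_death=5, end_time=15, method="gaussian", seed=1)
    survival_stats(df, t_col='t', e_col='e')   # uses the helper defined above
    return df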
def load_data(file_path, t_col='t', e_col='e', excluded_cols=[],
split_ratio=1.0, normalize=False, seed=42):
"""
    Load a csv file and return standard survival data for training or testing.
Parameters
----------
file_path: str
File path. Only support for csv file.
t_col: str
Column name of observed time in your data.
e_col: str
Column name of observed status in your data.
excluded_cols: list
Columns will not be included in the final data.
split_ratio: float
        If `split_ratio` is 1.0, the full dataset is returned. Otherwise, the
        data is split into train/test parts and both are returned.
normalize: bool
If true, then data will be normalized by Min-Max scale.
seed: int
Random seed for splitting data.
Returns
------
pandas.DataFrame
Or tuple of two DataFrames if split_ratio is less than 1.0.
"""
# Read csv data
data_all = pd.read_csv(file_path)
# list columns out
Y_cols = [t_col, e_col]
_not_int_x_cols = Y_cols + excluded_cols
X_cols = [x for x in data_all.columns if x not in _not_int_x_cols]
X = data_all[X_cols]
y = data_all[Y_cols]
    # Normalize data
if normalize:
for col in X_cols:
X[col + "_norm"] = (X[col] - X[col].mean()) / (X[col].max() - X[col].min())
X.drop(columns=X_cols, inplace=True)
# Split data
if split_ratio == 1.0:
train_X, train_y = X, y
return pd.concat([train_X, train_y], axis=1)
else:
sss = ShuffleSplit(n_splits=1, test_size=1 - split_ratio, random_state=seed)
for train_index, test_index in sss.split(X, y):
train_X, test_X = X.loc[train_index, :], X.loc[test_index, :]
train_y, test_y = y.loc[train_index, :], y.loc[test_index, :]
            return pd.concat([train_X, train_y], axis=1), pd.concat([test_X, test_y], axis=1)
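# --- Illustrative usage sketch ('survival.csv' is a placeholder path) ---
# With split_ratio < 1.0 the function returns (train, test), per its docstring.
def _demo_load_data(csv_path: str = "survival.csv"):
    train_df, test_df = load_data(csv_path, t_col='t', e_col='e',
                                  split_ratio=0.8, normalize=True, seed=42)
    survival_stats(train_df, t_col='t', e_col='e')
    return train_df, test_df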
import os
import time
import glob
import pandas as pd
from dateutil import parser
import dask.dataframe as dd
#%% directories and files
data_root = os.path.normpath('/home/andreas/data')
dia_dir = os.path.join(data_root, 'TXT', 'DIA')
dia_files = glob.glob(dia_dir + '/*.txt')
parquet_file = os.path.join(data_root, 'dia.parquet')
#%% Helper functions
def get_field_names(txt_file):
df = pd.read_csv(txt_file, sep = '|', nrows = 10)
return list(df.columns)
field_names = get_field_names(dia_files[0])
#%% Diagnosis reports
# New dask data frame
ddf = dd.from_pandas(pd.DataFrame(), npartitions = 10)
# Loop over all text files
start = time.time()
for t, txt_file in enumerate(dia_files):
# Read the entire text into a single string
with open(txt_file) as fd:
next(fd)
data = fd.read()
# Split rows by \n
data_split = data.split('\n')
print('Number of rows in file', len(data_split))
# Loop over all reports in this file
series_list = []
for row in range(len(data_split)):
# split report by field and remove \n
row_text = data_split[row].split('|')
if len(row_text) >= len(field_names):
ser_dict = dict(zip(field_names, row_text[0:len(field_names)]))
# Convert datetime
datetime_field = 'Date'
timestamp = parser.parse(ser_dict[datetime_field])
year = timestamp.year
ser_dict[datetime_field] = timestamp
ser_dict['Report_Year'] = year
ser_dict['Report_File'] = os.path.basename(txt_file)
# Create pandas series object
series = pd.Series(ser_dict)
# Add series to list
series_list.append(series)
if (row+1)%100000==0:
print('File {file}/{total_files}, Note {note}/{total_notes}'.format(file = t+1,
total_files = len(dia_files),
note = row+1,
total_notes = len(data_split)))
print('Time {0:.2f} minutes.'.format((time.time()-start)/60))
# Data frame for this file from list of pd.series objects
    df = pd.DataFrame(series_list)
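    # --- Illustrative continuation (a sketch; the original script is truncated here) ---
    # Grow the dask frame with this file's rows; the initially empty `ddf` is
    # replaced on the first file to avoid column-mismatch issues.
    file_ddf = dd.from_pandas(df, npartitions=1)
    ddf = file_ddf if t == 0 else dd.concat([ddf, file_ddf])
# After the loop, persist everything (assumes a parquet engine such as pyarrow is installed).
print('Writing {} ...'.format(parquet_file))
ddf.to_parquet(parquet_file)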
import pandas as pd # type: ignore
import numpy as np # type: ignore
import tqdm # type: ignore
import json
import os
###############################################
# Convert wyscout json files to wyscout.h5
###############################################
def jsonfiles_to_h5(jsonfiles, h5file):
matches = []
players : list = []
teams: list = []
with pd.HDFStore(h5file) as store:
for jsonfile in jsonfiles:
with open(jsonfile, "r", encoding="utf-8") as fh:
root = json.load(fh)
matches.append(get_match(root))
teams += get_teams(root)
players += get_players(root)
events = get_events(root)
store[f"events/match_{get_match_id(root)}"] = pd.DataFrame(events)
store["matches"] = pd.DataFrame(matches).drop_duplicates("wyId")
store["teams"] = pd.DataFrame(teams).drop_duplicates("wyId")
store["players"] = pd.DataFrame(players).drop_duplicates("wyId")
def get_match(root):
return root["match"]
def get_match_id(root):
return root["match"]["wyId"]
def get_teams(root):
return [t["team"] for t in root["teams"].values() if t.get("team")]
def get_players(root):
return [
player["player"]
for team in root["players"].values()
for player in team
if player.get("player")
]
def get_events(root):
return root["events"]
###################################
# Convert wyscout.h5 to spadl.h5
# WARNING: HERE BE DRAGONS
# This code for converting wyscout data was organically grown over a long period of time.
# It works for now, but needs to be cleaned up in the future.
# Enter at your own risk.
###################################
import socceraction.spadl.config as spadlcfg
spadl_length = spadlcfg.field_length
spadl_width = spadlcfg.field_width
bodyparts = spadlcfg.bodyparts
results = spadlcfg.results
actiontypes = spadlcfg.actiontypes
min_dribble_length = 3
max_dribble_length = 60
max_dribble_duration = 10
def convert_to_spadl(wyscouth5, spadlh5):
with pd.HDFStore(wyscouth5) as wyscoutstore, pd.HDFStore(spadlh5) as spadlstore:
print("...Inserting actiontypes")
spadlstore["actiontypes"] = pd.DataFrame(
list(enumerate(actiontypes)), columns=["type_id", "type_name"]
)
print("...Inserting bodyparts")
spadlstore["bodyparts"] = pd.DataFrame(
list(enumerate(bodyparts)), columns=["bodypart_id", "bodypart_name"]
)
print("...Inserting results")
spadlstore["results"] = pd.DataFrame(
list(enumerate(results)), columns=["result_id", "result_name"]
)
print("...Converting games")
matches = wyscoutstore["matches"]
games = convert_games(matches)
spadlstore["games"] = games
print("...Converting players")
spadlstore["players"] = convert_players(wyscoutstore["players"])
print("...Converting teams")
spadlstore["teams"] = convert_teams(wyscoutstore["teams"])
print("...Generating player_games")
player_games = []
for match in tqdm.tqdm(list(matches.itertuples()), unit="game"):
events = wyscoutstore[f"events/match_{match.wyId}"]
pg = get_player_games(match, events)
player_games.append(pg)
player_games = pd.concat(player_games)
spadlstore["player_games"] = player_games
print("...Converting events to actions")
for game in tqdm.tqdm(list(games.itertuples()), unit="game"):
events = wyscoutstore[f"events/match_{game.game_id}"]
actions = convert_actions(events, game.home_team_id)
spadlstore[f"actions/game_{game.game_id}"] = actions
gamesmapping = {
"wyId": "game_id",
"dateutc": "game_date",
"competitionId": "competition_id",
"seasonId": "season_id",
}
def convert_games(matches):
cols = ["game_id", "competition_id", "season_id", "game_date"]
games = matches.rename(columns=gamesmapping)[cols]
games["home_team_id"] = matches.teamsData.apply(lambda x: get_team_id(x, "home"))
games["away_team_id"] = matches.teamsData.apply(lambda x: get_team_id(x, "away"))
return games
def get_team_id(teamsData, side):
for team_id, data in teamsData.items():
if data["side"] == side:
return int(team_id)
playermapping = {
"wyId": "player_id",
"shortName": "short_name",
"firstName": "first_name",
"lastName": "last_name",
"birthDate": "birth_date",
}
def convert_players(players):
cols = ["player_id", "short_name", "first_name", "last_name", "birth_date"]
return players.rename(columns=playermapping)[cols]
teammapping = {
"wyId": "team_id",
"name": "short_team_name",
"officialName": "team_name",
}
def convert_teams(teams):
cols = ["team_id", "short_team_name", "team_name"]
return teams.rename(columns=teammapping)[cols]
def get_player_games(match, events):
game_id = match.wyId
teamsData = match.teamsData
duration = 45 + events[events.matchPeriod == "2H"].eventSec.max() / 60
playergames : dict = {}
for team_id, teamData in teamsData.items():
formation = teamData.get("formation", {})
pg = {
player["playerId"]: {
"game_id": game_id,
"team_id": team_id,
"player_id": player["playerId"],
"minutes_played": duration,
}
for player in formation.get("lineup", [])
}
substitutions = formation.get("substitutions", [])
if substitutions != "null":
for substitution in substitutions:
substitute = {
"game_id": game_id,
"team_id": team_id,
"player_id": substitution["playerIn"],
"minutes_played": duration - substitution["minute"],
}
pg[substitution["playerIn"]] = substitute
pg[substitution["playerOut"]]["minutes_played"] = substitution["minute"]
playergames = {**playergames, **pg}
return pd.DataFrame(playergames.values())
def convert_actions(events, home_team_id):
events = augment_events(events)
events = fix_wyscout_events(events)
actions = create_df_actions(events)
actions = fix_actions(actions)
actions = fix_direction_of_play(actions, home_team_id)
actions = fix_clearances(actions)
actions = add_dribbles(actions)
return actions
def augment_events(events_df):
events_df = pd.concat([events_df, get_tagsdf(events_df)], axis=1)
events_df = make_new_positions(events_df)
events_df["type_id"] = (
events_df["eventId"] if "eventId" in events_df.columns else events_df.eventName
)
events_df["subtype_id"] = (
events_df["subEventId"]
if "subEventId" in events_df.columns
else events_df.subEventName
)
events_df["period_id"] = events_df.matchPeriod.apply(lambda x: wyscout_periods[x])
events_df["player_id"] = events_df["playerId"]
events_df["team_id"] = events_df["teamId"]
events_df["game_id"] = events_df["matchId"]
events_df["milliseconds"] = events_df.eventSec * 1000
return events_df
def get_tag_set(tags):
return {tag["id"] for tag in tags}
def get_tagsdf(events):
tags = events.tags.apply(get_tag_set)
tagsdf = pd.DataFrame()
for (tag_id, column) in wyscout_tags:
tagsdf[column] = tags.apply(lambda x: tag_id in x)
return tagsdf
wyscout_periods = {"1H": 1, "2H": 2, "E1": 3, "E2": 4, "P": 5}
wyscout_tags = [
(101, "goal"),
(102, "own_goal"),
(301, "assist"),
(302, "key_pass"),
(1901, "counter_attack"),
(401, "left_foot"),
(402, "right_foot"),
(403, "head/body"),
(1101, "direct"),
(1102, "indirect"),
(2001, "dangerous_ball_lost"),
(2101, "blocked"),
(801, "high"),
(802, "low"),
(1401, "interception"),
(1501, "clearance"),
(201, "opportunity"),
(1301, "feint"),
(1302, "missed_ball"),
(501, "free_space_right"),
(502, "free_space_left"),
(503, "take_on_left"),
(504, "take_on_right"),
(1601, "sliding_tackle"),
(601, "anticipated"),
(602, "anticipation"),
(1701, "red_card"),
(1702, "yellow_card"),
(1703, "second_yellow_card"),
(1201, "position_goal_low_center"),
(1202, "position_goal_low_right"),
(1203, "position_goal_mid_center"),
(1204, "position_goal_mid_left"),
(1205, "position_goal_low_left"),
(1206, "position_goal_mid_right"),
(1207, "position_goal_high_center"),
(1208, "position_goal_high_left"),
(1209, "position_goal_high_right"),
(1210, "position_out_low_right"),
(1211, "position_out_mid_left"),
(1212, "position_out_low_left"),
(1213, "position_out_mid_right"),
(1214, "position_out_high_center"),
(1215, "position_out_high_left"),
(1216, "position_out_high_right"),
(1217, "position_post_low_right"),
(1218, "position_post_mid_left"),
(1219, "position_post_low_left"),
(1220, "position_post_mid_right"),
(1221, "position_post_high_center"),
(1222, "position_post_high_left"),
(1223, "position_post_high_right"),
(901, "through"),
(1001, "fairplay"),
(701, "lost"),
(702, "neutral"),
(703, "won"),
(1801, "accurate"),
(1802, "not_accurate"),
]
def make_position_vars(event_id, positions):
if len(positions) == 2: # if less than 2 then action is removed
start_x = positions[0]["x"]
start_y = positions[0]["y"]
end_x = positions[1]["x"]
end_y = positions[1]["y"]
elif len(positions) == 1:
start_x = positions[0]["x"]
start_y = positions[0]["y"]
end_x = start_x
end_y = start_y
else:
start_x = None
start_y = None
end_x = None
end_y = None
    return pd.Series([event_id, start_x, start_y, end_x, end_y])
import os
import re
import json
import numpy as np
import pandas as pd
import operator
import base64
os.environ['DJANGO_SETTINGS_MODULE'] = 'zazz_site.settings'
import django
django.setup()
from django.core.exceptions import ObjectDoesNotExist
from django.core import serializers
from zazz import models
from time import gmtime, strftime
from functools import reduce
from itertools import product
from collections import OrderedDict, defaultdict
from zazz.models import Samples
print ('OFFLINE:')
g = {
}
import_errors = defaultdict(int)
'''
class Mutations(models.Model):
vep = models.ManyToManyField(to="VEP")
name = models.CharField(null=False, max_length=100)
alternative = models.CharField(null=True, max_length=100)
reference = models.CharField(null=True, max_length=100)
this_type = models.CharField(null=False, choices=[('name', 'GENERIC'), ('rs_name', 'rs'), ('hgvs_name', 'hgvs')], max_length=100)
'''
class ZazzException(Exception):
def set_info(self, info):
self.info = info
def convert_to_base64(s):
return base64.b64encode(bytes(s, encoding='ascii')).decode()
def decode_base64_json(s):
return json.loads(base64.b64decode(s.replace('_', '=')))
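# --- Illustrative round-trip sketch ---
# Note the asymmetry above: convert_to_base64 encodes a plain string, while
# decode_base64_json also json-decodes and maps '_' back to the '=' padding.
def _demo_base64_roundtrip():
    encoded = convert_to_base64('{"field": "Chromosome"}').replace('=', '_')
    return decode_base64_json(encoded)   # -> {'field': 'Chromosome'}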
def print_now():
return strftime("%Y-%m-%d %H:%M:%S", gmtime())
def get_model(name):
return getattr(models, name)
def create_field_parameters(parameters):
return ', '.join(['{k} = {v}'.format(k=k,v=v) for k,v in parameters.items()])
def create_field(field):
# if field['type'] in ['MultiSelectField']:
# this_models = ''
# else:
# this_models = 'models.'
this_models = 'models.'
return ' {name} = {this_models}{type_}({parameters})'.format(
name=field['name'].replace(' ', '_'),
this_models=this_models,
type_ = field['type'],
parameters = create_field_parameters(field['parameters']),
)
def create_fields(fields):
return '\n'.join([create_field(field) for field in fields])
def get_table_pattern():
table_pattern = '''
class {table}(models.Model):
{meta_val}
{fields_val}
'''
return table_pattern
def table_pattern_f(table, fields_val, meta_val=''):
table_pattern = get_table_pattern()
return table_pattern.format(table=table, fields_val=fields_val, meta_val=meta_val)
def create_external(external):
#Create main
table = external['name']
#fields_keys = [x for x in external['fields'] if x['name'] in external['keys']]
fields_keys = external['fields']
fields_val = create_fields(fields_keys)
ret = table_pattern_f(table=table, fields_val=fields_val)
#Create secondary
return ret
def create_externals(externals):
'''
externals = [
{'name': 'Clinvar', 'filename': 'clinvar.csv', 'type': 'csv', 'fields':
[
{'name': 'Chromosome', 'type': 'CharField', 'parameters': {'max_length': '100'}},
{'name': 'Position', 'type': 'IntegerField', 'parameters': {}},
{'name': 'Clinical Significance', 'type': 'CharField', 'parameters': {'max_length': '100'}},
],
'keys': ['Chromosome', 'Position'],
},
]
'''
return '\n'.join(map(create_external, externals))
def create_table(table, fields, externals):
'''
table: Name of main table
fields: list fields that describe the database
'''
Many2ManyTables = {}
for field in fields:
#if field.get('table', False):
if field.get('database', False) == 'multi_1':
f_table = field['table']
if not f_table in Many2ManyTables:
Many2ManyTables[f_table] = []
Many2ManyTables[f_table].append(field)
'''
Many2ManyTables is a dictionary.
keys: are name of tables that we group fields together
values is a list of these fields
'''
# Transform Many2ManyTables to django tables format
Many2ManyTables_text = '\n'.join([table_pattern_f(k,create_fields(v)) for k,v in Many2ManyTables.items()])
# Add the "normal" fields (not Many2Many)
new_fields = [field for field in fields if field.get('database', False) != 'multi_1']
#Add fields for ManyToMany
#The main table needs to have a ManytoMany relationship with the Samples table
new_fields += [{'name': k, 'type': 'ManyToManyField', 'parameters': {'to': k}} for k,v in Many2ManyTables.items()]
#We also need to add a "raw" field for each many2many relationship
#We may have to remove this on the furture!
for k,v in Many2ManyTables.items():
for f in v:
# f = {'name': 'Sift', 'col_name': 'sift', 'type': 'FloatField', 'parameters': {'null': 'True'}, 'xUnits': 20, 'database': 'multi', 'l': <function import_annotated_vcf.<locals>.<lambda> at 0x116418488>, 'l_multi': <function splitUnique.<locals>.f at 0x116418510>, 'table': 'Transcripts', 'order': 21}
#print (f)
field_to_add = dict(f)
# All raw fields should be CharFields !
if field_to_add['type'] != 'CharField':
field_to_add['type'] = 'CharField'
field_to_add['parameters']['max_length'] = '200'
field_to_add['name'] += '_raw'
new_fields.append(field_to_add)
# Create a multi field index
meta_val = '''
class Meta:
indexes = [
models.Index(
fields=['Chromosome', 'Position', 'Reference', 'Alternative'],
name='sample_idx',
),
]
'''
table_text = table_pattern_f(table=table, fields_val = create_fields(new_fields), meta_val=meta_val)
# print (table_text)
# a=1/0
models_pattern = '''
from django.db import models
# from multiselectfield import MultiSelectField
# Create your models here.
{Many2ManyTables}
class Data(models.Model):
field = models.CharField(null=True, max_length=200)
{table}
{externals}
'''
externals_text = create_externals(externals)
models_text = models_pattern.format(table=table_text, Many2ManyTables=Many2ManyTables_text, externals=externals_text)
print ('NEW MODELS:')
print (models_text)
print ('Saving to zazz/models.py..')
with open('zazz/models.py', 'w') as f:
f.write(models_text)
print ('..DONE')
print ('Running: python manage.py makemigrations ...')
command = 'python manage.py makemigrations zazz'
os.system(command)
print (' ..DONE')
print ('Running: python manage.py migrate')
command = 'python manage.py migrate'
os.system(command)
print(' ..DONE')
#print (Data.objects.all())
#df = pd.read_excel('annotations_zaganas.xlsx')
#print (df[:3])
#print ()
#print ("python manage.py makemigrations")
#print ("python manage.py migrate")
def create_js_field(field):
'''
IGNORE = DO NOT SHOW IN UI
'''
pattern = "{{'name': '{name}', 'type': '{type}', 'selected': false, 'e_order': -1, 'database': '{database}', {special}{renderer}{table}{xUnits}{order}{include} }}"
database = field.get('database', 'normal');
xUnits = ''
if field.get('component') == 'freetext':
type_ = 'freetext'
special = "'text' : ''" # The ng-model
elif field.get('component') == 'ignore':
type_ = 'ignore'
special = "'text' : ''" # The ng-model
elif field['type'] in ['CharField', 'ManyToManyField']:
type_ = 'checkbox'
special = "'itemArray': [], 'selected2': ['ALL']"
elif field['type'] in ['IntegerField', 'FloatField']:
type_ = 'slider'
special = ''''slider': {
'min': 30,
'max': 70,
'options': {
'floor': 1,
'ceil': 100,
'disabled': true,
'onEnd' : function (sliderId, modelValue, highValue, pointerType) {
console.log('Slider changed');
//console.log(modelValue); // This the min
//console.log(highValue); // This is the max
$scope.update_table();
}
},
}'''
if field['type'] == 'IntegerField':
if not 'xUnits' in field:
raise ZazzException('xUnit missing from IntegerField')
xUnits = ", 'xUnits': " + str(field['xUnits'])
elif field['type'] == 'ForeignKey':
type_ = 'checkbox'
special = "'itemArray': [], 'selected2': ['ALL']"
else:
raise ZazzException('Unknown field: {}'.format(field['type']))
if 'renderer' in field:
renderer = ", 'renderer': " + field['renderer']
else:
renderer = ''
if 'table' in field:
table = ", 'table': '" + field['table'] + "'"
else:
table = ''
if 'order' in field:
order = ", 'order': " + str(field['order'])
else:
order = ''
if 'include' in field:
include = ", 'include': " + str(field['include'])
else:
include = ''
values = {
'name': field['name'],
'type': type_,
'special': special,
'database': database,
'renderer': renderer,
'table': table,
'order': order,
'include': include,
'xUnits': xUnits,
}
return pattern.format(**values)
def create_js_fields(fields):
return ',\n'.join([create_js_field(x) for x in fields])
def create_js(fields):
'''
$scope.fields = [
//{'name': 'sample', 'type': 'checkbox', 'selected': false, 'itemArray': [{id: 1, name: ''}], 'selected2': {'value': {id: 1, name: ''}} },
{'name': 'sample', 'type': 'checkbox', 'selected': false, 'itemArray': [], 'selected2': ['ALL'], 'e_order': -1 },
{'name': 'Bases', 'type': 'slider', 'selected': false, 'slider': {
'min': 30,
'max': 70,
'options': {
'floor': 1,
'ceil': 100,
'disabled': true,
'onEnd' : function (sliderId, modelValue, highValue, pointerType) {
console.log('Slider changed');
//console.log(modelValue); // This the min
//console.log(highValue); // This is the max
$scope.update_table();
}
},
},
'e_order': -1},
{'name':'Barcode_Name', 'type':'checkbox', 'selected': false, 'itemArray': [], 'selected2': ['ALL'], 'e_order': -1 }
];
'''
print ('JAVASCRIPT:')
fields_val = f'$scope.fields=[{create_js_fields(fields)}];'
print (fields_val)
# Add fields javascript object in angular controller
z_zazz_ctrl_fn = 'zazz/static/zazz/zazz_Ctrl.js'
with open(z_zazz_ctrl_fn) as f:
z_zazz_ctrl = f.read()
z_zazz_ctrl_new = re.sub(
r'// FIELDS BEGIN\n.+\n// FIELDS END\n',
f'// FIELDS BEGIN\n{fields_val}\n// FIELDS END\n',
z_zazz_ctrl,
flags=re.DOTALL )
with open(z_zazz_ctrl_fn, 'w') as f:
f.write(z_zazz_ctrl_new + '\n')
    print ('Saved javascript at:', z_zazz_ctrl_fn)
def is_dataframe(data):
'''
Return true if data is a pandas dataFrame
'''
return type(data) is pd.DataFrame
def chromosome_unifier(chromosome):
'''
    All chromosome input should pass through this function.
    Chromosomes can be written in multiple ways: "1", chr1, chr01, ...
    Here we make sure that all chromosome values are in the form chr1, chr2, chrX, chrY.
'''
# "15" --> chr15
if re.match(r'^\d+$', chromosome):
return 'chr' + chromosome
if re.match(r'^chr[\dXY]+$', chromosome):
return chromosome
if chromosome.upper() in ['X', 'Y']:
return 'chr' + chromosome.lower()
raise ZazzException(f'Unknown Chromosome value: ->{chromosome}<-')
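# --- Illustrative behaviour sketch ---
# Bare "X"/"Y" are lower-cased to "chrx"/"chry" by the branch above; any other
# unrecognized value raises ZazzException.
def _demo_chromosome_unifier():
    assert chromosome_unifier("15") == "chr15"
    assert chromosome_unifier("chrX") == "chrX"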
def get_value_from_record(field, record, line_index):
'''
Extract the value that is present in the record and is described in the field
field : Any item in fields list. field is a dictionary
record: Any item in input data.
DUPLICATE CODE!!
FIX ME!!
'''
if not field['col_name'] in record:
message = '{} does not exist in record\n'.format(field['col_name'])
message += 'Available columns:\n'
message += '\n'.join(record.keys()) + '\n'
raise ZazzException(message)
try:
if 'line_l' in field:
value = field['line_l'](record)
elif 'l' in field:
value = field['l'](record[field['col_name']])
else:
value = record[field['col_name']]
except ZazzException as t_exception:
e_message = str(t_exception)
e_info = t_exception.info
import_errors[e_message] += 1
value = None
except Exception as e:
print ('Record:')
print (record)
print ('Index:', line_index)
raise e
return value
def get_key_from_record(field):
'''
Get the name of the key of the record
'''
key = field['name']
if field.get('database', '') == 'multi_2':
pass
elif field.get('database', '') == 'multi_1':
key = field['name'] + '_raw'
return key
def create_m2m_table(schema, table):
'''
    Create a dictionary with all the Many2Many tables.
    Example keys: {'phylop', 'pfam', 'drugbank', 'go', 'dbsnp', 'omim', 'cosmic', 'Transcripts'}
    key: name of a multi_1 table
    value: list of the field dictionaries that belong to that table.
'''
m2m_tables = defaultdict(list)
for field in schema:
if field.get('database', '') == 'multi_1':
#m2m_tables.add(field.get('table', table))
m2m_tables[field.get('table', table)].append(field)
return m2m_tables
def get_multi_1_records(m2m_tables, record, ):
'''
example of field:
{'name': 'ANN_GeneDetail_refGene', 'col_name': 'GeneDetail.refGene', 'type': 'CharField', 'parameters': {'max_length': '500', 'null': 'True'}, 'database': 'multi_1', 'table': 'ANN_GeneDetail_refGene', 'l_multi': lambda x : x.replace('\\x3d', '=').split('\\x3b'), 'order': 38},
Returns:
ret:
{
'nameof_m2m_tale' : {
m2m_field_1: [list of values],
m2m_field_2: [list of values],
}
}
ret_raw:
{
'nameof_m2m_tale' : {
m2m_field_1: raw_values,
m2m_field_2: raw_values,
}
}
'''
ret = defaultdict(dict)
ret_raw = defaultdict(dict)
for m2m_table_key, m2m_table_value in m2m_tables.items():
for field in m2m_table_value:
#print ('*** FIELD: ***')
#print (field)
unsplitted = record[field['col_name']]
splited_values = field['l_multi'](unsplitted)
ret[m2m_table_key][field['name']] = splited_values
if 'l_raw_multi' in field:
ret_raw[m2m_table_key][field['name'] + '_raw'] = field['l_raw_multi'](splited_values)
else:
ret_raw[m2m_table_key][field['name'] + '_raw'] = unsplitted
#print (ret)
#a=1/0
return ret, ret_raw
def create_attribute_records(record_list):
'''
record_list:
{'k': [1,2,3], 'l': [4,5,6]}
RETURNS:
[{'k': 1, 'l': 4}, {'k': 2, 'l': 5}, {'k':3, 'l': 6}]
'''
return [dict(zip(record_list.keys(), x)) for x in zip(*record_list.values())]
def import_data_append(input_data, schema, table, externals, **kwargs):
'''
Append new data
kwargs:
to_append_re : Regular expression to match new field names
'''
# Get kwargs
to_append_re = kwargs.get('to_append_re', None)
assert to_append_re
# Get table
table_db = getattr(models, table)
# Check type of input data
if is_dataframe(input_data):
data = input_data.to_dict('records')
elif type(input_data) is dict:
data = input_data
else:
raise ZazzException('input_data is not a pandas dataframe or a dictionary')
#Get the new fields that we will add.
print ('Selecting only fields according to regexp: {}'.format(to_append_re))
print ('Total fields: {}'.format(len(schema)))
fields = [field for field in schema if re.match(to_append_re, field['name'])]
print ('Fields after selection: {}'.format(len(fields)))
assert len(fields)
print ('APPENDING NEW FIELDS:')
for field in fields:
print (' ' + field['name'])
# Get m2m_table:
m2m_tables = create_m2m_table(fields, table)
#print (m2m_tables)
#a=1/0
this_error = defaultdict(int)
for line_index, record in enumerate(data):
#print (line_index, record['# locus'])
if (line_index+1) % 1000 == 0:
print ('{} Imported records: {}/{} {:.1%}'.format(print_now(), line_index+1, len(data), line_index/len(data)))
try:
database_record = table_db.objects.get(Position=record['Position'], Chromosome=record['Chromosome'], Reference=record['Reference'], Alternative=record['Alternative'])
except ObjectDoesNotExist as e:
this_error['Could not find chromosome/position in db'] += 1
continue
for field in fields:
value = get_value_from_record(field, record, line_index)
key = get_key_from_record(field)
#print ('{}={}'.format(field['name'], value))
setattr(database_record, key, value)
#database_record.save()
# Get multi_1 records:
#print ('GeneDetail.refGene = ', record['GeneDetail.refGene'])
multi_1_records, multi_1_records_raw = get_multi_1_records(m2m_tables, record)
#print ('*** multi_1_records: ***')
#print (multi_1_records)
#print ('*** multi_1_records_raw: ***')
#print (multi_1_records_raw)
# Store multi records
for m2m_table_key, m2m_table_value in m2m_tables.items():
for field in m2m_table_value:
# Add raw multi_1 records
setattr(database_record, field['name'] + '_raw', multi_1_records_raw[m2m_table_key][field['name'] + '_raw'])
#print (database_record)
#print (field['name'] + '_raw')
#print (multi_1_records[m2m_table_key][field['name'] + '_raw'])
#Create attribute dictionary
attribute_records = create_attribute_records(multi_1_records[m2m_table_key])
#print ('*** attribute_records ***')
#print (attribute_records)
m2m_objects = [getattr(models, m2m_table_key).objects.get_or_create(**attribute_record)[0] for attribute_record in attribute_records]
getattr(getattr(database_record, m2m_table_key), 'set')(m2m_objects)
database_record.save()
    print ('IMPORT ERRORS:')
print (json.dumps(this_error, indent=4))
def import_data(input_data, schema, table, externals, delete=True, **kwargs):
'''
model_instances = [MyModel(
field_1=record['field_1'],
field_2=record['field_2'],
) for record in df_records]
'''
# Make sure that there is one and only one of the basic keys
chromosome_field = [x for x in schema if x['name'] == 'Chromosome']
position_field = [x for x in schema if x['name'] == 'Position']
reference_field = [x for x in schema if x['name'] == 'Reference']
alternative_field = [x for x in schema if x['name'] == 'Alternative']
assert len(chromosome_field) == 1
assert len(position_field) == 1
assert len(reference_field) == 1
assert len(alternative_field) == 1
chromosome_field = chromosome_field[0]
position_field = position_field[0]
reference_field = reference_field[0]
alternative_field = alternative_field[0]
errors_1 = 0
print ('Importing externals..')
if delete:
print ('Deleting external --> internal')
for external in externals:
if external['type'] == 'internal':
print (' Deleting external --> internal table: {}'.format(external['name']))
get_model(external['name']).objects.all().delete()
print (' Done')
print ('Deleting externals')
for external in externals:
if external['type'] == 'csv':
print (' Deleting external table: {}'.format(external['name']))
get_model(external['name']).objects.all().delete()
print (' Done')
if False:
'''
This is an initial effort. It is too slow.
It stores all info in DB. This is inefficient if we only need a fraction of information
'''
print ('Importing External Data')
for external in externals:
if external['type'] == 'csv':
print (' Name: {}'.format(external['name']))
print (' Loading file: {}'.format(external['filename']))
csv = pd.read_csv(external['filename'])
csv_dict = csv.to_dict('index')
print (' Length: {}'.format(len(csv_dict)))
c = 0
for index, d in csv_dict.items():
c += 1
if c % 1000 == 0:
print (' {}, Records: {}'.format(print_now(), c))
if c > 1000:
break
#Build a dictionary with the fields. NO M2M
item_fields_no_m2m = {field['name']:field['l'](d) for field in external['fields'] if not field['type'] == 'ManyToManyField'}
new_item = get_model(external['name']).objects.get_or_create(**item_fields_no_m2m)[0]
#new_item.save()
# Build a dictionary with fields. WITH M2M
for field in external['fields']:
if field['type'] != 'ManyToManyField':
continue
item_fields_m2m = {field['name']:field['l'](d) for field in external['fields'] if field['type'] == 'ManyToManyField'}
for m2m_k, m2m_v in item_fields_m2m.items():
getattr(new_item, m2m_k).add(m2m_v)
new_item.save()
elif external['type'] == 'internal':
continue
print (' Done')
if is_dataframe(input_data):
df = input_data
elif type(input_data) is str:
input_data_ext = os.path.splitext(input_data)[1]
if input_data_ext == '.xlsx':
            print ('Reading MAIN Excel: {}'.format(input_data))
            df = pd.read_excel(input_data)
else:
raise Exception('Unknown file type: ', input_data_ext )
else:
raise Exception('Unknown input type', type(input_data).__name__)
if False:
print ('Keeping only 1000 records')
df = df[:1000]
data = df.to_dict('records')
table_db = getattr(models, table)
if delete:
print ('Deleting all..')
print ('Deleting table.. ', table)
table_db.objects.all().delete()
# Get the new fields that we will add.
to_append_re = kwargs.get('to_append_re')
if to_append_re:
print ('Adding only fields that match regexp: {}'.format(to_append_re))
print ('Total fields: {}'.format(len(schema)))
schema = [field for field in schema if re.match(to_append_re, field['name'])]
# Add basic fields as well
schema.extend([chromosome_field, position_field, reference_field, alternative_field])
print ('After regexp: {}'.format(len(schema)))
m2m_tables = set()
for field in schema:
if field.get('database', '') == 'multi_1':
m2m_tables.add(field.get('table', table))
if delete:
for m2m_table in m2m_tables:
print ('Deleting table.. ', m2m_table)
mm_db = getattr(models, m2m_table)
mm_db.objects.all().delete()
#(field['line_l'](record)) if 'line_l' in field else (field.get('l', lambda l:l)(record[field['col_name']]))
print ('Building instances..')
if False:
instances = [
table_db(**{
field['name'] + ('_raw' if field.get('table', table) != table else ''):
(field['line_l'](record)) if 'line_l' in field else (field.get('l', lambda l:l)(record[field['col_name']])) #(field['l'] if 'l' in field else lambda x:x)(record[field['col_name']])
for field in schema if 'col_name' in field # Add only fields that have col_name.
}) for record in data] # for field in schema if not field['type'] == 'ManyToManyField'}) for record in data]
def create_multi_dictionary():
'''
Create multi dictionary for multi_2
'''
multi_dictionary = defaultdict(list)
for field in schema:
if field.get('database', False) == 'multi_2':
multi_dictionary[field['table']].append(field)
return multi_dictionary
multi_dictionary = create_multi_dictionary()
def create_multi_record(index, record):
all_multi_value_lists = []
for multi_key, multi_fields in multi_dictionary.items():
#Get the values of each multi field
multi_values_values = []
multi_values_keys = []
for multi_field in multi_fields:
field_value = record[multi_field['col_name']]
field_value_splitted = multi_field['l_multi'](field_value)
multi_values_keys.append(multi_field['name'])
multi_values_values.append(field_value_splitted)
# Make sure that all lists has the same number of values
set_of_the_length_of_all_values = set(map(len, multi_values_values))
if len(set_of_the_length_of_all_values) != 1:
#error_message = 'Index: {} . Fields do not have the same size..'.format(index)
error_message = 'Multi fields do not have the same size..'
import_errors[error_message] += 1
print (error_message)
return None
#print ('multi_values_values:')
#print (multi_values_values)
#print ('multi_values_keys')
#print (multi_values_keys)
multi_values_list_of_dicts = [dict(zip(multi_values_keys,x)) for x in zip(*multi_values_values)]
# [{'gene': 'NBPF9', 'transcript': 'NM_001037675.3', 'location': 'exonic', 'function': 'missense', 'codon': 'CGC', 'exon': '7', 'protein': 'p.His295Arg', 'coding': 'c.885A>G', 'sift': None}, {'gene': 'NBPF8', 'transcript': 'NM_001037501.2', 'location': 'exonic', 'function': 'missense', 'codon': 'CGC', 'exon': '6', 'protein': 'p.His295Arg', 'coding': 'c.885A>G', 'sift': None}, {'gene': 'NBPF8', 'transcript': 'NR_102404.1', 'location': 'exonic_nc', 'function': None, 'codon': None, 'exon': '6', 'protein': None, 'coding': None, 'sift': None}, {'gene': 'NBPF8', 'transcript': 'NR_102405.1', 'location': 'exonic_nc', 'function': None, 'codon': None, 'exon': '5', 'protein': None, 'coding': None, 'sift': None}, {'gene': 'NBPF9', 'transcript': 'NM_001277444.1', 'location': 'exonic', 'function': 'missense', 'codon': 'CGC', 'exon': '7', 'protein': 'p.His295Arg', 'coding': 'c.885A>G', 'sift': None}]
#print (multi_values_list_of_dicts)
all_multi_value_lists.append(multi_values_list_of_dicts)
# Combine multiple values
#print (reduce(lambda x,y: x*y, all_multi_value_lists))
if not all_multi_value_lists:
return None
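        # Illustration of the combination step below (made-up values): with
        # all_multi_value_lists = [[{'gene': 'A'}, {'gene': 'B'}], [{'sift': 0.1}]],
        # product() pairs every dict of the first list with every dict of the second,
        # and the reduce(operator.or_, ...) merge of their items gives
        # [{'gene': 'A', 'sift': 0.1}, {'gene': 'B', 'sift': 0.1}].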
ret = [dict(reduce(operator.or_, [y.items() for y in x])) for x in product(*all_multi_value_lists)]
#print ('Multivalues:', len(ret))
#print (ret)
return ret
if True:
instances = []
for line_index, record in enumerate(data):
#print (line_index, record['# locus'])
if (line_index+1) % 1000 == 0:
print ('{} Imported records: {}/{} {:.1%}'.format(print_now(), line_index+1, len(data), line_index/len(data)))
table_db_options = {}
for field in schema:
if not 'col_name' in field: # Add only fields that have col_name.
continue
key = field['name']
if field.get('database', '') == 'multi_2':
continue # Later add multi_2 fields
elif field.get('database', '') == 'multi_1':
key = field['name'] + '_raw'
try:
if 'line_l' in field:
value = field['line_l'](record)
elif 'l' in field:
# col_name might not exist in record! Data is not supposed to contain all fields!
if not field['col_name'] in record:
continue
value = field['l'](record[field['col_name']])
else:
# col_name might not exist in record! Data is not supposed to contain all fields!
if not field['col_name'] in record:
continue
value = record[field['col_name']]
except ZazzException as t_exception:
e_message = str(t_exception)
e_info = t_exception.info
import_errors[e_message] += 1
value = None
except Exception as e:
print ('Record:')
print (record)
print ('Index:', line_index)
raise e
if pd.isnull(value):
value = None # np.nan confuses django when attempting: int(np.nan)
table_db_options[key] = value
multi_records = create_multi_record(line_index, record)
if multi_records:
for multi_record in multi_records:
table_db_options = {**table_db_options, **multi_record}
instances.append(table_db(**table_db_options))
else:
#print (table_db_options)
instances.append(table_db(**table_db_options))
count = len(instances)
print ('Adding IDs..')
for i, instance in enumerate(instances):
instance.id = i
print ('{} Bulk creating main objects..'.format(print_now()))
# bulk_create does not work with many-to-many relationships. ..sniff...
# https://docs.djangoproject.com/en/2.0/ref/models/querysets/
if False:
'''
For testing
'''
print (serializers.serialize("json", instances, indent=4))
for inst in instances:
inst.save()
print (inst.pk)
if True:
table_db.objects.bulk_create(instances)
print (' {} Done'.format(print_now()))
print ('Indexing main objects..')
querySet = table_db.objects.filter(id__gte=0, id__lt=count)
assert querySet.count() == count
index = {x.id:x for x in querySet}
m2m_index = {}
print ('Creating many to many relationships..')
#errors_1 = 0
def process_multi_1(store):
errors_1 = 0
# m2m_objects: store in memory ALL m2m object, so that we can bulk import them later
m2m_objects = defaultdict(list)
# For each record store which many to many has
m2m_object_references = defaultdict(dict)
for id_, record in enumerate(data):
instance = index[id_]
if id_ % 1000 == 0:
print ('{} Entries: {}/{}'.format(print_now(), id_+1, count))
#l_multi is obligatory
for m2m_table in m2m_tables:
try:
# field['col_name'] in record : col_name does not have to be present in record!
m2m_fields = OrderedDict({field['name']: field['l_multi'](record[field['col_name']]) for field in schema if field.get('table', None) == m2m_table and field['col_name'] in record})
except ZazzException as e:
import_errors[str(e)] += 1
print (str(e))
m2m_fields = {}
#assert that all have the same length
if not len(set(len(x) for x in m2m_fields.values())) == 1:
print ('Index: {} . Fields do not have the same size..'.format(id_))
debug = {field['name']: record[field['col_name']] for field in schema if field.get('table', None) == m2m_table and field['col_name'] in record}
#print (debug)
#print (m2m_fields)
errors_1 += 1
m2m_fields = {}
#raise Exception()
#Create database objects
# {a: [1,2] , b: [3,4]} --> [{a:1, b:3} , {a:2, b:4}]. See also create_attribute_records()
m2m_fields = [dict(zip(m2m_fields.keys(), x)) for x in zip(*m2m_fields.values())]
current_length = len(m2m_objects[m2m_table])
m2m_objects[m2m_table].extend(m2m_fields)
m2m_object_references[id_][m2m_table] = (current_length, current_length+len(m2m_fields))
# m2m_fields: [{'Gene': 'CLCNKB', 'Transcript': 'NM_000085.4'}, {'Gene': 'CLCNKB', 'Transcript': 'NM_001165945.2'}]
if not m2m_fields:
# Do nothing.
#getattr(getattr(instance, m2m_table), 'set')(None)
#instance.save()
continue
if False:
'''
Always create new multi object
'''
m2m_objects = [getattr(models, m2m_table)(**m2m_field) for m2m_field in m2m_fields]
#Save objects
for o in m2m_objects:
o.save()
if False:
'''
Create only if they don't exist
'''
m2m_objects = [getattr(models, m2m_table).objects.get_or_create(**m2m_field)[0] for m2m_field in m2m_fields]
                if store:
                    '''
                    Objects already exist at this point; just fetch them
                    '''
                    m2m_objects = [getattr(models, m2m_table).objects.get(**m2m_field) for m2m_field in m2m_fields]
#print (m2m_table, m2m_fields)
#Add it to the main instance
if False:
getattr(getattr(instance, m2m_table), 'set')(m2m_objects)
if store:
#Save instance
instance.save()
return m2m_objects, m2m_object_references
m2m_objects, m2m_object_references = process_multi_1(store=False)
print ('Bulk creating Many2Many Objects')
table_insance_objects = {}
for m2m_table, m2m_values in m2m_objects.items():
print (' Bulk creating:', m2m_table)
table_instance = getattr(models, m2m_table)
table_insance_objects[m2m_table]= [table_instance(**x) for x in m2m_values]
getattr(models, m2m_table).objects.bulk_create(table_insance_objects[m2m_table])
print (' Getting Primary Key of:', m2m_table)
table_insance_objects[m2m_table] = table_instance.objects.all().order_by('pk')
print ('Connecting main instance with m2m..')
#Create through objects
through_objects = {m2m_table: getattr(Samples, m2m_table).through for m2m_table in m2m_tables}
for id_, record in enumerate(data):
if id_ % 1000 == 0:
print ('{} {}/{}'.format(print_now(), id_, len(data)))
instance = index[id_]
#
if not id_ in m2m_object_references:
continue
for table_name, table_indexes in m2m_object_references[id_].items():
#print (table_insance_objects[table_name][table_indexes[0]: table_indexes[1]+1])
if True:
'''
2019-04-18 16:09:42 0/10000
2019-04-18 16:10:15 1000/10000 --> 33
2019-04-18 16:10:48 2000/10000 --> 33
2019-04-18 16:11:22 3000/10000 --> 34
2019-04-18 16:11:57 4000/10000 --> 35
2019-04-18 16:12:33 5000/10000 --> 36
'''
getattr(getattr(instance, table_name), 'set')(table_insance_objects[table_name][table_indexes[0]: table_indexes[1]+1])
if False:
'''
2019-04-18 16:05:47 0/10000
2019-04-18 16:06:14 1000/10000 --> 27
2019-04-18 16:06:43 2000/10000 --> 29
2019-04-18 16:07:13 3000/10000 --> 30
2019-04-18 16:07:48 4000/10000 --> 35
2019-04-18 16:08:27 5000/10000 --> 39
'''
tmp1 = [{table_name.lower() + '_id': table_insance_objects[table_name][i].pk, 'samples_id': instance.pk} for i in range(table_indexes[0], table_indexes[1]+1)]
#print (tmp1)
tmp2 = [through_objects[table_name](**x) for x in tmp1]
#print (tmp2)
through_objects[table_name].objects.bulk_create(tmp2)
instance.save()
#a=1/0
print ('Errors 1:', errors_1)
print ('Annotating with external CSVs')
#Index external_internals
external_internals = {external['name']:external for external in externals if external['type'] == 'internal'}
for external in externals:
if external['type'] == 'csv':
external_name = external['name']
print (' Name: {}'.format(external_name))
print (' Loading file: {}'.format(external['filename']))
csv = pd.read_csv(external['filename'], **external['read_csv_options'])
csv_dict = csv.to_dict('index')
print (' DONE. Length: {}'.format(len(csv_dict)))
#Take the central table object
all_objects = table_db.objects.all()
print (' Annotating {} main records'.format(all_objects.count()))
o_counter = 0
o_annotated = 0
for o in all_objects:
o_counter += 1
if o_counter % 100 == 0:
print (' {}. Objects: {} Annotated: {}'.format(print_now(), o_counter, o_annotated))
matched = external['matcher'](csv, o) # THIS IS VERY SLOW!!
if matched.empty:
continue
o_annotated += 1
# This is not empty
# Create foreign object
# Create not M2M
                not_m2m = {field['name']:field['l'](matched) for field in external['fields'] if not field['type'] == 'ManyToManyField'}
foreign_object = get_model(external_name)(**not_m2m)
# Save model
foreign_object.save()
# Create M2M objects
m2m = {field['name']: field['l_m2m'](matched) for field in external['fields'] if field['type'] == 'ManyToManyField'}
#print (m2m) # {'Clinical_Significance': [{'Clinical Significance': 'Benign'}]}
m2m_objects = {k: [get_model(k).objects.get_or_create(**x)[0] for x in v] for k,v in m2m.items()}
#print (m2m_objects)
#Connect with foreign_object
for k, v in m2m_objects.items():
getattr(foreign_object, k).set(v)
#Save foreign_object
foreign_object.save()
#Now that we have the foreign_object stored, we can connect it with the foreign key of the main object
setattr(o, external_name, foreign_object) # o.external_name = foreign_object
#Update main object
o.save()
print ('Annotated {} out of {} records'.format(o_annotated, o_counter))
print ('DONE!')
if False: # This is legacy code. To be removed...
for field in schema:
if not field['type'] == 'ManyToManyField':
continue
if instance is None:
instance = index[id_]
values = field['l_multi'](record[field['col_name']])
#Store the values
m2m_db = getattr(models, field['name'])
if not field['name'] in m2m_index:
m2m_index[field['name']] = {}
#Perform as little as possible queries to the database
for value in values:
if not value in m2m_index[field['name']]:
m2m_index[field['name']][value] = m2m_db.objects.get_or_create(**{field['name']:value})[0]
values_obj = [m2m_index[field['name']][value] for value in values]
#Create M2M relationship
getattr(getattr(instance, field['name']+'_multi'), 'set')(values_obj)
instance.save()
    print ('IMPORT ERRORS')
print (json.dumps(import_errors, indent=4))
print ('DONE')
def comma_int(x):
return int(x.replace(',', ''))
def isNone(x):
return None if pd.isnull(x) else x
def splitUnique(field_name, sep, t=str):
'''
t = type
'''
def f(x):
if pd.isnull(x):
return [None]
if not hasattr(x, 'split'):
if t == str:
return [str(x)]
elif t == int:
return [int(x)]
elif t == float:
return [float(x)]
raise ZazzException(f'Invalid type: {type(x).__name__} in field: {field_name}')
return [y if y else None for y in x.split(sep)]
return f
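# For example, splitUnique('GeneDetail', ';')('a;b;;c') returns ['a', 'b', None, 'c'],
# while splitUnique('Depth', ';', t=int)(42) returns [42] (non-string scalars are wrapped);
# the first argument is only used in error messages.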
def join_set_sep(sep):
def f(x):
        if pd.isnull(x):
            return None
        # Assumed behaviour: join the unique values of x with the given separator
        return sep.join(sorted(set(x)))
    return f
"""
Generate features for most relevant EBM risk functions directly from MIMIC-IV database.
"""
import argparse
import datetime
import json
import numpy as np
import os
import pandas as pd
from joblib import Parallel, delayed
from helper.io import read_item_processing_descriptions_from_excel
from helper.util import output_wrapper
from preprocessing.step5_generate_features_for_stays import decode_feature_time_span, feat_func_map, \
feat_func_interval_map, feat_func_interval_start_end_map, feat_func_with_times_map, feat_func_with_times_span_map, \
decode_func_and_feat_name, feat_methods
from research_database.research_database_communication import ResearchDBConnection, resolve_data_column_name, \
MIMICDBConnection
# Min/max values derived from concept creation routines and when necessary as seen during RF review.
mimic_features_ebm = [
# Variable name, interval, function, min, max, imputation, MIMIC mimic_icu.d_items item id
# own min/max
# removed after inspections
# ('CK', '7d', 'median', 1., 10000., 201., [225634]),
('CK', '7d', 'min', 1., 10000., 201., [225634]),
('CK-MB', '3d', 'median', 1., 10000., None, [227445]),
('CK-MB', '3d', 'max', 1., 10000., None, [227445]),
# own min/max
('Chloride', '3d', 'trend', 1., 200., 103., [220602]),
('Chloride', '1d', 'min', 1., 200., 103., [220602]),
# own min/max
# removed after inspections
# ('PTT', '3d', 'min', 1., 500., 34.3, [227466]),
# ('PTT', '3d', 'max', 1., 500., 34.3, [227466]),
# ('PTT', '1d', 'max', 1., 500., 34.3, [227466]),
# ('PTT', '7d', 'min', 1., 500., 34.3, [227466]),
# ('PTT', '7d', 'max', 1., 500., 34.3, [227466]),
# Used both arterial and venous BGA together as in original cohort. Use own min/max against artifacts.
('pH', '1d', 'median', 7.0, 7.8, 7.39, [220274, 223830]),
('pH', '3d', 'median', 7.0, 7.8, 7.39, [220274, 223830]),
('pH', '3d', 'trend', 7.0, 7.8, 7.39, [220274, 223830]),
('pH', '1d', 'iqr', 7.0, 7.8, 7.39, [220274, 223830]),
('Blood volume out', '7d', 'extrapolate', None, None, None, [226626, 226629]),
('Blood volume out', '3d', 'extrapolate', None, None, None, [226626, 226629]),
# own min/max
('Hematocrit', '3d', 'max', 1., 100., 28.7, [220545]),
('Hematocrit', '12h', 'median', 1., 100., 28.7, [220545]),
# Only "Arterial Base Excess"
('BE', '12h', 'median', -25., 25., 0.0, [220545]),
('BE', '3d', 'trend', -25., 25., 0.0, [220545]),
# removed after inspections
# ('BE', '3d', 'iqr', -25., 25., 0.0, [220545]),
('BE', '12h', 'min', -25., 25., 0.0, [220545]),
('BE', '1d', 'iqr', -25., 25., 0.0, [220545]),
# own min/max
('Phosphate', '1d', 'max', 0.01, 500., None, [225677]),
('Phosphate', '7d', 'min', 0.01, 500., None, [225677]),
('Potassium', '1d', 'median', None, None, None, [227442]),
# Use endtime of invasive and non-invasive ventilation.
('Is on automatic ventilation', 'per-icu-stay', 'true-until-discharge', None, None, None, [225792, 225794]),
('RAS scale', '3d', 'max', None, None, 0.0, [228096]),
# removed after inspections
# ('RAS scale', '12h', 'trend', None, None, 0.0, [228096]),
('RAS scale', '1d', 'max', None, None, 0.0, [228096]),
# Arterial, Venous, Mixed Venous, own min/max
('pO2', '12h', 'min', 1., 200., 102., [220224, 226063, 227516]),
('pO2', '12h', 'iqr', 1., 200., 102., [220224, 226063, 227516]),
# Arterial, mixed venous, central venous, pulseoxymetry,
('O2 saturation', '12h', 'min', 0.1, 100., 97., [220227, 225674, 227686, 220277]),
# Ionized as for original cohort. own min/max
# removed after inspections
# ('Calcium', '3d', 'trend', 0.1, 30., 1.12, [225667]),
('Calcium', '1d', 'max', 0.1, 30., 1.12, [225667]),
# Min/max from derived vital signs
('Heart rate', '4h', 'min', 1, 299, None, [220045]),
('Heart rate', '1d', 'min', 1, 299, None, [220045]),
('Heart rate', '4h', 'iqr', 1, 299, None, [220045]),
    # In contrast to the original cohort only two items (Temp. Celsius, Blood temperature) and no complex merging procedure.
# removed after inspections
# ('Body core temperature', '1d', 'min', 10.1, 49.9, 37., [223762, 226329]),
# ('Body core temperature', '4h', 'min', 10.1, 49.9, 37., [223762, 226329]),
('Body core temperature', '1d', 'median', 10.1, 49.9, 37., [223762, 226329]),
('Body core temperature', '1d', 'trend', 10.1, 49.9, 37., [223762, 226329]),
# Arterial, Venous, own min/max
('pCO2', '1d', 'median', 1., 100., 41., [220235, 226062]),
('pCO2', '1d', 'iqr', 1., 100., 41., [220235, 226062]),
('pCO2', '3d', 'min', 1., 100., 41., [220235, 226062]),
# own min/max
# removed after inspections
# ('Leucocytes', '3d', 'trend', 0.1, 100., 10.8, [220546]),
('Leucocytes', '1d', 'median', 0.1, 100., 10.8, [220546]),
('Leucocytes', '3d', 'iqr', 0.1, 100., 10.8, [220546]),
('Blood Urea Nitrogen', '3d', 'min', None, None, None, [225624]),
# Urine and GU Irrigant Out, GU Irrigant/Urine Volume Out, OR, PACU
('Urine volume out', '1d', 'extrapolate', None, None, None, [226566, 227489, 226627, 226631]),
('Urine volume out', '7d', 'extrapolate', None, None, None, [226566, 227489, 226627, 226631]),
('Bilirubin total', '7d', 'max', None, None, None, [225690]),
# own min/max
('Lactate', '3d', 'max', 0.01, 50., 1.8, [225668]),
('Lactate', '12h', 'min', 0.01, 50., 1.8, [225668]),
# own min/max
('Sodium', '3d', 'trend', 1., 300., 139., [220645]),
('Sodium', '3d', 'median', 1., 300., 139., [220645]),
# own min/max
('Hemoglobin', '3d', 'max', 1., 30., 9.5, [220228]),
# Used all BP values of category "Routine Vital Signs"
('Diastolic blood pressure', '1d', 'median', None, None, 61, [227242, 224643, 225310, 220180,
220051]),
('Mean blood pressure', '4h', 'median', None, None, 77, [225312, 220181, 220052]),
('Mean blood pressure', '12h', 'median', None, None, 77, [225312, 220181, 220052]),
('Systolic blood pressure', '12h', 'iqr', None, None, 117, [227243, 224167, 225309, 220179,
220050]),
# Used "Respiratory Rate", "RR spontaneous" and "RR total"
('Estimated respiratory rate', '1d', 'median', None, None, None, [220210, 224689, 224690]),
# own min/max
('Thrombocytes', '7d', 'trend', 1., 1000., 176., [227457]),
('Glucose', '3d', 'median', 10., 1200., 127., [220621]),
# Used "Invasive ventilation"
('Tubus exists', 'per-icu-stay', 'true-until-discharge', None, None, None, [225792]),
# own min/max
('C-reactive protein', '3d', 'max', 0.01, 60., None, [225792]),
# No drugs for constipation in icu medications and also only few identified in emar(_detail)
# Hence, use "Elimination NCP - Interventions" (229134) instead as an indicator for constipation.
('Drugs for constipation', '1d', 'last', None, None, None, [229134]),
# Special routines necessary:
('Age', 'per-patient', 'last', None, None, None, []),
('Length of stay before ICU', 'per-patient', 'last', None, None, None, []),
('paO2/FiO2', '1d', 'median', None, None, 210., []),
('paO2/FiO2', '3d', 'trend', None, None, 210., []),
('paO2/FiO2', '3d', 'median', None, None, 210., []),
# In MIMIC cohort always 1.
('MetHb', '12h', 'min', None, None, 1., []),
('GCS score', '3d', 'min', None, None, 15, []),
# own min/max
('eGFR', '7d', 'trend', 0., 300., 68.4, []),
('Gamma-GT', '7d', 'median', None, None, None, []),
('Antithrombotic agents prophylactic dosage', 'per-icu-stay', 'true-until-discharge', None, None, None, []),
# Not in MIMIC DB
# 'Procalcitonin (max 7d) [ng/mL]'
# 'RHb (median 12h)'
]
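# A minimal sketch (not the project's real pipeline, which lives in
# preprocessing.step5_generate_features_for_stays) of how a single specification tuple
# above could be turned into one feature value; the clipping and imputation policy shown
# here is an assumption for illustration only.
def _apply_feature_spec_sketch(raw_values, spec):
    name, interval, func, vmin, vmax, impute, item_ids = spec
    values = pd.Series(raw_values, dtype=float)
    if vmin is not None:
        values = values[values >= vmin]  # drop implausibly low values
    if vmax is not None:
        values = values[values <= vmax]  # drop implausibly high values
    feature_name = '{} ({} {})'.format(name, func, interval)
    if values.empty:
        return feature_name, impute
    if func in ('min', 'max', 'median'):  # plain pandas reductions
        return feature_name, getattr(values, func)()
    return feature_name, None  # 'trend', 'iqr', 'extrapolate', ... need dedicated routines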
mimic_variables_gbm = [
'Age', 'Antithrombotic agents prophylactic dosage', 'BE', 'Bilirubin total', 'Blood Urea Nitrogen',
'Blood volume out', 'Body core temperature', 'C-reactive protein', 'CK', 'CK-MB', 'Calcium', 'Chloride',
'Diastolic blood pressure', 'Drugs for constipation', 'Estimated respiratory rate', 'GCS score', 'Gamma-GT',
'Glucose', 'Heart rate', 'Hematocrit', 'Hemoglobin', 'Is on automatic ventilation', 'Lactate',
'Length of stay before ICU', 'Leucocytes', 'Mean blood pressure', 'MetHb', 'O2 saturation', 'Phosphate',
'Potassium', 'RAS scale', 'Sodium', 'Systolic blood pressure', 'Thrombocytes', 'Tubus exists', 'Urine volume out',
'eGFR', 'pCO2', 'pH', 'pO2', 'paO2/FiO2'
]
def main():
parser = argparse.ArgumentParser(description='Extract features.')
parser.add_argument('password', type=str, help='Database password.')
parser.add_argument('item_overview', type=str, help='Description of all PDMS items and generated variables.')
parser.add_argument('output_file', type=str, help='File to store output data.')
parser.add_argument('--gbm', action='store_true', help='Create all features for included variables.')
parser.add_argument('--force', action='store_true', help='Force to override output file.')
args, _ = parser.parse_known_args()
db_conn = ResearchDBConnection(args.password)
mimic_stays = db_conn.read_table_from_db('mimic_stays')
# Add anchor year to stays to use it for the output.
mimic_db_conn = MIMICDBConnection(args.password)
mimic_patients = mimic_db_conn.read_table_from_db('mimic_core', 'patients')[['subject_id', 'anchor_year_group']]
old_num_mimic_stays = mimic_stays.shape[0]
mimic_stays = pd.merge(mimic_stays, mimic_patients, how='left', on='subject_id')
mimic_stays['anchor_year_group'] = mimic_stays['anchor_year_group'].map(
        {'2008 - 2010': pd.to_datetime('2008', format='%Y'),
         '2011 - 2013': pd.to_datetime('2011', format='%Y'),
         '2014 - 2016': pd.to_datetime('2014', format='%Y'),
         '2017 - 2019': pd.to_datetime('2017', format='%Y')})
from collections import defaultdict
import pandas as pd
import random
from anytree import Node, NodeMixin, LevelOrderIter, RenderTree
#This is the maximum size of the prefix, suffix and substring that will be counted
MAX_STR_SIZE = 8
#Class to denote the node for a generic summary data structure.
class SummaryDSNode(NodeMixin):
def __init__(self, name, parent=None, children=None):
super(SummaryDSNode, self).__init__()
self.name = name
self.frequency = 1
self.parent = parent
self.char_to_children_dict = {}
self.transition_probabilities = {}
#Compute transition probabilities based on Eq 5 of the paper
def update_transition_probabilities(self, root_node):
k = len(self.children)
total_frequency = sum([child.frequency for child in self.children])
numerator, denominator = k , k+1
if self.parent == root_node:
numerator = k + 1
else:
self.transition_probabilities[self.parent] = 1.0 / denominator
fraction = (numerator / denominator )
for child in self.children:
probability = 0.0
if total_frequency > 0:
probability = (child.frequency / total_frequency) * fraction
self.transition_probabilities[child] = probability
#This class represents the entire generic summary data structure.
#Using a common for ease of coding.
#It can be replaced with more performant ones such as prefix trees, suffix trees etc.
class SummaryDataStructure:
#string_generator_fn is a function that takes a string as input
#and outputs a list of "substrings" of interest.
#for e.g. all prefixes, suffixes,
#max_str_size: will be the largest prefix, substring, suffix string that will be created
#split_words: whether to ignore spaces in a string.
#if split_words is true, then "a b" will be inserted as two words a b .. else one word with space.
def __init__(self, string_generator_fn, max_str_size=MAX_STR_SIZE, split_words=True):
self.string_generator_fn = string_generator_fn
self.max_str_size = max_str_size
self.split_words = split_words
self.root_node = SummaryDSNode('')
def insert_string(self, string):
substrings_of_interest = self.string_generator_fn(string)
for substring in substrings_of_interest:
cur_node = self.root_node
for index, char in enumerate(substring):
if char in cur_node.char_to_children_dict:
cur_node = cur_node.char_to_children_dict[char]
else:
new_node = SummaryDSNode(substring[:index+1], parent=cur_node)
cur_node.char_to_children_dict[char] = new_node
cur_node = new_node
#Increment the frequency of the last node
cur_node.frequency = cur_node.frequency + 1
def update_summary_ds_from_file(self, input_file_name):
with open(input_file_name) as f:
for line in f:
strings = [line.strip()]
if self.split_words:
strings = line.strip().split()
for string in strings:
self.insert_string(string)
#returns a data frame with all the strings in the summary data structure and its frequencies
def get_selectivities(self):
string_frequency_dict = defaultdict(int)
for node in LevelOrderIter(self.root_node):
if node.is_root == False:
string_frequency_dict[node.name] = max(1, node.frequency - 1)
        df = pd.DataFrame.from_dict(string_frequency_dict, orient='index')
        return df
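if __name__ == '__main__':
    # Minimal usage sketch. The prefix generator below is an assumption for
    # illustration; any function mapping a string to its substrings of interest
    # (prefixes, suffixes, substrings, ...) can be passed instead.
    def all_prefixes(string, max_size=MAX_STR_SIZE):
        return [string[:i + 1] for i in range(min(len(string), max_size))]

    summary = SummaryDataStructure(all_prefixes)
    for word in ['data', 'database', 'date']:
        summary.insert_string(word)
    # One row per stored substring with its frequency, e.g. 'dat' is counted for all three words
    print(summary.get_selectivities())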
from django.http import JsonResponse, HttpResponse, HttpResponseRedirect
from django.template.loader import render_to_string
from django.contrib.messages.views import SuccessMessageMixin
from django.urls import reverse_lazy, reverse
from django.views import generic, View
from django.shortcuts import render, redirect
from webdriver_manager.chrome import ChromeDriverManager
from naver_book_crawling.generic import (
BSModalLoginView,
BSModalFormView,
BSModalCreateView,
BSModalUpdateView,
BSModalReadView,
BSModalDeleteView
)
from .forms import (
BookModelForm,
CustomUserCreationForm,
CustomAuthenticationForm,
BookFilterForm,
CrawlingForm,
isbnCrawlingForm,
bidCrawlingForm
)
from .models import Book, BookCrawling, BidCrawling
# Crawling libraries
import re
from bs4 import BeautifulSoup
import pandas as pd
from urllib.request import urlopen, HTTPError, URLError
from urllib.parse import quote_plus
from selenium import webdriver
import time
from sqlalchemy import create_engine
import pymysql
from webdriver_manager.chrome import ChromeDriverManager
import datetime
import numpy as np
from selenium.webdriver.common.alert import Alert
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.firefox.firefox_profile import FirefoxProfile
from selenium.webdriver.firefox.firefox_binary import FirefoxBinary
import os
# Search
from django.contrib import messages
from django.db.models import Q
class Index(generic.ListView):
model = BookCrawling
paginate_by = 20
context_object_name = 'books'
template_name = 'index.html'
def get_queryset(self):
qs = super().get_queryset()
if 'type' in self.request.GET:
qs = qs.filter(boot_type=int(self.request.GET['type']))
return qs
    # Pagination
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
paginator = context['paginator']
page_number_range = 10
max_index = len(paginator.page_range)
page = self.request.GET.get('page')
current_page = int(page) if page else 1
start_index = int((current_page - 1) /
page_number_range) * page_number_range
end_index = start_index + page_number_range
if end_index >= max_index:
end_index = max_index
page_range = paginator.page_range[start_index:end_index]
context['page_range'] = page_range
return context
def Detail(request, id):
bookCrawling = BookCrawling.objects.get(pk=id)
context = {
'bookCrawling': bookCrawling,
}
return render(request, 'book/detail_book.html', context)
def BidDetail(request, bid):
bidCrawling = BidCrawling.objects.get(pk=bid)
context = {
'bidCrawling': bidCrawling,
}
return render(request, 'book/bid_detail_book.html', context)
def get_text_list(tag_list):
return [tag.text for tag in tag_list]
def crawling(request):
return render(request, 'book/crawling_book.html')
def bidCrawler(request):
return render(request, 'book/bidCrawling_book.html')
class bidIndex(generic.ListView):
model = BidCrawling
paginate_by = 20
context_object_name = 'bids'
template_name = 'bid_index.html'
def get_queryset(self):
qs = super().get_queryset()
if 'type' in self.request.GET:
qs = qs.filter(boot_type=int(self.request.GET['type']))
return qs
    # Pagination
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
paginator = context['paginator']
page_number_range = 10
max_index = len(paginator.page_range)
page = self.request.GET.get('page')
current_page = int(page) if page else 1
start_index = int((current_page - 1) /
page_number_range) * page_number_range
end_index = start_index + page_number_range
if end_index >= max_index:
end_index = max_index
page_range = paginator.page_range[start_index:end_index]
context['page_range'] = page_range
return context
def crawlingBook(request):
if request.method == 'POST':
form = CrawlingForm(request.POST)
if form.is_valid():
plusUrl = form.cleaned_data['searchsubject']
searchStartPage = form.cleaned_data['searchstartpage']
searchEndPage = form.cleaned_data['searchendpage']
baseUrl = 'https://book.naver.com/search/search_in.nhn?query='
searchSubject = quote_plus(plusUrl)
url = baseUrl + searchSubject + "&&"
html = urlopen(url)
# url = requests.get(baseUrl + searchSubject + "&&")
# html = url.content
soup = BeautifulSoup(html, "html.parser")
            # book_crawling table columns
search_list = []
title_list = []
intro_list = []
author_intro_list = []
category_top_list = []
category_middle_list = []
category_bottom_list = []
ISBN_list = []
writer_list = []
translator_list = []
painter_list = []
publisher_list = []
publish_date_list = []
content_list = []
bid_list = []
image_list = []
grade_list = []
review_list = []
            # writer_info table columns
writer_name = []
writer_link = []
writer_num = []
writer_book_title = []
writer_bid = []
writer_isbn = []
options = webdriver.ChromeOptions()
options.add_argument('headless')
driver = webdriver.Chrome(ChromeDriverManager().install(), options=options)
driver.implicitly_wait(time_to_wait=5)
# searchStartPage = ""
# searchEndPage = ""
searchStart = int(searchStartPage)
searchEnd = int(searchEndPage)
for k in range(searchStart, searchEnd+1):
url_pre = (url+'pattern=0&orderType=rel.desc&viewType=list&searchType=bookSearch&serviceSm=service.basic&title=&author=&publisher=&isbn=&toc=&subject=&publishStartDay=&publishEndDay=&categoryId=&qdt=1&filterType=0&filterValue=&serviceIc=service.author&buyAllow=0&ebook=0&abook=0&page='+str(k))
html_pre = urlopen(url_pre)
# url_pre = requests.get(baseUrl + searchSubject + '&&'+'pattern=0&orderType=rel.desc&viewType=list&searchType=bookSearch&serviceSm=service.basic&title=&author=&publisher=&isbn=&toc=&subject=&publishStartDay=&publishEndDay=&categoryId=&qdt=1&filterType=0&filterValue=&serviceIc=service.author&buyAllow=0&ebook=0&abook=0&page='+str(k))
# html_pre = url_pre.content
soup_pre = BeautifulSoup(html_pre, "html.parser")
title = ''
title = get_text_list(soup_pre.select('dt'))[0:20]
for i in range(0, len(title)):
if '\xa0' in title[i]:
title[i] = title[i][0:title[i].find(
'\xa0')].replace('\n', '')
title_list.append(title[i])
link_list = []
try:
for href in soup_pre.find("ul", id="searchBiblioList").find_all("dt"):
link = href.find("a")["href"]
bid = href.find("a")["href"].split('=')[1]
link_list.append(link)
bid_list.append(bid)
except AttributeError:
print('검색어에 대한 도서' + str(len(ISBN_list)) + '권 크롤링이 완료되었습니다.')
driver.quit()
for i in range(0, len(link_list)):
# driver.get(link_list[i])
url_det = link_list[i]
html_det = urlopen(url_det)
# url_det = requests.get(link_list[i])
# html_det = url_det.content
soup_det = BeautifulSoup(html_det, "html.parser")
search_list.append(plusUrl)
try:
book_intro = soup_det.find('div', id='bookIntroContent')
book_intro_text = book_intro.get_text().replace('\n', '')
intro_list.append(book_intro_text)
except AttributeError:
book_intro_text = '없음'
intro_list.append(book_intro_text)
try:
author_intro = soup_det.find(
'div', id='authorIntroContent')
author_intro_text = author_intro.get_text().replace('\n', '')
author_intro_list.append(author_intro_text)
except AttributeError:
author_intro_text = '없음'
author_intro_list.append(author_intro_text)
try:
category_top = soup_det.find('li', class_='select')
category_top_text = category_top.get_text().replace('\n', '')
category_top_list.append(category_top_text)
except AttributeError:
category_top_text = '없음'
category_top_list.append(category_top_text)
try:
category_middle = soup_det.find('li', class_='select2')
category_middle_text = category_middle.get_text().replace('\n', '')
category_middle_list.append(category_middle_text)
except AttributeError:
category_middle_text = '없음'
category_middle_list.append(category_middle_text)
try:
category_bottom = soup_det.find('li', class_='select3')
category_bottom_text = category_bottom.get_text().replace('\n', '')
category_bottom_list.append(category_bottom_text)
except AttributeError:
category_bottom_text = '없음'
category_bottom_list.append(category_bottom_text)
grade = soup_det.find("div", class_="txt_desc").find(
"strong").text[:-1]
grade_list.append(grade)
review = soup_det.find(
"a", id="txt_desc_point").find_all("strong")[1].text
review_list.append(review)
book_info = soup_det.find('div', class_='book_info_inner')
book_info_text = book_info.get_text()
editor_exist = soup_det.find(
"div", class_="book_info_inner").find_all("em")[0:3]
if '저자' in book_info_text:
writer_str = book_info_text.find('저자')+3
writer_end = book_info_text.find('|', writer_str)
writer = book_info_text[writer_str:writer_end]
else:
writer_str = book_info_text.find('글')+2
writer_end = book_info_text.find('|', writer_str)
writer = book_info_text[writer_str:writer_end]
if '\xa0' in writer:
writer = writer[0:int(writer.find('\xa0'))]
writer_list.append(writer)
if '편집' not in book_info_text:
if ('그림' in book_info_text) and ('역자' in book_info_text):
painter_str = book_info_text.find('그림')+3
painter_end = book_info_text.find('|', painter_str)
painter = book_info_text[painter_str:painter_end]
painter_list.append(painter)
translator_str = book_info_text.find('역자')+3
translator_end = book_info_text.find(
'|', translator_str)
translator = book_info_text[translator_str:translator_end]
translator_list.append(translator)
publisher_str = translator_end+1
publisher_end = book_info_text.find(
'\n', publisher_str)
publisher = book_info_text[publisher_str:publisher_end]
publisher_list.append(publisher)
publish_date_str = publisher_end+2
publish_date_end = book_info_text.find(
'\n', publish_date_str)
publish_date = book_info_text[publish_date_str:publish_date_end]
publish_date_list.append(publish_date)
elif ('그림' in book_info_text) and ('역자' not in book_info_text):
translator = '없음'
translator_list.append(translator)
painter_str = book_info_text.find('그림')+3
painter_end = book_info_text.find('|', painter_str)
painter = book_info_text[painter_str:painter_end]
painter_list.append(painter)
publisher_str = painter_end+1
publisher_end = book_info_text.find(
'\n', publisher_str)
publisher = book_info_text[publisher_str:publisher_end]
publisher_list.append(publisher)
publish_date_str = publisher_end+2
publish_date_end = book_info_text.find(
'\n', publish_date_str)
publish_date = book_info_text[publish_date_str:publish_date_end]
publish_date_list.append(publish_date)
elif ('그림' not in book_info_text) and ('역자' in book_info_text):
painter = '없음'
painter_list.append(painter)
translator_str = book_info_text.find('역자')+3
translator_end = book_info_text.find(
'|', translator_str)
translator = book_info_text[translator_str:translator_end]
translator_list.append(translator)
publisher_str = translator_end+1
publisher_end = book_info_text.find(
'\n', publisher_str)
publisher = book_info_text[publisher_str:publisher_end]
publisher_list.append(publisher)
publish_date_str = publisher_end+2
publish_date_end = book_info_text.find(
'\n', publish_date_str)
publish_date = book_info_text[publish_date_str:publish_date_end]
publish_date_list.append(publish_date)
elif '그림' and '역자' not in book_info_text:
translator = '없음'
translator_list.append(translator)
painter = '없음'
painter_list.append(painter)
publisher_str = writer_end+1
publisher_end = book_info_text.find(
'\n', publisher_str)
publisher = book_info_text[publisher_str:publisher_end]
publisher_list.append(publisher)
publish_date_str = publisher_end+2
publish_date_end = book_info_text.find(
'\n', publish_date_str)
publish_date = book_info_text[publish_date_str:publish_date_end]
publish_date_list.append(publish_date)
elif '편집' in editor_exist:
if ('그림' in book_info_text) and ('편집' in book_info_text):
painter_str = book_info_text.find('그림')+3
painter_end = book_info_text.find('|', painter_str)
painter = book_info_text[painter_str:painter_end]
painter_list.append(painter)
translator_str = book_info_text.find('편집')+3
translator_end = book_info_text.find(
'|', translator_str)
translator = book_info_text[translator_str:translator_end]
translator_list.append(translator)
publisher_str = translator_end+1
publisher_end = book_info_text.find(
'\n', publisher_str)
publisher = book_info_text[publisher_str:publisher_end]
publisher_list.append(publisher)
publish_date_str = publisher_end+2
publish_date_end = book_info_text.find(
'\n', publish_date_str)
publish_date = book_info_text[publish_date_str:publish_date_end]
publish_date_list.append(publish_date)
elif ('그림' not in book_info_text) and ('편집' in book_info_text):
painter = '없음'
painter_list.append(painter)
translator_str = book_info_text.find('편집')+3
translator_end = book_info_text.find(
'|', translator_str)
translator = book_info_text[translator_str:translator_end]
translator_list.append(translator)
publisher_str = translator_end+1
publisher_end = book_info_text.find(
'\n', publisher_str)
publisher = book_info_text[publisher_str:publisher_end]
publisher_list.append(publisher)
publish_date_str = publisher_end+2
publish_date_end = book_info_text.find(
'\n', publish_date_str)
publish_date = book_info_text[publish_date_str:publish_date_end]
publish_date_list.append(publish_date)
ISBN_str = book_info_text.find('ISBN')+6
ISBN_end = book_info_text.find('|', ISBN_str)
if ISBN_end == -1:
ISBN_end = book_info_text.find('\n', ISBN_str)
ISBN = book_info_text[ISBN_str:ISBN_end]
if '\n' in ISBN:
ISBN = ISBN[0:int(ISBN.find('\n'))]
ISBN_list.append(ISBN)
content = ''
content = get_text_list(soup_det.select("div.book_cnt"))
if content == []:
content = ["없음"]
content_list.append(content)
else:
content_list.append(content)
for src in soup_det.find("div", class_="thumb_type").find_all("a"):
bookImage = src.find("img")["src"]
image_list.append(bookImage)
writer_a = soup_det.find("div", class_="book_info_inner").find_all("div")[
2].find_all("a")[:-1]
writer_book = soup_det.find(
"div", class_="book_info").find("a").text
writer_book_bid = soup_det.find("div", class_="book_info").find("a")[
"href"].split("=")[1]
for w in range(0, len(writer_a)):
writer_n = writer_a[w].text
writer_name.append(writer_n)
writer_href = writer_a[w]["href"]
writer_link.append(writer_href)
writer_split = writer_a[w]["href"].split("=")[3]
writer_num.append(writer_split)
writer_book_title.append(writer_book)
writer_bid.append(writer_book_bid)
writer_isbn.append(ISBN)
if i == 19:
break
if k == searchEnd:
print('검색어에 대한 도서 '+str(len(content_list))+'권 크롤링이 완료되었습니다.')
driver.quit()
break
book_list = []
for searchSubject, title, writer, translator, painter, publisher, publishDate, intro, content, authorIntro, categoryTop, categoryMiddle, categoryBottom, bid, ISBN, grade, review, image in zip(search_list, title_list, writer_list, translator_list, painter_list, publisher_list, publish_date_list, intro_list, content_list, author_intro_list, category_top_list, category_middle_list, category_bottom_list, bid_list, ISBN_list, grade_list, review_list, image_list):
book = {"searchSubject": searchSubject, "title": title, "writer": writer, "translator": translator, "painter": painter, "publisher": publisher, "publishDate": publishDate, "intro": intro, "content": content,
"authorIntro": authorIntro, "categoryTop": categoryTop, "categoryMiddle": categoryMiddle, "categoryBottom": categoryBottom, "bid": bid, "ISBN": ISBN, "grade": grade, "review": review, "image": image}
book_list.append(book)
writer_info_list = []
for num, name, bookTitle, bid, isbn, link in zip(writer_num, writer_name, writer_book_title, writer_bid, writer_isbn, writer_link):
writer_info = {"num": num, "name": name, "bookTitle": bookTitle,
"bid": bid, "isbn": isbn, "link": link}
writer_info_list.append(writer_info)
book_DF = pd.DataFrame(book_list)
writer_DF = pd.DataFrame(writer_info_list)
engine = create_engine(
"mysql+pymysql://root:[email protected]:3306/book?charset=utf8mb4", encoding='utf8')
conn = engine.connect()
book_DF.to_sql(name='book_crawling', con=engine,
if_exists='append', index=False)
writer_DF.to_sql(name='writer_info', con=engine,
if_exists='append', index=False)
conn.close()
return HttpResponseRedirect(reverse_lazy('index'))
def bidCrawling(request):
if request.method == 'POST':
form = bidCrawlingForm(request.POST)
if form.is_valid():
searchStartBid = form.cleaned_data['searchstartbid']
searchEndBid = form.cleaned_data['searchendbid']
bidSearchStart = int(searchStartBid)
bidSearchEnd = int(searchEndBid)
engine = create_engine(
"mysql+pymysql://root:[email protected]:3306/book?charset=utf8mb4", encoding='utf8')
torexe = os.popen(r'C:\Dev_program\Tor Browser\Browser\firefox.exe')
profile = FirefoxProfile(
r'C:\Dev_program\Tor Browser\Browser\TorBrowser\Data\Browser\profile.default')
profile.set_preference('network.proxy.type', 1)
profile.set_preference('network.proxy.socks', '127.0.0.1')
profile.set_preference('network.proxy.socks_port', 9050)
profile.set_preference("network.proxy.socks_remote_dns", False)
profile.update_preferences()
driver = webdriver.Firefox(
firefox_profile=profile, executable_path=r'C:\Dev_program\geckodriver.exe')
driver.implicitly_wait(time_to_wait=5)
searchCount = bidSearchEnd-bidSearchStart+1
searchMok = int(np.ceil((bidSearchEnd-bidSearchStart+1)/50))
taskId = 1
while searchMok > 0:
title_list = []
intro_list = []
author_intro_list = []
category_top_list = []
category_middle_list = []
category_bottom_list = []
ISBN_list = []
writer_list = []
translator_list = []
painter_list = []
publisher_list = []
publish_date_list = []
content_list = []
bid_list = []
image_list = []
grade_list = []
review_list = []
writer_name = []
writer_link = []
writer_num = []
writer_book_title = []
writer_bid = []
writer_isbn = []
taskId_list = []
taskContent_list = []
str_list = []
end_list = []
complete_list = []
errorDetail_list = []
crawlerNum_list = []
taskId_list.append(taskId)
if (bidSearchStart+49) < bidSearchEnd:
taskContent = str(bidSearchStart)+'~'+str(bidSearchStart+49)
else:
taskContent = str(bidSearchStart)+'~'+str(bidSearchEnd)
taskContent_list.append(taskContent)
str_now = datetime.datetime.now()
str_time = str_now.strftime('%Y-%m-%d %H:%M:%S')
str_list.append(str_time)
for i in range(bidSearchStart, bidSearchStart+50):
url_det = 'https://book.naver.com/bookdb/book_detail.nhn?bid=' + \
str(i)
try:
html_det = urlopen(url_det)
except (HTTPError, URLError, IndexError) as e:
errorDetail_list.append(e)
end_now = datetime.datetime.now()
end_time = end_now.strftime('%Y-%m-%d %H:%M:%S')
end_list.append(end_time)
complete_list.append('error')
crawlerNum_list.append(11)
task_list = []
for taskId, taskContent, str_time, end_time, complete, errorDetail, crawlerNum in zip(taskId_list, taskContent_list, str_list, end_list, complete_list, errorDetail_list, crawlerNum_list):
task = {"taskId": taskId, "taskContent": taskContent, "str_time": str_time, "end_time": end_time,
"complete": complete, "errorDetail": errorDetail, "crawlerNum": crawlerNum}
task_list.append(task)
task_DF = ''
task_DF = pd.DataFrame(task_list)
conn = engine.connect()
task_DF.to_sql(name='task', con=engine,
if_exists='append', index=False)
conn.close()
print(e)
print('에러로 크롤링 종료')
time.sleep(3)
if bidSearchStart+50 > bidSearchEnd:
driver.quit()
else:
continue
else:
soup_det = BeautifulSoup(html_det, "html.parser")
if '책정보, :' in soup_det.text:
print(str(i), "번 제외(삭제 서지)")
continue
else:
pass
book_info = soup_det.find('div', class_='book_info_inner')
try:
book_info_text = book_info.get_text()
except AttributeError:
print(str(i), "번 제외(Attr 에러)")
continue
if driver.current_url == 'https://nid.naver.com/nidlogin.login?svctype=128&a_version=2&viewtype=2&url=http://book.naver.com&surl=http://book.naver.com':
continue
else:
pass
try:
book_intro = soup_det.find('div', id='bookIntroContent')
book_intro_text = book_intro.get_text().replace('\n', '')
intro_list.append(book_intro_text)
except AttributeError:
book_intro_text = ''
intro_list.append(book_intro_text)
try:
author_intro = soup_det.find('div', id='authorIntroContent')
author_intro_text = author_intro.get_text().replace('\n', '')
author_intro_list.append(author_intro_text)
except AttributeError:
author_intro_text = ''
author_intro_list.append(author_intro_text)
try:
category_top = soup_det.find('li', class_='select')
category_top_text = category_top.get_text().replace('\n', '')
category_top_list.append(category_top_text)
except AttributeError:
category_top_text = ''
category_top_list.append(category_top_text)
try:
category_middle = soup_det.find('li', class_='select2')
category_middle_text = category_middle.get_text().replace('\n', '')
category_middle_list.append(category_middle_text)
except AttributeError:
category_middle_text = ''
category_middle_list.append(category_middle_text)
try:
category_bottom = soup_det.find('li', class_='select3')
category_bottom_text = category_bottom.get_text().replace('\n', '')
category_bottom_list.append(category_bottom_text)
except AttributeError:
category_bottom_text = ''
category_bottom_list.append(category_bottom_text)
try:
grade = soup_det.find("div", class_="txt_desc").find(
"strong").text[:-1]
except AttributeError:
grade = ''
grade_list.append(grade)
try:
review = soup_det.find(
"a", id="txt_desc_point").find_all("strong")[1].text
except AttributeError:
review = ''
review_list.append(review)
bookinfo_line1 = book_info.find_all("div")[2]
rel_name = bookinfo_line1.text
rel_list = []
for rel in bookinfo_line1.find_all("em"):
rel_cate = rel.text
rel_list.append(rel_cate)
for r in range(0, len(rel_list)):
rel_name = rel_name.replace(rel_list[r], '')
rel_name = rel_name.split('|')
publish_date = rel_name[-1]
if len(publish_date) == 4:
publish_date = publish_date + ".01.01"
elif len(publish_date) == 6:
publish_date = publish_date[:4]+"."+publish_date[4:]+".01"
elif publish_date == '':
publish_date = '2025.01.01'
if publish_date[0] != '1' and publish_date[0] != '2':
publish_date = '2025.01.01'
publish_date_list.append(publish_date)
publisher = rel_name[-2]
publisher_list.append(publisher)
rel_name = rel_name[1:-2]
rel_list = rel_list[1:]
if (len(rel_list) and len(rel_name)) == 2:
painter = rel_name[0].replace('\n', '')
translator = rel_name[1].replace('\n', '')
elif (len(rel_list) and len(rel_name)) == 1:
if '역자' in rel_list:
translator = rel_name[0].replace('\n', '')
painter = ''
else:
translator = ''
painter = rel_name[0].replace('\n', '')
else:
translator = ''
painter = ''
translator_list.append(translator)
painter_list.append(painter)
ISBN_str = book_info_text.find('ISBN')+6
ISBN_end = book_info_text.find('|', ISBN_str)
if ISBN_end == -1:
ISBN_end = book_info_text.find('\n', ISBN_str)
ISBN = book_info_text[ISBN_str:ISBN_end]
if '\n' in ISBN:
ISBN = ISBN[0:int(ISBN.find('\n'))]
ISBN_list.append(ISBN)
content = ''
content = get_text_list(soup_det.select("div.book_cnt"))
if content == []:
content = [""]
content_list.append(content)
else:
content_list.append(content)
for src in soup_det.find("div", class_="thumb_type").find_all("a"):
bookImage = src.find("img")["src"]
image_list.append(bookImage)
writer_a = soup_det.find("div", class_="book_info_inner").find_all("div")[
2].find_all("a")[:-1]
writer_book = soup_det.find(
"div", class_="book_info").find("a").text
writer_book_bid = soup_det.find("div", class_="book_info").find("a")[
"href"].split("=")[1]
bid_list.append(writer_book_bid)
title_list.append(writer_book)
writer = soup_det.find("div", class_="book_info_inner").find_all("div")[
2].text.split("|")[0][3:].strip()
writer_list.append(writer)
for w in range(0, len(writer_a)):
writer_n = writer_a[w].text
writer_name.append(writer_n)
writer_href = writer_a[w]["href"]
writer_link.append(writer_href)
writer_split = writer_a[w]["href"].split("=")[3]
writer_num.append(writer_split)
writer_book_title.append(writer_book)
writer_bid.append(writer_book_bid)
writer_isbn.append(ISBN)
time.sleep(round(np.random.uniform(0.5, 1.4), 2))
if i == bidSearchEnd:
print('bid 번호에 대한 도서 '+str(searchCount)+'권 크롤링이 완료되었습니다.')
driver.quit()
break
book_list = []
book_DF = ''
for title, writer, translator, painter, publisher, publishDate, intro, content, authorIntro, categoryTop, categoryMiddle, categoryBottom, bid, ISBN, grade, review, image in zip(title_list, writer_list, translator_list, painter_list, publisher_list, publish_date_list, intro_list, content_list, author_intro_list, category_top_list, category_middle_list, category_bottom_list, bid_list, ISBN_list, grade_list, review_list, image_list):
book = {"title": title, "writer": writer, "translator": translator, "painter": painter, "publisher": publisher, "publishDate": publishDate, "intro": intro, "content": content,
"authorIntro": authorIntro, "categoryTop": categoryTop, "categoryMiddle": categoryMiddle, "categoryBottom": categoryBottom, "bid": bid, "ISBN": ISBN, "grade": grade, "review": review, "image": image}
book_list.append(book)
                book_DF = pd.DataFrame(book_list)
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import datetime
import math
import os
import random
import sys
import threading
import pandas as pd
cmd_subfolder = os.path.dirname(os.path.abspath(__file__))
while not cmd_subfolder .endswith('pydeepgenomics'):
cmd_subfolder = os.path.dirname(cmd_subfolder)
cmd_subfolder = os.path.dirname(cmd_subfolder)
try:
from pydeepgenomics.tools import generaltools as gt
from pydeepgenomics.preprocess import settings
except ImportError:
if cmd_subfolder not in sys.path:
sys.path.append(cmd_subfolder)
from pydeepgenomics.tools import generaltools as gt
from pydeepgenomics.preprocess import settings
# def write_encoded_output(
# path_data,
# chromosome,
# dataframe,
# sve,
# liste_names,
# namedir="floatfiles"):
# for i in range(len(liste_names)):
# # First allele encoding
# # REF
# dataframe.loc[
# ((dataframe.REF == "A") &
# (dataframe.loc[:, liste_names[i]].str[0] == "0")),
# "output" + liste_names[i]] = sve[0]
# dataframe.loc[
# ((dataframe.REF == "T") &
# (dataframe.loc[:, liste_names[i]].str[0] == "0")),
# "output" + liste_names[i]] = sve[1]
# dataframe.loc[
# ((dataframe.REF == "G") &
# (dataframe.loc[:, liste_names[i]].str[0] == "0")),
# "output" + liste_names[i]] = sve[2]
# dataframe.loc[
# ((dataframe.REF == "C") &
# (dataframe.loc[:, liste_names[i]].str[0] == "0")),
# "output" + liste_names[i]] = sve[3]
# # ALT
# dataframe.loc[
# ((dataframe.ALT == "A") &
# (dataframe.loc[:, liste_names[i]].str[0] == "1")),
# "output" + liste_names[i]] = sve[0]
# dataframe.loc[
# ((dataframe.ALT == "T") &
# (dataframe.loc[:, liste_names[i]].str[0] == "1")),
# "output" + liste_names[i]] = sve[1]
# dataframe.loc[
# ((dataframe.ALT == "G") &
# (dataframe.loc[:, liste_names[i]].str[0] == "1")),
# "output" + liste_names[i]] = sve[2]
# dataframe.loc[
# ((dataframe.ALT == "C") &
# (dataframe.loc[:, liste_names[i]].str[0] == "1")),
# "output" + liste_names[i]] = sve[3]
#
# # Second allele encoding
# # REF
# dataframe.loc[
# ((dataframe.REF == "A") &
# (dataframe.loc[:, liste_names[i]].str[-1] == "0")),
# "output" + liste_names[i]] += sve[4]
# dataframe.loc[
# ((dataframe.REF == "T") &
# (dataframe.loc[:, liste_names[i]].str[-1] == "0")),
# "output" + liste_names[i]] += sve[5]
# dataframe.loc[
# ((dataframe.REF == "G") &
# (dataframe.loc[:, liste_names[i]].str[-1] == "0")),
# "output" + liste_names[i]] += sve[6]
# dataframe.loc[
# ((dataframe.REF == "C") &
# (dataframe.loc[:, liste_names[i]].str[-1] == "0")),
# "output" + liste_names[i]] += sve[7]
# # ALT
# dataframe.loc[
# ((dataframe.ALT == "A") &
# (dataframe.loc[:, liste_names[i]].str[-1] == "1")),
# "output" + liste_names[i]] += sve[4]
# dataframe.loc[
# ((dataframe.ALT == "T") &
# (dataframe.loc[:, liste_names[i]].str[-1] == "1")),
# "output" + liste_names[i]] += sve[5]
# dataframe.loc[
# ((dataframe.ALT == "G") &
# (dataframe.loc[:, liste_names[i]].str[-1] == "1")),
# "output" + liste_names[i]] += sve[6]
# dataframe.loc[
# ((dataframe.ALT == "C") &
# (dataframe.loc[:, liste_names[i]].str[-1] == "1")),
# "output" + liste_names[i]] += sve[7]
#
# # Add position
# dataframe.loc[:, "output" + liste_names[i]] += dataframe.POS
#
# # Write files
#
# if len(liste_names) > 1:
# jobs = []
# for i in range(len(liste_names)):
# thread = threading.Thread(target=save_samples(
# path_data,
# chromosome,
# dataframe,
# liste_names,
# i,
# name_dir=namedir))
# jobs.append(thread)
# for j in jobs:
# j.start()
# for j in jobs:
# j.join()
# else:
# save_samples(
# path_data,
# chromosome,
# dataframe,
# liste_names,
# 0,
# name_dir=namedir)
#
def do_conversion(dataframe,
list_names,
encoding_dict=settings.NUCLEOTIDE_LABELS_bin,
output_conversion="to_int"):
for i in range(len(list_names)):
# To put it in a nutshell from left to right -->
#
# Two bits to tell if first allele is A, T, C or G
# two bits as above but for the second allele
# The following bits are dedicated to the position
        # Initialise the output column once per sample; resetting it inside
        # the nucleotide loop below would wipe previously accumulated bits.
        if output_conversion == "to_int":
            # The leading "1" is a sentinel bit so leading zeros survive the
            # final int(x, 2) conversion.
            dataframe.loc[:, "output" + list_names[i]] = "1"
        else:
            dataframe.loc[:, "output" + list_names[i]] = ""
        # First allele encoding
        for nucleotide, bit_value in encoding_dict.items():
# REF
dataframe.loc[
(
(dataframe.REF == nucleotide) &
(dataframe.loc[:, list_names[i]].str[0] == "0")
), "output" + list_names[i]] += bit_value
# ALT
dataframe.loc[
(
(dataframe.ALT == nucleotide) &
(dataframe.loc[:, list_names[i]].str[0] == "1")
), "output" + list_names[i]] += bit_value
# Second allele encoding
for nucleotide, bit_value in encoding_dict.items():
# REF
dataframe.loc[
(
(dataframe.REF == nucleotide) &
(dataframe.loc[:, list_names[i]].str[-1] == "0")
), "output" + list_names[i]] += bit_value
# ALT
dataframe.loc[
(
(dataframe.ALT == nucleotide) &
(dataframe.loc[:, list_names[i]].str[-1] == "1")
), "output" + list_names[i]] += bit_value
# Add position
dataframe.loc[:, "output" + list_names[i]] += \
dataframe.POS.apply(lambda x: "{0:01b}".format(x))
if output_conversion == "to_int":
dataframe.loc[:, "output" + list_names[i]] = \
dataframe.loc[
:,
"output" + list_names[i]
].apply(lambda x: int(x, 2))
return dataframe
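# Illustrative sketch (not part of the original module): the allele/position
# bit layout described inside do_conversion, using a hypothetical 2-bit
# nucleotide table standing in for settings.NUCLEOTIDE_LABELS_bin. All names
# and values below are assumptions for demonstration only.
def _encoding_layout_example():
    example_labels = {"A": "00", "T": "01", "G": "10", "C": "11"}  # assumed
    ref, alt, genotype, position = "A", "G", "0|1", 12345
    first_allele = example_labels[ref if genotype[0] == "0" else alt]
    second_allele = example_labels[ref if genotype[-1] == "0" else alt]
    # Two bits per allele followed by the position in binary.
    return first_allele + second_allele + "{0:01b}".format(position)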
def write_encoded_output(
path_data,
chromosome,
dataframe,
list_names,
namedir="floatfiles"):
# Write files
if len(list_names) > 1:
jobs = []
for i in range(len(list_names)):
            # Pass the callable and its arguments separately; calling
            # save_samples() here would run it immediately and hand its
            # return value (None) to Thread as the target.
            thread = threading.Thread(
                target=save_samples,
                args=(path_data, chromosome, dataframe, list_names, i),
                kwargs={"name_dir": namedir})
jobs.append(thread)
for j in jobs:
j.start()
for j in jobs:
j.join()
else:
save_samples(
path_data,
chromosome,
dataframe,
list_names,
0,
name_dir=namedir)
def decode_position(
to_test,
decoding_dict=settings.LN,
fbp=settings.FBP,
snps_value_encoded=settings.SVE,
max_iter=20):
enc_al1 = snps_value_encoded[0]
enc_al2 = snps_value_encoded[4]
position = 0
al1 = al2 = "N"
while (
(to_test - enc_al2 - enc_al1 - position != 0) and
(enc_al1 <= math.pow(2, 33) and (enc_al2 <= math.pow(2, 37))) and
(max_iter > 0)
):
if (
(enc_al2 * 2 < to_test) and
(enc_al1 == math.pow(2, 28)) and
(position == 0)
):
enc_al2 *= 2
al2 = decoding_dict[enc_al2]
elif (enc_al1 * 2 + enc_al2 < to_test) and (position == 0):
enc_al1 *= 2
al1 = decoding_dict[enc_al1]
elif to_test - enc_al1 - enc_al2 < fbp:
position = int(to_test - enc_al1 - enc_al2)
max_iter -= 1
if max_iter <= 0:
position = -1
return al1[0], al2[0], position
def decode_position_int(
to_test,
decoding_dict=settings.REVERSE_NUCLEOTIDE_LABELS_bin):
al1 = decoding_dict['{0:01b}'.format(to_test)[1:3]]
al2 = decoding_dict['{0:01b}'.format(to_test)[3: 5]]
position = int('{0:01b}'.format(to_test)[5:], 2)
return al1, al2, position
def decode_position_bin(
to_test,
decoding_dict=settings.REVERSE_NUCLEOTIDE_LABELS_bin):
al1 = decoding_dict[to_test[:2]]
al2 = decoding_dict[to_test[2: 4]]
position = int(to_test[4:], 2)
return al1, al2, position
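# Illustrative sketch (assumptions flagged): round trip through the binary
# layout consumed by decode_position_bin. The reverse lookup table below is a
# stand-in for settings.REVERSE_NUCLEOTIDE_LABELS_bin and is assumed here.
def _decode_bin_example():
    reverse_labels = {"00": "A", "01": "T", "10": "G", "11": "C"}  # assumed
    encoded = "00" + "10" + "{0:01b}".format(12345)  # A, G, position 12345
    al1, al2, position = decode_position_bin(encoded, decoding_dict=reverse_labels)
    assert (al1, al2, position) == ("A", "G", 12345)
    return al1, al2, position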
def save_samples(
path_data,
chromosome,
dataframe,
list_names,
i,
name_dir="floatfiles"):
# Save
dataframe.loc[:, ["output" + list_names[0]]].to_csv(
os.path.join(path_data, name_dir, chromosome, list_names[i]+".txt.gz"),
index=False,
header=False,
compression="gzip")
def _build_output_tree_structure(
path_to_output,
name_output_dir,
chromosome_name):
if not os.path.isdir(os.path.join(path_to_output, name_output_dir)):
os.mkdir(os.path.join(path_to_output, name_output_dir))
if not os.path.isdir(
os.path.join(path_to_output, name_output_dir, chromosome_name)):
os.mkdir(
os.path.join(path_to_output, name_output_dir, chromosome_name))
def encode_file_positions(
chr_to_be_processed,
path_to_data,
path_to_output,
name_output_dir="encoded_files",
verbose=True,
printing=True,
logging=False):
# Start timer
timer = gt.time_since_first_call()
next(timer)
print_parameters = {
"verbose": verbose,
"printing": printing,
"logging": logging,
"in_loop": False
}
gt.custom_output(
"Function {0} started at {1}".format(
encode_file_positions.__name__,
str(datetime.datetime.now())) +
"\nProcessing files in {0}:".format(path_to_data), **print_parameters)
chromosome_name = str(chr_to_be_processed)
_build_output_tree_structure(
path_to_output,
name_output_dir,
chromosome_name)
# load the meta data in a pandas data frame
_meta = pd.read_csv(
os.path.join(path_to_data, "_meta.txt.gz"),
sep="\t",
index_col=False)
list_files = gt.list_elements(
path_to_data,
extension=".txt.gz",
exception=[
os.path.join(path_to_data, "_meta.txt.gz"),
os.path.join(path_to_data, "_comments.txt.gz")])
nb_processed_files = 0
batch_iter = 0
list_ = []
df = _meta.drop(["#CHROM", "ID", "QUAL", "FILTER", "INFO", "FORMAT"], 1)
for files in list_files:
print_parameters["in_loop"] = True
sample_name = files.split("/")[-1].split(".")[0].split("_")[-1]
list_.append(sample_name)
df[sample_name] = | pd.read_csv(files, index_col=None, header=None) | pandas.read_csv |
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Utilities to deal with types. This is mostly focused on python3.
"""
import datetime
import decimal
import sys
import typing
from collections.abc import Iterable
from distutils.version import LooseVersion
from inspect import isclass
from typing import Any, Callable, Generic, List, Tuple, Union, Type, get_type_hints
import numpy as np
import pandas as pd
from pandas.api.types import CategoricalDtype, pandas_dtype # type: ignore[attr-defined]
from pandas.api.extensions import ExtensionDtype
extension_dtypes: Tuple[type, ...]
try:
from pandas import Int8Dtype, Int16Dtype, Int32Dtype, Int64Dtype
extension_dtypes_available = True
extension_dtypes = (Int8Dtype, Int16Dtype, Int32Dtype, Int64Dtype)
try:
from pandas import BooleanDtype, StringDtype
extension_object_dtypes_available = True
extension_dtypes += (BooleanDtype, StringDtype)
except ImportError:
extension_object_dtypes_available = False
try:
from pandas import Float32Dtype, Float64Dtype
extension_float_dtypes_available = True
extension_dtypes += (Float32Dtype, Float64Dtype)
except ImportError:
extension_float_dtypes_available = False
except ImportError:
extension_dtypes_available = False
extension_object_dtypes_available = False
extension_float_dtypes_available = False
extension_dtypes = ()
import pyarrow as pa
import pyspark.sql.types as types
from pyspark.sql.pandas.types import to_arrow_type, from_arrow_type
# For running doctests and reference resolution in PyCharm.
from pyspark import pandas as ps # noqa: F401
from pyspark.pandas._typing import Dtype, T
if typing.TYPE_CHECKING:
from pyspark.pandas.internal import InternalField
# A column of data, with the data type.
class SeriesType(Generic[T]):
def __init__(self, dtype: Dtype, spark_type: types.DataType):
self.dtype = dtype
self.spark_type = spark_type
def __repr__(self) -> str:
return "SeriesType[{}]".format(self.spark_type)
class DataFrameType:
def __init__(
self,
index_fields: List["InternalField"],
data_fields: List["InternalField"],
):
self.index_fields = index_fields
self.data_fields = data_fields
self.fields = index_fields + data_fields
@property
def dtypes(self) -> List[Dtype]:
return [field.dtype for field in self.fields]
@property
def spark_type(self) -> types.StructType:
return types.StructType([field.struct_field for field in self.fields])
def __repr__(self) -> str:
return "DataFrameType[{}]".format(self.spark_type)
# The type is a scalar type that is furthermore understood by Spark.
class ScalarType:
def __init__(self, dtype: Dtype, spark_type: types.DataType):
self.dtype = dtype
self.spark_type = spark_type
def __repr__(self) -> str:
return "ScalarType[{}]".format(self.spark_type)
# The type is left unspecified or we do not know about this type.
class UnknownType:
def __init__(self, tpe: Any):
self.tpe = tpe
def __repr__(self) -> str:
return "UnknownType[{}]".format(self.tpe)
class IndexNameTypeHolder:
name = None
tpe = None
short_name = "IndexNameType"
class NameTypeHolder:
name = None
tpe = None
short_name = "NameType"
def as_spark_type(
tpe: Union[str, type, Dtype], *, raise_error: bool = True, prefer_timestamp_ntz: bool = False
) -> types.DataType:
"""
Given a Python type, returns the equivalent spark type.
Accepts:
- the built-in types in Python
- the built-in types in numpy
- list of pairs of (field_name, type)
- dictionaries of field_name -> type
- Python3's typing system
"""
# For NumPy typing, NumPy version should be 1.21+ and Python version should be 3.8+
if sys.version_info >= (3, 8) and LooseVersion(np.__version__) >= LooseVersion("1.21"):
if (
hasattr(tpe, "__origin__")
and tpe.__origin__ is np.ndarray # type: ignore[union-attr]
and hasattr(tpe, "__args__")
and len(tpe.__args__) > 1 # type: ignore[union-attr]
):
# numpy.typing.NDArray
return types.ArrayType(
as_spark_type(
tpe.__args__[1].__args__[0], raise_error=raise_error # type: ignore[union-attr]
)
)
if isinstance(tpe, np.dtype) and tpe == np.dtype("object"):
pass
# ArrayType
elif tpe in (np.ndarray,):
return types.ArrayType(types.StringType())
elif hasattr(tpe, "__origin__") and issubclass(
tpe.__origin__, list # type: ignore[union-attr]
):
element_type = as_spark_type(
tpe.__args__[0], raise_error=raise_error # type: ignore[union-attr]
)
if element_type is None:
return None
return types.ArrayType(element_type)
# BinaryType
elif tpe in (bytes, np.character, np.bytes_, np.string_):
return types.BinaryType()
# BooleanType
elif tpe in (bool, np.bool_, "bool", "?"):
return types.BooleanType()
# DateType
elif tpe in (datetime.date,):
return types.DateType()
# NumericType
elif tpe in (np.int8, np.byte, "int8", "byte", "b"):
return types.ByteType()
elif tpe in (decimal.Decimal,):
# TODO: considering about the precision & scale for decimal type.
return types.DecimalType(38, 18)
elif tpe in (float, np.float_, np.float64, "float", "float64", "double"):
return types.DoubleType()
elif tpe in (np.float32, "float32", "f"):
return types.FloatType()
elif tpe in (np.int32, "int32", "i"):
return types.IntegerType()
elif tpe in (int, np.int64, "int", "int64", "long"):
return types.LongType()
elif tpe in (np.int16, "int16", "short"):
return types.ShortType()
# StringType
elif tpe in (str, np.unicode_, "str", "U"):
return types.StringType()
# TimestampType or TimestampNTZType if timezone is not specified.
elif tpe in (datetime.datetime, np.datetime64, "datetime64[ns]", "M"):
return types.TimestampNTZType() if prefer_timestamp_ntz else types.TimestampType()
# DayTimeIntervalType
elif tpe in (datetime.timedelta, np.timedelta64, "timedelta64[ns]"):
return types.DayTimeIntervalType()
# categorical types
    elif isinstance(tpe, CategoricalDtype) or (isinstance(tpe, str) and tpe == "category"):
return types.LongType()
# extension types
elif extension_dtypes_available:
# IntegralType
if isinstance(tpe, Int8Dtype) or (isinstance(tpe, str) and tpe == "Int8"):
return types.ByteType()
elif isinstance(tpe, Int16Dtype) or (isinstance(tpe, str) and tpe == "Int16"):
return types.ShortType()
elif isinstance(tpe, Int32Dtype) or (isinstance(tpe, str) and tpe == "Int32"):
return types.IntegerType()
elif isinstance(tpe, Int64Dtype) or (isinstance(tpe, str) and tpe == "Int64"):
return types.LongType()
if extension_object_dtypes_available:
# BooleanType
if isinstance(tpe, BooleanDtype) or (isinstance(tpe, str) and tpe == "boolean"):
return types.BooleanType()
# StringType
elif isinstance(tpe, StringDtype) or (isinstance(tpe, str) and tpe == "string"):
return types.StringType()
if extension_float_dtypes_available:
# FractionalType
if isinstance(tpe, Float32Dtype) or (isinstance(tpe, str) and tpe == "Float32"):
return types.FloatType()
elif isinstance(tpe, Float64Dtype) or (isinstance(tpe, str) and tpe == "Float64"):
return types.DoubleType()
if raise_error:
raise TypeError("Type %s was not understood." % tpe)
else:
return None
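# Illustrative sketch (not part of the upstream module): expected results of
# as_spark_type for a few common inputs, following the branches above.
def _as_spark_type_examples():
    assert isinstance(as_spark_type(int), types.LongType)
    assert isinstance(as_spark_type(np.float32), types.FloatType)
    assert isinstance(as_spark_type(str), types.StringType)
    assert isinstance(as_spark_type(datetime.date), types.DateType)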
def spark_type_to_pandas_dtype(
spark_type: types.DataType, *, use_extension_dtypes: bool = False
) -> Dtype:
"""Return the given Spark DataType to pandas dtype."""
if use_extension_dtypes and extension_dtypes_available:
# IntegralType
if isinstance(spark_type, types.ByteType):
return Int8Dtype()
elif isinstance(spark_type, types.ShortType):
return Int16Dtype()
elif isinstance(spark_type, types.IntegerType):
return Int32Dtype()
elif isinstance(spark_type, types.LongType):
return Int64Dtype()
if extension_object_dtypes_available:
# BooleanType
if isinstance(spark_type, types.BooleanType):
return BooleanDtype()
# StringType
elif isinstance(spark_type, types.StringType):
return StringDtype()
# FractionalType
if extension_float_dtypes_available:
if isinstance(spark_type, types.FloatType):
return | Float32Dtype() | pandas.Float32Dtype |
"""
Test output formatting for Series/DataFrame, including to_string & reprs
"""
from datetime import datetime
from io import StringIO
import itertools
from operator import methodcaller
import os
from pathlib import Path
import re
from shutil import get_terminal_size
import sys
import textwrap
import dateutil
import numpy as np
import pytest
import pytz
from pandas.compat import (
IS64,
is_platform_windows,
)
import pandas.util._test_decorators as td
import pandas as pd
from pandas import (
DataFrame,
Index,
MultiIndex,
NaT,
Series,
Timestamp,
date_range,
get_option,
option_context,
read_csv,
reset_option,
set_option,
)
import pandas._testing as tm
import pandas.io.formats.format as fmt
import pandas.io.formats.printing as printing
use_32bit_repr = is_platform_windows() or not IS64
@pytest.fixture(params=["string", "pathlike", "buffer"])
def filepath_or_buffer_id(request):
"""
A fixture yielding test ids for filepath_or_buffer testing.
"""
return request.param
@pytest.fixture
def filepath_or_buffer(filepath_or_buffer_id, tmp_path):
"""
A fixture yielding a string representing a filepath, a path-like object
and a StringIO buffer. Also checks that buffer is not closed.
"""
if filepath_or_buffer_id == "buffer":
buf = StringIO()
yield buf
assert not buf.closed
else:
assert isinstance(tmp_path, Path)
if filepath_or_buffer_id == "pathlike":
yield tmp_path / "foo"
else:
yield str(tmp_path / "foo")
@pytest.fixture
def assert_filepath_or_buffer_equals(
filepath_or_buffer, filepath_or_buffer_id, encoding
):
"""
Assertion helper for checking filepath_or_buffer.
"""
def _assert_filepath_or_buffer_equals(expected):
if filepath_or_buffer_id == "string":
with open(filepath_or_buffer, encoding=encoding) as f:
result = f.read()
elif filepath_or_buffer_id == "pathlike":
result = filepath_or_buffer.read_text(encoding=encoding)
elif filepath_or_buffer_id == "buffer":
result = filepath_or_buffer.getvalue()
assert result == expected
return _assert_filepath_or_buffer_equals
def curpath():
pth, _ = os.path.split(os.path.abspath(__file__))
return pth
def has_info_repr(df):
r = repr(df)
c1 = r.split("\n")[0].startswith("<class")
c2 = r.split("\n")[0].startswith(r"<class") # _repr_html_
return c1 or c2
def has_non_verbose_info_repr(df):
has_info = has_info_repr(df)
r = repr(df)
# 1. <class>
# 2. Index
# 3. Columns
# 4. dtype
# 5. memory usage
# 6. trailing newline
nv = len(r.split("\n")) == 6
return has_info and nv
def has_horizontally_truncated_repr(df):
try: # Check header row
fst_line = np.array(repr(df).splitlines()[0].split())
cand_col = np.where(fst_line == "...")[0][0]
except IndexError:
return False
# Make sure each row has this ... in the same place
r = repr(df)
for ix, l in enumerate(r.splitlines()):
if not r.split()[cand_col] == "...":
return False
return True
def has_vertically_truncated_repr(df):
r = repr(df)
only_dot_row = False
for row in r.splitlines():
if re.match(r"^[\.\ ]+$", row):
only_dot_row = True
return only_dot_row
def has_truncated_repr(df):
return has_horizontally_truncated_repr(df) or has_vertically_truncated_repr(df)
def has_doubly_truncated_repr(df):
return has_horizontally_truncated_repr(df) and has_vertically_truncated_repr(df)
def has_expanded_repr(df):
r = repr(df)
for line in r.split("\n"):
if line.endswith("\\"):
return True
return False
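# Illustrative sketch (not part of the upstream test module): with max_rows
# smaller than the frame, the repr elides rows with a dots-only line, which
# has_vertically_truncated_repr detects.
def _truncation_helper_example():
    tall = DataFrame({"a": range(100)})
    with option_context("display.max_rows", 10):
        return has_vertically_truncated_repr(tall)  # True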
@pytest.mark.filterwarnings("ignore::FutureWarning:.*format")
class TestDataFrameFormatting:
def test_eng_float_formatter(self, float_frame):
df = float_frame
df.loc[5] = 0
fmt.set_eng_float_format()
repr(df)
fmt.set_eng_float_format(use_eng_prefix=True)
repr(df)
fmt.set_eng_float_format(accuracy=0)
repr(df)
tm.reset_display_options()
def test_show_null_counts(self):
df = DataFrame(1, columns=range(10), index=range(10))
df.iloc[1, 1] = np.nan
def check(show_counts, result):
buf = StringIO()
df.info(buf=buf, show_counts=show_counts)
assert ("non-null" in buf.getvalue()) is result
with option_context(
"display.max_info_rows", 20, "display.max_info_columns", 20
):
check(None, True)
check(True, True)
check(False, False)
with option_context("display.max_info_rows", 5, "display.max_info_columns", 5):
check(None, False)
check(True, False)
check(False, False)
# GH37999
with tm.assert_produces_warning(
FutureWarning, match="null_counts is deprecated.+"
):
buf = StringIO()
df.info(buf=buf, null_counts=True)
assert "non-null" in buf.getvalue()
# GH37999
with pytest.raises(ValueError, match=r"null_counts used with show_counts.+"):
df.info(null_counts=True, show_counts=True)
def test_repr_truncation(self):
max_len = 20
with option_context("display.max_colwidth", max_len):
df = DataFrame(
{
"A": np.random.randn(10),
"B": [
tm.rands(np.random.randint(max_len - 1, max_len + 1))
for i in range(10)
],
}
)
r = repr(df)
r = r[r.find("\n") + 1 :]
adj = fmt.get_adjustment()
for line, value in zip(r.split("\n"), df["B"]):
if adj.len(value) + 1 > max_len:
assert "..." in line
else:
assert "..." not in line
with option_context("display.max_colwidth", 999999):
assert "..." not in repr(df)
with option_context("display.max_colwidth", max_len + 2):
assert "..." not in repr(df)
def test_repr_deprecation_negative_int(self):
# TODO(2.0): remove in future version after deprecation cycle
# Non-regression test for:
# https://github.com/pandas-dev/pandas/issues/31532
width = get_option("display.max_colwidth")
with tm.assert_produces_warning(FutureWarning):
set_option("display.max_colwidth", -1)
set_option("display.max_colwidth", width)
def test_repr_chop_threshold(self):
df = DataFrame([[0.1, 0.5], [0.5, -0.1]])
reset_option("display.chop_threshold") # default None
assert repr(df) == " 0 1\n0 0.1 0.5\n1 0.5 -0.1"
with option_context("display.chop_threshold", 0.2):
assert repr(df) == " 0 1\n0 0.0 0.5\n1 0.5 0.0"
with option_context("display.chop_threshold", 0.6):
assert repr(df) == " 0 1\n0 0.0 0.0\n1 0.0 0.0"
with option_context("display.chop_threshold", None):
assert repr(df) == " 0 1\n0 0.1 0.5\n1 0.5 -0.1"
def test_repr_chop_threshold_column_below(self):
# GH 6839: validation case
df = DataFrame([[10, 20, 30, 40], [8e-10, -1e-11, 2e-9, -2e-11]]).T
with option_context("display.chop_threshold", 0):
assert repr(df) == (
" 0 1\n"
"0 10.0 8.000000e-10\n"
"1 20.0 -1.000000e-11\n"
"2 30.0 2.000000e-09\n"
"3 40.0 -2.000000e-11"
)
with option_context("display.chop_threshold", 1e-8):
assert repr(df) == (
" 0 1\n"
"0 10.0 0.000000e+00\n"
"1 20.0 0.000000e+00\n"
"2 30.0 0.000000e+00\n"
"3 40.0 0.000000e+00"
)
with option_context("display.chop_threshold", 5e-11):
assert repr(df) == (
" 0 1\n"
"0 10.0 8.000000e-10\n"
"1 20.0 0.000000e+00\n"
"2 30.0 2.000000e-09\n"
"3 40.0 0.000000e+00"
)
def test_repr_obeys_max_seq_limit(self):
with option_context("display.max_seq_items", 2000):
assert len(printing.pprint_thing(list(range(1000)))) > 1000
with option_context("display.max_seq_items", 5):
assert len(printing.pprint_thing(list(range(1000)))) < 100
with option_context("display.max_seq_items", 1):
assert len(printing.pprint_thing(list(range(1000)))) < 9
def test_repr_set(self):
assert printing.pprint_thing({1}) == "{1}"
def test_repr_is_valid_construction_code(self):
# for the case of Index, where the repr is traditional rather than
# stylized
idx = Index(["a", "b"])
res = eval("pd." + repr(idx))
tm.assert_series_equal(Series(res), Series(idx))
def test_repr_should_return_str(self):
# https://docs.python.org/3/reference/datamodel.html#object.__repr__
# "...The return value must be a string object."
# (str on py2.x, str (unicode) on py3)
data = [8, 5, 3, 5]
index1 = ["\u03c3", "\u03c4", "\u03c5", "\u03c6"]
cols = ["\u03c8"]
df = DataFrame(data, columns=cols, index=index1)
assert type(df.__repr__()) == str # both py2 / 3
def test_repr_no_backslash(self):
with option_context("mode.sim_interactive", True):
df = DataFrame(np.random.randn(10, 4))
assert "\\" not in repr(df)
def test_expand_frame_repr(self):
df_small = DataFrame("hello", index=[0], columns=[0])
df_wide = DataFrame("hello", index=[0], columns=range(10))
df_tall = DataFrame("hello", index=range(30), columns=range(5))
with option_context("mode.sim_interactive", True):
with option_context(
"display.max_columns",
10,
"display.width",
20,
"display.max_rows",
20,
"display.show_dimensions",
True,
):
with option_context("display.expand_frame_repr", True):
assert not has_truncated_repr(df_small)
assert not has_expanded_repr(df_small)
assert not has_truncated_repr(df_wide)
assert has_expanded_repr(df_wide)
assert has_vertically_truncated_repr(df_tall)
assert has_expanded_repr(df_tall)
with option_context("display.expand_frame_repr", False):
assert not has_truncated_repr(df_small)
assert not has_expanded_repr(df_small)
assert not has_horizontally_truncated_repr(df_wide)
assert not has_expanded_repr(df_wide)
assert has_vertically_truncated_repr(df_tall)
assert not has_expanded_repr(df_tall)
def test_repr_non_interactive(self):
# in non interactive mode, there can be no dependency on the
# result of terminal auto size detection
df = DataFrame("hello", index=range(1000), columns=range(5))
with option_context(
"mode.sim_interactive", False, "display.width", 0, "display.max_rows", 5000
):
assert not has_truncated_repr(df)
assert not has_expanded_repr(df)
def test_repr_truncates_terminal_size(self, monkeypatch):
# see gh-21180
terminal_size = (118, 96)
monkeypatch.setattr(
"pandas.io.formats.format.get_terminal_size", lambda: terminal_size
)
index = range(5)
columns = MultiIndex.from_tuples(
[
("This is a long title with > 37 chars.", "cat"),
("This is a loooooonger title with > 43 chars.", "dog"),
]
)
df = DataFrame(1, index=index, columns=columns)
result = repr(df)
h1, h2 = result.split("\n")[:2]
assert "long" in h1
assert "loooooonger" in h1
assert "cat" in h2
assert "dog" in h2
# regular columns
df2 = DataFrame({"A" * 41: [1, 2], "B" * 41: [1, 2]})
result = repr(df2)
assert df2.columns[0] in result.split("\n")[0]
def test_repr_truncates_terminal_size_full(self, monkeypatch):
# GH 22984 ensure entire window is filled
terminal_size = (80, 24)
df = DataFrame(np.random.rand(1, 7))
monkeypatch.setattr(
"pandas.io.formats.format.get_terminal_size", lambda: terminal_size
)
assert "..." not in str(df)
def test_repr_truncation_column_size(self):
# dataframe with last column very wide -> check it is not used to
# determine size of truncation (...) column
df = DataFrame(
{
"a": [108480, 30830],
"b": [12345, 12345],
"c": [12345, 12345],
"d": [12345, 12345],
"e": ["a" * 50] * 2,
}
)
assert "..." in str(df)
assert " ... " not in str(df)
def test_repr_max_columns_max_rows(self):
term_width, term_height = get_terminal_size()
if term_width < 10 or term_height < 10:
pytest.skip(f"terminal size too small, {term_width} x {term_height}")
def mkframe(n):
index = [f"{i:05d}" for i in range(n)]
return DataFrame(0, index, index)
df6 = mkframe(6)
df10 = mkframe(10)
with option_context("mode.sim_interactive", True):
with option_context("display.width", term_width * 2):
with option_context("display.max_rows", 5, "display.max_columns", 5):
assert not has_expanded_repr(mkframe(4))
assert not has_expanded_repr(mkframe(5))
assert not has_expanded_repr(df6)
assert has_doubly_truncated_repr(df6)
with option_context("display.max_rows", 20, "display.max_columns", 10):
# Out off max_columns boundary, but no extending
# since not exceeding width
assert not has_expanded_repr(df6)
assert not has_truncated_repr(df6)
with option_context("display.max_rows", 9, "display.max_columns", 10):
# out vertical bounds can not result in expanded repr
assert not has_expanded_repr(df10)
assert has_vertically_truncated_repr(df10)
# width=None in terminal, auto detection
with option_context(
"display.max_columns",
100,
"display.max_rows",
term_width * 20,
"display.width",
None,
):
df = mkframe((term_width // 7) - 2)
assert not has_expanded_repr(df)
df = mkframe((term_width // 7) + 2)
printing.pprint_thing(df._repr_fits_horizontal_())
assert has_expanded_repr(df)
def test_repr_min_rows(self):
df = DataFrame({"a": range(20)})
# default setting no truncation even if above min_rows
assert ".." not in repr(df)
assert ".." not in df._repr_html_()
df = DataFrame({"a": range(61)})
# default of max_rows 60 triggers truncation if above
assert ".." in repr(df)
assert ".." in df._repr_html_()
with option_context("display.max_rows", 10, "display.min_rows", 4):
# truncated after first two rows
assert ".." in repr(df)
assert "2 " not in repr(df)
assert "..." in df._repr_html_()
assert "<td>2</td>" not in df._repr_html_()
with option_context("display.max_rows", 12, "display.min_rows", None):
# when set to None, follow value of max_rows
assert "5 5" in repr(df)
assert "<td>5</td>" in df._repr_html_()
with option_context("display.max_rows", 10, "display.min_rows", 12):
# when set value higher as max_rows, use the minimum
assert "5 5" not in repr(df)
assert "<td>5</td>" not in df._repr_html_()
with option_context("display.max_rows", None, "display.min_rows", 12):
# max_rows of None -> never truncate
assert ".." not in repr(df)
assert ".." not in df._repr_html_()
def test_str_max_colwidth(self):
# GH 7856
df = DataFrame(
[
{
"a": "foo",
"b": "bar",
"c": "uncomfortably long line with lots of stuff",
"d": 1,
},
{"a": "foo", "b": "bar", "c": "stuff", "d": 1},
]
)
df.set_index(["a", "b", "c"])
assert str(df) == (
" a b c d\n"
"0 foo bar uncomfortably long line with lots of stuff 1\n"
"1 foo bar stuff 1"
)
with option_context("max_colwidth", 20):
assert str(df) == (
" a b c d\n"
"0 foo bar uncomfortably lo... 1\n"
"1 foo bar stuff 1"
)
def test_auto_detect(self):
term_width, term_height = get_terminal_size()
fac = 1.05 # Arbitrary large factor to exceed term width
cols = range(int(term_width * fac))
index = range(10)
df = DataFrame(index=index, columns=cols)
with option_context("mode.sim_interactive", True):
with option_context("display.max_rows", None):
with option_context("display.max_columns", None):
# Wrap around with None
assert has_expanded_repr(df)
with option_context("display.max_rows", 0):
with option_context("display.max_columns", 0):
# Truncate with auto detection.
assert has_horizontally_truncated_repr(df)
index = range(int(term_height * fac))
df = DataFrame(index=index, columns=cols)
with option_context("display.max_rows", 0):
with option_context("display.max_columns", None):
# Wrap around with None
assert has_expanded_repr(df)
# Truncate vertically
assert has_vertically_truncated_repr(df)
with option_context("display.max_rows", None):
with option_context("display.max_columns", 0):
assert has_horizontally_truncated_repr(df)
def test_to_string_repr_unicode(self):
buf = StringIO()
unicode_values = ["\u03c3"] * 10
unicode_values = np.array(unicode_values, dtype=object)
df = DataFrame({"unicode": unicode_values})
df.to_string(col_space=10, buf=buf)
# it works!
repr(df)
idx = Index(["abc", "\u03c3a", "aegdvg"])
ser = Series(np.random.randn(len(idx)), idx)
rs = repr(ser).split("\n")
line_len = len(rs[0])
for line in rs[1:]:
try:
line = line.decode(get_option("display.encoding"))
except AttributeError:
pass
if not line.startswith("dtype:"):
assert len(line) == line_len
# it works even if sys.stdin in None
_stdin = sys.stdin
try:
sys.stdin = None
repr(df)
finally:
sys.stdin = _stdin
def test_east_asian_unicode_false(self):
# not aligned properly because of east asian width
# mid col
df = DataFrame(
{"a": ["あ", "いいい", "う", "ええええええ"], "b": [1, 222, 33333, 4]},
index=["a", "bb", "c", "ddd"],
)
expected = (
" a b\na あ 1\n"
"bb いいい 222\nc う 33333\n"
"ddd ええええええ 4"
)
assert repr(df) == expected
# last col
df = DataFrame(
{"a": [1, 222, 33333, 4], "b": ["あ", "いいい", "う", "ええええええ"]},
index=["a", "bb", "c", "ddd"],
)
expected = (
" a b\na 1 あ\n"
"bb 222 いいい\nc 33333 う\n"
"ddd 4 ええええええ"
)
assert repr(df) == expected
# all col
df = DataFrame(
{"a": ["あああああ", "い", "う", "えええ"], "b": ["あ", "いいい", "う", "ええええええ"]},
index=["a", "bb", "c", "ddd"],
)
expected = (
" a b\na あああああ あ\n"
"bb い いいい\nc う う\n"
"ddd えええ ええええええ"
)
assert repr(df) == expected
# column name
df = DataFrame(
{"b": ["あ", "いいい", "う", "ええええええ"], "あああああ": [1, 222, 33333, 4]},
index=["a", "bb", "c", "ddd"],
)
expected = (
" b あああああ\na あ 1\n"
"bb いいい 222\nc う 33333\n"
"ddd ええええええ 4"
)
assert repr(df) == expected
# index
df = DataFrame(
{"a": ["あああああ", "い", "う", "えええ"], "b": ["あ", "いいい", "う", "ええええええ"]},
index=["あああ", "いいいいいい", "うう", "え"],
)
expected = (
" a b\nあああ あああああ あ\n"
"いいいいいい い いいい\nうう う う\n"
"え えええ ええええええ"
)
assert repr(df) == expected
# index name
df = DataFrame(
{"a": ["あああああ", "い", "う", "えええ"], "b": ["あ", "いいい", "う", "ええええええ"]},
index=Index(["あ", "い", "うう", "え"], name="おおおお"),
)
expected = (
" a b\n"
"おおおお \n"
"あ あああああ あ\n"
"い い いいい\n"
"うう う う\n"
"え えええ ええええええ"
)
assert repr(df) == expected
# all
df = DataFrame(
{"あああ": ["あああ", "い", "う", "えええええ"], "いいいいい": ["あ", "いいい", "う", "ええ"]},
index=Index(["あ", "いいい", "うう", "え"], name="お"),
)
expected = (
" あああ いいいいい\n"
"お \n"
"あ あああ あ\n"
"いいい い いいい\n"
"うう う う\n"
"え えええええ ええ"
)
assert repr(df) == expected
# MultiIndex
idx = MultiIndex.from_tuples(
[("あ", "いい"), ("う", "え"), ("おおお", "かかかか"), ("き", "くく")]
)
df = DataFrame(
{"a": ["あああああ", "い", "う", "えええ"], "b": ["あ", "いいい", "う", "ええええええ"]},
index=idx,
)
expected = (
" a b\n"
"あ いい あああああ あ\n"
"う え い いいい\n"
"おおお かかかか う う\n"
"き くく えええ ええええええ"
)
assert repr(df) == expected
# truncate
with option_context("display.max_rows", 3, "display.max_columns", 3):
df = DataFrame(
{
"a": ["あああああ", "い", "う", "えええ"],
"b": ["あ", "いいい", "う", "ええええええ"],
"c": ["お", "か", "ききき", "くくくくくく"],
"ああああ": ["さ", "し", "す", "せ"],
},
columns=["a", "b", "c", "ああああ"],
)
expected = (
" a ... ああああ\n0 あああああ ... さ\n"
".. ... ... ...\n3 えええ ... せ\n"
"\n[4 rows x 4 columns]"
)
assert repr(df) == expected
df.index = ["あああ", "いいいい", "う", "aaa"]
expected = (
" a ... ああああ\nあああ あああああ ... さ\n"
".. ... ... ...\naaa えええ ... せ\n"
"\n[4 rows x 4 columns]"
)
assert repr(df) == expected
def test_east_asian_unicode_true(self):
# Enable Unicode option -----------------------------------------
with option_context("display.unicode.east_asian_width", True):
# mid col
df = DataFrame(
{"a": ["あ", "いいい", "う", "ええええええ"], "b": [1, 222, 33333, 4]},
index=["a", "bb", "c", "ddd"],
)
expected = (
" a b\na あ 1\n"
"bb いいい 222\nc う 33333\n"
"ddd ええええええ 4"
)
assert repr(df) == expected
# last col
df = DataFrame(
{"a": [1, 222, 33333, 4], "b": ["あ", "いいい", "う", "ええええええ"]},
index=["a", "bb", "c", "ddd"],
)
expected = (
" a b\na 1 あ\n"
"bb 222 いいい\nc 33333 う\n"
"ddd 4 ええええええ"
)
assert repr(df) == expected
# all col
df = DataFrame(
{"a": ["あああああ", "い", "う", "えええ"], "b": ["あ", "いいい", "う", "ええええええ"]},
index=["a", "bb", "c", "ddd"],
)
expected = (
" a b\n"
"a あああああ あ\n"
"bb い いいい\n"
"c う う\n"
"ddd えええ ええええええ"
)
assert repr(df) == expected
# column name
df = DataFrame(
{"b": ["あ", "いいい", "う", "ええええええ"], "あああああ": [1, 222, 33333, 4]},
index=["a", "bb", "c", "ddd"],
)
expected = (
" b あああああ\n"
"a あ 1\n"
"bb いいい 222\n"
"c う 33333\n"
"ddd ええええええ 4"
)
assert repr(df) == expected
# index
df = DataFrame(
{"a": ["あああああ", "い", "う", "えええ"], "b": ["あ", "いいい", "う", "ええええええ"]},
index=["あああ", "いいいいいい", "うう", "え"],
)
expected = (
" a b\n"
"あああ あああああ あ\n"
"いいいいいい い いいい\n"
"うう う う\n"
"え えええ ええええええ"
)
assert repr(df) == expected
# index name
df = DataFrame(
{"a": ["あああああ", "い", "う", "えええ"], "b": ["あ", "いいい", "う", "ええええええ"]},
index=Index(["あ", "い", "うう", "え"], name="おおおお"),
)
expected = (
" a b\n"
"おおおお \n"
"あ あああああ あ\n"
"い い いいい\n"
"うう う う\n"
"え えええ ええええええ"
)
assert repr(df) == expected
# all
df = DataFrame(
{"あああ": ["あああ", "い", "う", "えええええ"], "いいいいい": ["あ", "いいい", "う", "ええ"]},
index=Index(["あ", "いいい", "うう", "え"], name="お"),
)
expected = (
" あああ いいいいい\n"
"お \n"
"あ あああ あ\n"
"いいい い いいい\n"
"うう う う\n"
"え えええええ ええ"
)
assert repr(df) == expected
# MultiIndex
idx = MultiIndex.from_tuples(
[("あ", "いい"), ("う", "え"), ("おおお", "かかかか"), ("き", "くく")]
)
df = DataFrame(
{"a": ["あああああ", "い", "う", "えええ"], "b": ["あ", "いいい", "う", "ええええええ"]},
index=idx,
)
expected = (
" a b\n"
"あ いい あああああ あ\n"
"う え い いいい\n"
"おおお かかかか う う\n"
"き くく えええ ええええええ"
)
assert repr(df) == expected
# truncate
with option_context("display.max_rows", 3, "display.max_columns", 3):
df = DataFrame(
{
"a": ["あああああ", "い", "う", "えええ"],
"b": ["あ", "いいい", "う", "ええええええ"],
"c": ["お", "か", "ききき", "くくくくくく"],
"ああああ": ["さ", "し", "す", "せ"],
},
columns=["a", "b", "c", "ああああ"],
)
expected = (
" a ... ああああ\n"
"0 あああああ ... さ\n"
".. ... ... ...\n"
"3 えええ ... せ\n"
"\n[4 rows x 4 columns]"
)
assert repr(df) == expected
df.index = ["あああ", "いいいい", "う", "aaa"]
expected = (
" a ... ああああ\n"
"あああ あああああ ... さ\n"
"... ... ... ...\n"
"aaa えええ ... せ\n"
"\n[4 rows x 4 columns]"
)
assert repr(df) == expected
# ambiguous unicode
df = DataFrame(
{"b": ["あ", "いいい", "¡¡", "ええええええ"], "あああああ": [1, 222, 33333, 4]},
index=["a", "bb", "c", "¡¡¡"],
)
expected = (
" b あああああ\n"
"a あ 1\n"
"bb いいい 222\n"
"c ¡¡ 33333\n"
"¡¡¡ ええええええ 4"
)
assert repr(df) == expected
def test_to_string_buffer_all_unicode(self):
buf = StringIO()
empty = DataFrame({"c/\u03c3": Series(dtype=object)})
nonempty = DataFrame({"c/\u03c3": Series([1, 2, 3])})
print(empty, file=buf)
print(nonempty, file=buf)
# this should work
buf.getvalue()
def test_to_string_with_col_space(self):
df = DataFrame(np.random.random(size=(1, 3)))
c10 = len(df.to_string(col_space=10).split("\n")[1])
c20 = len(df.to_string(col_space=20).split("\n")[1])
c30 = len(df.to_string(col_space=30).split("\n")[1])
assert c10 < c20 < c30
# GH 8230
# col_space wasn't being applied with header=False
with_header = df.to_string(col_space=20)
with_header_row1 = with_header.splitlines()[1]
no_header = df.to_string(col_space=20, header=False)
assert len(with_header_row1) == len(no_header)
def test_to_string_with_column_specific_col_space_raises(self):
df = DataFrame(np.random.random(size=(3, 3)), columns=["a", "b", "c"])
msg = (
"Col_space length\\(\\d+\\) should match "
"DataFrame number of columns\\(\\d+\\)"
)
with pytest.raises(ValueError, match=msg):
df.to_string(col_space=[30, 40])
with pytest.raises(ValueError, match=msg):
df.to_string(col_space=[30, 40, 50, 60])
msg = "unknown column"
with pytest.raises(ValueError, match=msg):
df.to_string(col_space={"a": "foo", "b": 23, "d": 34})
def test_to_string_with_column_specific_col_space(self):
df = DataFrame(np.random.random(size=(3, 3)), columns=["a", "b", "c"])
result = df.to_string(col_space={"a": 10, "b": 11, "c": 12})
# 3 separating space + each col_space for (id, a, b, c)
assert len(result.split("\n")[1]) == (3 + 1 + 10 + 11 + 12)
result = df.to_string(col_space=[10, 11, 12])
assert len(result.split("\n")[1]) == (3 + 1 + 10 + 11 + 12)
def test_to_string_truncate_indices(self):
for index in [
tm.makeStringIndex,
tm.makeUnicodeIndex,
tm.makeIntIndex,
tm.makeDateIndex,
tm.makePeriodIndex,
]:
for column in [tm.makeStringIndex]:
for h in [10, 20]:
for w in [10, 20]:
with option_context("display.expand_frame_repr", False):
df = DataFrame(index=index(h), columns=column(w))
with option_context("display.max_rows", 15):
if h == 20:
assert has_vertically_truncated_repr(df)
else:
assert not has_vertically_truncated_repr(df)
with option_context("display.max_columns", 15):
if w == 20:
assert has_horizontally_truncated_repr(df)
else:
assert not (has_horizontally_truncated_repr(df))
with option_context(
"display.max_rows", 15, "display.max_columns", 15
):
if h == 20 and w == 20:
assert has_doubly_truncated_repr(df)
else:
assert not has_doubly_truncated_repr(df)
def test_to_string_truncate_multilevel(self):
arrays = [
["bar", "bar", "baz", "baz", "foo", "foo", "qux", "qux"],
["one", "two", "one", "two", "one", "two", "one", "two"],
]
df = DataFrame(index=arrays, columns=arrays)
with option_context("display.max_rows", 7, "display.max_columns", 7):
assert has_doubly_truncated_repr(df)
def test_truncate_with_different_dtypes(self):
# 11594, 12045
# when truncated the dtypes of the splits can differ
# 11594
import datetime
s = Series(
[datetime.datetime(2012, 1, 1)] * 10
+ [datetime.datetime(1012, 1, 2)]
+ [datetime.datetime(2012, 1, 3)] * 10
)
with option_context("display.max_rows", 8):
result = str(s)
assert "object" in result
# 12045
df = DataFrame({"text": ["some words"] + [None] * 9})
with option_context("display.max_rows", 8, "display.max_columns", 3):
result = str(df)
assert "None" in result
assert "NaN" not in result
def test_truncate_with_different_dtypes_multiindex(self):
# GH#13000
df = DataFrame({"Vals": range(100)})
frame = pd.concat([df], keys=["Sweep"], names=["Sweep", "Index"])
result = repr(frame)
result2 = repr(frame.iloc[:5])
assert result.startswith(result2)
def test_datetimelike_frame(self):
# GH 12211
df = DataFrame({"date": [Timestamp("20130101").tz_localize("UTC")] + [NaT] * 5})
with option_context("display.max_rows", 5):
result = str(df)
assert "2013-01-01 00:00:00+00:00" in result
assert "NaT" in result
assert "..." in result
assert "[6 rows x 1 columns]" in result
dts = [Timestamp("2011-01-01", tz="US/Eastern")] * 5 + [NaT] * 5
df = DataFrame({"dt": dts, "x": [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]})
with option_context("display.max_rows", 5):
expected = (
" dt x\n"
"0 2011-01-01 00:00:00-05:00 1\n"
"1 2011-01-01 00:00:00-05:00 2\n"
".. ... ..\n"
"8 NaT 9\n"
"9 NaT 10\n\n"
"[10 rows x 2 columns]"
)
assert repr(df) == expected
dts = [NaT] * 5 + [Timestamp("2011-01-01", tz="US/Eastern")] * 5
df = DataFrame({"dt": dts, "x": [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]})
with option_context("display.max_rows", 5):
expected = (
" dt x\n"
"0 NaT 1\n"
"1 NaT 2\n"
".. ... ..\n"
"8 2011-01-01 00:00:00-05:00 9\n"
"9 2011-01-01 00:00:00-05:00 10\n\n"
"[10 rows x 2 columns]"
)
assert repr(df) == expected
dts = [Timestamp("2011-01-01", tz="Asia/Tokyo")] * 5 + [
Timestamp("2011-01-01", tz="US/Eastern")
] * 5
df = DataFrame({"dt": dts, "x": [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]})
with option_context("display.max_rows", 5):
expected = (
" dt x\n"
"0 2011-01-01 00:00:00+09:00 1\n"
"1 2011-01-01 00:00:00+09:00 2\n"
".. ... ..\n"
"8 2011-01-01 00:00:00-05:00 9\n"
"9 2011-01-01 00:00:00-05:00 10\n\n"
"[10 rows x 2 columns]"
)
assert repr(df) == expected
@pytest.mark.parametrize(
"start_date",
[
"2017-01-01 23:59:59.999999999",
"2017-01-01 23:59:59.99999999",
"2017-01-01 23:59:59.9999999",
"2017-01-01 23:59:59.999999",
"2017-01-01 23:59:59.99999",
"2017-01-01 23:59:59.9999",
],
)
def test_datetimeindex_highprecision(self, start_date):
# GH19030
# Check that high-precision time values for the end of day are
# included in repr for DatetimeIndex
df = DataFrame({"A": date_range(start=start_date, freq="D", periods=5)})
result = str(df)
assert start_date in result
dti = date_range(start=start_date, freq="D", periods=5)
df = DataFrame({"A": range(5)}, index=dti)
result = str(df.index)
assert start_date in result
def test_nonunicode_nonascii_alignment(self):
df = DataFrame([["aa\xc3\xa4\xc3\xa4", 1], ["bbbb", 2]])
rep_str = df.to_string()
lines = rep_str.split("\n")
assert len(lines[1]) == len(lines[2])
def test_unicode_problem_decoding_as_ascii(self):
dm = DataFrame({"c/\u03c3": Series({"test": np.nan})})
str(dm.to_string())
def test_string_repr_encoding(self, datapath):
filepath = datapath("io", "parser", "data", "unicode_series.csv")
df = read_csv(filepath, header=None, encoding="latin1")
repr(df)
repr(df[1])
def test_repr_corner(self):
# representing infs poses no problems
df = DataFrame({"foo": [-np.inf, np.inf]})
repr(df)
def test_frame_info_encoding(self):
index = ["'Til There Was You (1997)", "ldum klaka (Cold Fever) (1994)"]
fmt.set_option("display.max_rows", 1)
df = DataFrame(columns=["a", "b", "c"], index=index)
repr(df)
repr(df.T)
fmt.set_option("display.max_rows", 200)
def test_wide_repr(self):
with option_context(
"mode.sim_interactive",
True,
"display.show_dimensions",
True,
"display.max_columns",
20,
):
max_cols = get_option("display.max_columns")
df = DataFrame(tm.rands_array(25, size=(10, max_cols - 1)))
set_option("display.expand_frame_repr", False)
rep_str = repr(df)
assert f"10 rows x {max_cols - 1} columns" in rep_str
set_option("display.expand_frame_repr", True)
wide_repr = repr(df)
assert rep_str != wide_repr
with option_context("display.width", 120):
wider_repr = repr(df)
assert len(wider_repr) < len(wide_repr)
reset_option("display.expand_frame_repr")
def test_wide_repr_wide_columns(self):
with option_context("mode.sim_interactive", True, "display.max_columns", 20):
df = DataFrame(
np.random.randn(5, 3), columns=["a" * 90, "b" * 90, "c" * 90]
)
rep_str = repr(df)
assert len(rep_str.splitlines()) == 20
def test_wide_repr_named(self):
with option_context("mode.sim_interactive", True, "display.max_columns", 20):
max_cols = get_option("display.max_columns")
df = DataFrame(tm.rands_array(25, size=(10, max_cols - 1)))
df.index.name = "DataFrame Index"
set_option("display.expand_frame_repr", False)
rep_str = repr(df)
set_option("display.expand_frame_repr", True)
wide_repr = repr(df)
assert rep_str != wide_repr
with option_context("display.width", 150):
wider_repr = repr(df)
assert len(wider_repr) < len(wide_repr)
for line in wide_repr.splitlines()[1::13]:
assert "DataFrame Index" in line
reset_option("display.expand_frame_repr")
def test_wide_repr_multiindex(self):
with option_context("mode.sim_interactive", True, "display.max_columns", 20):
midx = MultiIndex.from_arrays(tm.rands_array(5, size=(2, 10)))
max_cols = get_option("display.max_columns")
df = DataFrame(tm.rands_array(25, size=(10, max_cols - 1)), index=midx)
df.index.names = ["Level 0", "Level 1"]
set_option("display.expand_frame_repr", False)
rep_str = repr(df)
set_option("display.expand_frame_repr", True)
wide_repr = repr(df)
assert rep_str != wide_repr
with option_context("display.width", 150):
wider_repr = repr(df)
assert len(wider_repr) < len(wide_repr)
for line in wide_repr.splitlines()[1::13]:
assert "Level 0 Level 1" in line
reset_option("display.expand_frame_repr")
def test_wide_repr_multiindex_cols(self):
with option_context("mode.sim_interactive", True, "display.max_columns", 20):
max_cols = get_option("display.max_columns")
midx = MultiIndex.from_arrays(tm.rands_array(5, size=(2, 10)))
mcols = MultiIndex.from_arrays(tm.rands_array(3, size=(2, max_cols - 1)))
df = DataFrame(
tm.rands_array(25, (10, max_cols - 1)), index=midx, columns=mcols
)
df.index.names = ["Level 0", "Level 1"]
set_option("display.expand_frame_repr", False)
rep_str = repr(df)
set_option("display.expand_frame_repr", True)
wide_repr = repr(df)
assert rep_str != wide_repr
with option_context("display.width", 150, "display.max_columns", 20):
wider_repr = repr(df)
assert len(wider_repr) < len(wide_repr)
reset_option("display.expand_frame_repr")
def test_wide_repr_unicode(self):
with option_context("mode.sim_interactive", True, "display.max_columns", 20):
max_cols = 20
df = DataFrame(tm.rands_array(25, size=(10, max_cols - 1)))
set_option("display.expand_frame_repr", False)
rep_str = repr(df)
set_option("display.expand_frame_repr", True)
wide_repr = repr(df)
assert rep_str != wide_repr
with option_context("display.width", 150):
wider_repr = repr(df)
assert len(wider_repr) < len(wide_repr)
reset_option("display.expand_frame_repr")
def test_wide_repr_wide_long_columns(self):
with option_context("mode.sim_interactive", True):
df = DataFrame({"a": ["a" * 30, "b" * 30], "b": ["c" * 70, "d" * 80]})
result = repr(df)
assert "ccccc" in result
assert "ddddd" in result
def test_long_series(self):
n = 1000
s = Series(
np.random.randint(-50, 50, n),
index=[f"s{x:04d}" for x in range(n)],
dtype="int64",
)
import re
str_rep = str(s)
nmatches = len(re.findall("dtype", str_rep))
assert nmatches == 1
def test_index_with_nan(self):
# GH 2850
df = DataFrame(
{
"id1": {0: "1a3", 1: "9h4"},
"id2": {0: np.nan, 1: "d67"},
"id3": {0: "78d", 1: "79d"},
"value": {0: 123, 1: 64},
}
)
# multi-index
y = df.set_index(["id1", "id2", "id3"])
result = y.to_string()
expected = (
" value\nid1 id2 id3 \n"
"1a3 NaN 78d 123\n9h4 d67 79d 64"
)
assert result == expected
# index
y = df.set_index("id2")
result = y.to_string()
expected = (
" id1 id3 value\nid2 \n"
"NaN 1a3 78d 123\nd67 9h4 79d 64"
)
assert result == expected
# with append (this failed in 0.12)
y = df.set_index(["id1", "id2"]).set_index("id3", append=True)
result = y.to_string()
expected = (
" value\nid1 id2 id3 \n"
"1a3 NaN 78d 123\n9h4 d67 79d 64"
)
assert result == expected
# all-nan in mi
df2 = df.copy()
df2.loc[:, "id2"] = np.nan
y = df2.set_index("id2")
result = y.to_string()
expected = (
" id1 id3 value\nid2 \n"
"NaN 1a3 78d 123\nNaN 9h4 79d 64"
)
assert result == expected
# partial nan in mi
df2 = df.copy()
df2.loc[:, "id2"] = np.nan
y = df2.set_index(["id2", "id3"])
result = y.to_string()
expected = (
" id1 value\nid2 id3 \n"
"NaN 78d 1a3 123\n 79d 9h4 64"
)
assert result == expected
df = DataFrame(
{
"id1": {0: np.nan, 1: "9h4"},
"id2": {0: np.nan, 1: "d67"},
"id3": {0: np.nan, 1: "79d"},
"value": {0: 123, 1: 64},
}
)
y = df.set_index(["id1", "id2", "id3"])
result = y.to_string()
expected = (
" value\nid1 id2 id3 \n"
"NaN NaN NaN 123\n9h4 d67 79d 64"
)
assert result == expected
def test_to_string(self):
# big mixed
biggie = DataFrame(
{"A": np.random.randn(200), "B": tm.makeStringIndex(200)},
index=np.arange(200),
)
biggie.loc[:20, "A"] = np.nan
biggie.loc[:20, "B"] = np.nan
s = biggie.to_string()
buf = StringIO()
retval = biggie.to_string(buf=buf)
assert retval is None
assert buf.getvalue() == s
assert isinstance(s, str)
# print in right order
result = biggie.to_string(
columns=["B", "A"], col_space=17, float_format="%.5f".__mod__
)
lines = result.split("\n")
header = lines[0].strip().split()
joined = "\n".join([re.sub(r"\s+", " ", x).strip() for x in lines[1:]])
recons = read_csv(StringIO(joined), names=header, header=None, sep=" ")
tm.assert_series_equal(recons["B"], biggie["B"])
assert recons["A"].count() == biggie["A"].count()
assert (np.abs(recons["A"].dropna() - biggie["A"].dropna()) < 0.1).all()
# expected = ['B', 'A']
# assert header == expected
result = biggie.to_string(columns=["A"], col_space=17)
header = result.split("\n")[0].strip().split()
expected = ["A"]
assert header == expected
biggie.to_string(columns=["B", "A"], formatters={"A": lambda x: f"{x:.1f}"})
biggie.to_string(columns=["B", "A"], float_format=str)
biggie.to_string(columns=["B", "A"], col_space=12, float_format=str)
frame = DataFrame(index=np.arange(200))
frame.to_string()
def test_to_string_no_header(self):
df = DataFrame({"x": [1, 2, 3], "y": [4, 5, 6]})
df_s = df.to_string(header=False)
expected = "0 1 4\n1 2 5\n2 3 6"
assert df_s == expected
def test_to_string_specified_header(self):
df = DataFrame({"x": [1, 2, 3], "y": [4, 5, 6]})
df_s = df.to_string(header=["X", "Y"])
expected = " X Y\n0 1 4\n1 2 5\n2 3 6"
assert df_s == expected
msg = "Writing 2 cols but got 1 aliases"
with pytest.raises(ValueError, match=msg):
df.to_string(header=["X"])
def test_to_string_no_index(self):
# GH 16839, GH 13032
df = DataFrame({"x": [11, 22], "y": [33, -44], "z": ["AAA", " "]})
df_s = df.to_string(index=False)
# Leading space is expected for positive numbers.
expected = " x y z\n11 33 AAA\n22 -44 "
assert df_s == expected
df_s = df[["y", "x", "z"]].to_string(index=False)
expected = " y x z\n 33 11 AAA\n-44 22 "
assert df_s == expected
def test_to_string_line_width_no_index(self):
# GH 13998, GH 22505
df = DataFrame({"x": [1, 2, 3], "y": [4, 5, 6]})
df_s = df.to_string(line_width=1, index=False)
expected = " x \\\n 1 \n 2 \n 3 \n\n y \n 4 \n 5 \n 6 "
assert df_s == expected
df = DataFrame({"x": [11, 22, 33], "y": [4, 5, 6]})
df_s = df.to_string(line_width=1, index=False)
expected = " x \\\n11 \n22 \n33 \n\n y \n 4 \n 5 \n 6 "
assert df_s == expected
df = DataFrame({"x": [11, 22, -33], "y": [4, 5, -6]})
df_s = df.to_string(line_width=1, index=False)
expected = " x \\\n 11 \n 22 \n-33 \n\n y \n 4 \n 5 \n-6 "
assert df_s == expected
def test_to_string_float_formatting(self):
tm.reset_display_options()
fmt.set_option(
"display.precision",
5,
"display.column_space",
12,
"display.notebook_repr_html",
False,
)
df = DataFrame(
{"x": [0, 0.25, 3456.000, 12e45, 1.64e6, 1.7e8, 1.253456, np.pi, -1e6]}
)
df_s = df.to_string()
if _three_digit_exp():
expected = (
" x\n0 0.00000e+000\n1 2.50000e-001\n"
"2 3.45600e+003\n3 1.20000e+046\n4 1.64000e+006\n"
"5 1.70000e+008\n6 1.25346e+000\n7 3.14159e+000\n"
"8 -1.00000e+006"
)
else:
expected = (
" x\n0 0.00000e+00\n1 2.50000e-01\n"
"2 3.45600e+03\n3 1.20000e+46\n4 1.64000e+06\n"
"5 1.70000e+08\n6 1.25346e+00\n7 3.14159e+00\n"
"8 -1.00000e+06"
)
assert df_s == expected
df = DataFrame({"x": [3234, 0.253]})
df_s = df.to_string()
expected = " x\n0 3234.000\n1 0.253"
assert df_s == expected
tm.reset_display_options()
assert get_option("display.precision") == 6
df = DataFrame({"x": [1e9, 0.2512]})
df_s = df.to_string()
if _three_digit_exp():
expected = " x\n0 1.000000e+009\n1 2.512000e-001"
else:
expected = " x\n0 1.000000e+09\n1 2.512000e-01"
assert df_s == expected
def test_to_string_float_format_no_fixed_width(self):
# GH 21625
df = DataFrame({"x": [0.19999]})
expected = " x\n0 0.200"
assert df.to_string(float_format="%.3f") == expected
# GH 22270
df = DataFrame({"x": [100.0]})
expected = " x\n0 100"
assert df.to_string(float_format="%.0f") == expected
def test_to_string_small_float_values(self):
df = DataFrame({"a": [1.5, 1e-17, -5.5e-7]})
result = df.to_string()
# sadness per above
if _three_digit_exp():
expected = (
" a\n"
"0 1.500000e+000\n"
"1 1.000000e-017\n"
"2 -5.500000e-007"
)
else:
expected = (
" a\n"
"0 1.500000e+00\n"
"1 1.000000e-17\n"
"2 -5.500000e-07"
)
assert result == expected
# but not all exactly zero
df = df * 0
result = df.to_string()
expected = " 0\n0 0\n1 0\n2 -0"
def test_to_string_float_index(self):
index = Index([1.5, 2, 3, 4, 5])
df = DataFrame(np.arange(5), index=index)
result = df.to_string()
expected = " 0\n1.5 0\n2.0 1\n3.0 2\n4.0 3\n5.0 4"
assert result == expected
def test_to_string_complex_float_formatting(self):
# GH #25514, 25745
with option_context("display.precision", 5):
df = DataFrame(
{
"x": [
(0.4467846931321966 + 0.0715185102060818j),
(0.2739442392974528 + 0.23515228785438969j),
(0.26974928742135185 + 0.3250604054898979j),
(-1j),
]
}
)
result = df.to_string()
expected = (
" x\n0 0.44678+0.07152j\n"
"1 0.27394+0.23515j\n"
"2 0.26975+0.32506j\n"
"3 -0.00000-1.00000j"
)
assert result == expected
def test_to_string_ascii_error(self):
data = [
(
"0 ",
" .gitignore ",
" 5 ",
" \xe2\x80\xa2\xe2\x80\xa2\xe2\x80\xa2\xe2\x80\xa2\xe2\x80\xa2",
)
]
df = DataFrame(data)
# it works!
repr(df)
def test_to_string_int_formatting(self):
df = DataFrame({"x": [-15, 20, 25, -35]})
assert issubclass(df["x"].dtype.type, np.integer)
output = df.to_string()
expected = " x\n0 -15\n1 20\n2 25\n3 -35"
assert output == expected
def test_to_string_index_formatter(self):
df = DataFrame([range(5), range(5, 10), range(10, 15)])
rs = df.to_string(formatters={"__index__": lambda x: "abc"[x]})
xp = """\
0 1 2 3 4
a 0 1 2 3 4
b 5 6 7 8 9
c 10 11 12 13 14\
"""
assert rs == xp
def test_to_string_left_justify_cols(self):
tm.reset_display_options()
df = DataFrame({"x": [3234, 0.253]})
df_s = df.to_string(justify="left")
expected = " x \n0 3234.000\n1 0.253"
assert df_s == expected
def test_to_string_format_na(self):
tm.reset_display_options()
df = DataFrame(
{
"A": [np.nan, -1, -2.1234, 3, 4],
"B": [np.nan, "foo", "foooo", "fooooo", "bar"],
}
)
result = df.to_string()
expected = (
" A B\n"
"0 NaN NaN\n"
"1 -1.0000 foo\n"
"2 -2.1234 foooo\n"
"3 3.0000 fooooo\n"
"4 4.0000 bar"
)
assert result == expected
df = DataFrame(
{
"A": [np.nan, -1.0, -2.0, 3.0, 4.0],
"B": [np.nan, "foo", "foooo", "fooooo", "bar"],
}
)
result = df.to_string()
expected = (
" A B\n"
"0 NaN NaN\n"
"1 -1.0 foo\n"
"2 -2.0 foooo\n"
"3 3.0 fooooo\n"
"4 4.0 bar"
)
assert result == expected
def test_to_string_format_inf(self):
# Issue #24861
tm.reset_display_options()
df = DataFrame(
{
"A": [-np.inf, np.inf, -1, -2.1234, 3, 4],
"B": [-np.inf, np.inf, "foo", "foooo", "fooooo", "bar"],
}
)
result = df.to_string()
expected = (
" A B\n"
"0 -inf -inf\n"
"1 inf inf\n"
"2 -1.0000 foo\n"
"3 -2.1234 foooo\n"
"4 3.0000 fooooo\n"
"5 4.0000 bar"
)
assert result == expected
df = DataFrame(
{
"A": [-np.inf, np.inf, -1.0, -2.0, 3.0, 4.0],
"B": [-np.inf, np.inf, "foo", "foooo", "fooooo", "bar"],
}
)
result = df.to_string()
expected = (
" A B\n"
"0 -inf -inf\n"
"1 inf inf\n"
"2 -1.0 foo\n"
"3 -2.0 foooo\n"
"4 3.0 fooooo\n"
"5 4.0 bar"
)
assert result == expected
def test_to_string_decimal(self):
# Issue #23614
df = DataFrame({"A": [6.0, 3.1, 2.2]})
expected = " A\n0 6,0\n1 3,1\n2 2,2"
assert df.to_string(decimal=",") == expected
def test_to_string_line_width(self):
df = DataFrame(123, index=range(10, 15), columns=range(30))
s = df.to_string(line_width=80)
assert max(len(line) for line in s.split("\n")) == 80
def test_show_dimensions(self):
df = DataFrame(123, index=range(10, 15), columns=range(30))
with option_context(
"display.max_rows",
10,
"display.max_columns",
40,
"display.width",
500,
"display.expand_frame_repr",
"info",
"display.show_dimensions",
True,
):
assert "5 rows" in str(df)
assert "5 rows" in df._repr_html_()
with option_context(
"display.max_rows",
10,
"display.max_columns",
40,
"display.width",
500,
"display.expand_frame_repr",
"info",
"display.show_dimensions",
False,
):
assert "5 rows" not in str(df)
assert "5 rows" not in df._repr_html_()
with option_context(
"display.max_rows",
2,
"display.max_columns",
2,
"display.width",
500,
"display.expand_frame_repr",
"info",
"display.show_dimensions",
"truncate",
):
assert "5 rows" in str(df)
assert "5 rows" in df._repr_html_()
with option_context(
"display.max_rows",
10,
"display.max_columns",
40,
"display.width",
500,
"display.expand_frame_repr",
"info",
"display.show_dimensions",
"truncate",
):
assert "5 rows" not in str(df)
assert "5 rows" not in df._repr_html_()
def test_repr_html(self, float_frame):
df = float_frame
df._repr_html_()
fmt.set_option("display.max_rows", 1, "display.max_columns", 1)
df._repr_html_()
fmt.set_option("display.notebook_repr_html", False)
df._repr_html_()
tm.reset_display_options()
df = | DataFrame([[1, 2], [3, 4]]) | pandas.DataFrame |
# coding: utf-8
from __future__ import division
import numpy as np
import pandas as pd
from sklearn import metrics
import lightgbm as lgb
import time
from multiprocessing import cpu_count
import warnings
warnings.filterwarnings('ignore')
# Constant definitions
ROOT_PATH = '../'
ONLINE = 1
target = 'label'
train_len = 4999
threshold = 0.5
########################################### Helper functions ###########################################
def log(info):
print(time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + ' ' + str(info))
def merge_feat_count(df, df_feat, columns_groupby, new_column_name, type='int'):
df_count = pd.DataFrame(df_feat.groupby(columns_groupby).size()).fillna(0).astype(type).reset_index()
df_count.columns = columns_groupby + [new_column_name]
df = df.merge(df_count, on=columns_groupby, how='left')
return df, [new_column_name]
def merge_feat_onehot_count(df, df_feat, columns_groupby, prefix, type='int'):
df_count = df_feat.groupby(columns_groupby).size().unstack().fillna(0).astype(type).reset_index()
df_count.columns = [i if i == columns_groupby[0] else prefix + '_' + str(i) for i in df_count.columns]
df = df.merge(df_count, on=columns_groupby[0], how='left')
return df, list(np.delete(df_count.columns.values, 0))
def merge_feat_nunique(df, df_feat, columns_groupby, column, new_column_name, type='int'):
df_nunique = pd.DataFrame(df_feat.groupby(columns_groupby)[column].nunique()).fillna(0).astype(type).reset_index()
df_nunique.columns = columns_groupby + [new_column_name]
df = df.merge(df_nunique, on=columns_groupby, how='left')
return df, [new_column_name]
def merge_feat_min(df, df_feat, columns_groupby, column, new_column_name, type='float'):
df_min = pd.DataFrame(df_feat.groupby(columns_groupby)[column].min()).fillna(0).astype(type).reset_index()
df_min.columns = columns_groupby + [new_column_name]
df = df.merge(df_min, on=columns_groupby, how='left')
return df, [new_column_name]
def merge_feat_max(df, df_feat, columns_groupby, column, new_column_name, type='float'):
df_max = pd.DataFrame(df_feat.groupby(columns_groupby)[column].max()).fillna(0).astype(type).reset_index()
df_max.columns = columns_groupby + [new_column_name]
df = df.merge(df_max, on=columns_groupby, how='left')
return df, [new_column_name]
def merge_feat_mean(df, df_feat, columns_groupby, column, new_column_name, type='float'):
df_mean = pd.DataFrame(df_feat.groupby(columns_groupby)[column].mean()).fillna(0).astype(type).reset_index()
df_mean.columns = columns_groupby + [new_column_name]
df = df.merge(df_mean, on=columns_groupby, how='left')
return df, [new_column_name]
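# All of the merge_feat_* helpers above follow the same pattern: aggregate df_feat by the groupby
# key(s), name the aggregate column, left-join it back onto df and return the enlarged frame plus
# the list of added column names, e.g. (illustrative only, with hypothetical frames and columns):
#   df, cols1 = merge_feat_count(df, df_log, ['uid'], 'uid_log_cnt')
#   df, cols2 = merge_feat_nunique(df, df_log, ['uid'], 'opp_num', 'uid_opp_nunique')
#   feature_cols = cols1 + cols2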
def eval_auc_f1(preds, dtrain):
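    """Custom LightGBM feval: blends 0.6 * AUC with 0.4 * F1 (scores binarised at the global `threshold`); higher is better."""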
df = pd.DataFrame({'y_true': dtrain.get_label(), 'y_score': preds})
df['y_pred'] = df['y_score'].apply(lambda x: 1 if x >= threshold else 0)
auc = metrics.roc_auc_score(df.y_true, df.y_score)
f1 = metrics.f1_score(df.y_true, df.y_pred)
return 'feval', (auc * 0.6 + f1 * 0.4), True
def lgb_cv(train_x, train_y, params, rounds, folds):
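    """Run LightGBM cross-validation; returns (number of rounds actually run, final blended feval score, final AUC)."""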
    start = time.time()
log(str(train_x.columns))
dtrain = lgb.Dataset(train_x, label=train_y)
log('run cv: ' + 'round: ' + str(rounds))
res = lgb.cv(params, dtrain, rounds, nfold=folds,
metrics=['eval_auc_f1', 'auc'], feval=eval_auc_f1,
early_stopping_rounds=200, verbose_eval=5)
    elapsed = (time.time() - start)
log('Time used:' + str(elapsed) + 's')
return len(res['feval-mean']), res['feval-mean'][len(res['feval-mean']) - 1], res['auc-mean'][len(res['auc-mean']) - 1]
def lgb_train_predict(train_x, train_y, test_x, params, rounds):
    start = time.time()
log(str(train_x.columns))
dtrain = lgb.Dataset(train_x, label=train_y)
valid_sets = [dtrain]
model = lgb.train(params, dtrain, rounds, valid_sets, feval=eval_auc_f1, verbose_eval=5)
pred = model.predict(test_x)
    elapsed = (time.time() - start)
log('Time used:' + str(elapsed) + 's')
return model, pred
def store_result(test_index, pred, threshold, name):
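    """Rank samples by predicted probability, label those above `threshold` as 1 and write a header-less (uid, label) csv."""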
result = pd.DataFrame({'uid': test_index, 'prob': pred})
result = result.sort_values('prob', ascending=False)
result['label'] = 0
result.loc[result.prob > threshold, 'label'] = 1
result.to_csv('../data/output/sub/' + name + '.csv', index=0, header=0, columns=['uid', 'label'])
return result
########################################### Read data ###########################################
train = pd.read_csv(ROOT_PATH + 'data/input/train/uid_train.txt', header=None, sep='\t')
train.columns = ['uid', 'label']
train_voice = pd.read_csv(ROOT_PATH + 'data/input/train/voice_train.txt', header=None, sep='\t')
##### file path
### input
# data_set keys and lebels
path_df_part_1_uic_label = "df_part_1_uic_label.csv"
path_df_part_2_uic_label = "df_part_2_uic_label.csv"
path_df_part_3_uic = "df_part_3_uic.csv"
# data_set features
path_df_part_1_U = "df_part_1_U.csv"
path_df_part_1_I = "df_part_1_I.csv"
path_df_part_1_C = "df_part_1_C.csv"
path_df_part_1_IC = "df_part_1_IC.csv"
path_df_part_1_UI = "df_part_1_UI.csv"
path_df_part_1_UC = "df_part_1_UC.csv"
path_df_part_2_U = "df_part_2_U.csv"
path_df_part_2_I = "df_part_2_I.csv"
path_df_part_2_C = "df_part_2_C.csv"
path_df_part_2_IC = "df_part_2_IC.csv"
path_df_part_2_UI = "df_part_2_UI.csv"
path_df_part_2_UC = "df_part_2_UC.csv"
path_df_part_3_U = "df_part_3_U.csv"
path_df_part_3_I = "df_part_3_I.csv"
path_df_part_3_C = "df_part_3_C.csv"
path_df_part_3_IC = "df_part_3_IC.csv"
path_df_part_3_UI = "df_part_3_UI.csv"
path_df_part_3_UC = "df_part_3_UC.csv"
### out file
### intermediate file
# data partition with different labels
path_df_part_1_uic_label_0 = "df_part_1_uic_label_0.csv"
path_df_part_1_uic_label_1 = "df_part_1_uic_label_1.csv"
path_df_part_2_uic_label_0 = "df_part_2_uic_label_0.csv"
path_df_part_2_uic_label_1 = "df_part_2_uic_label_1.csv"
# training set keys uic-label with k_means clusters' label
path_df_part_1_uic_label_cluster = "df_part_1_uic_label_cluster.csv"
path_df_part_2_uic_label_cluster = "df_part_2_uic_label_cluster.csv"
# scalers for data standardization, stored as python pickles
# (one for each part's features)
path_df_part_1_scaler = "df_part_1_scaler"
path_df_part_2_scaler = "df_part_2_scaler"
import pandas as pd
import numpy as np
def df_read(path, mode='r'):
    '''Load the csv file at the given path into a dataframe.
    '''
path_df = open(path, mode)
try:
df = pd.read_csv(path_df, index_col=False)
finally:
path_df.close()
return df
def subsample(df, sub_size):
    '''Randomly subsample a dataframe.
    @param df: dataframe
    @param sub_size: size of the subsample
    @return sub-dataframe with the same structure as df
    '''
if sub_size >= len(df):
return df
else:
return df.sample(n=sub_size)
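# e.g. subsample(df_cluster_k, 1000) would keep at most 1000 rows of a cluster's negatives
# (illustrative only; the call sites for subsample() are not in this excerpt).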
########################################################################
'''Step 1: divide the data into positive and negative subsets by their u-i-c-label keys
    N.B. we first generate the u-i-c keys, then merge in the feature tables and operate chunk by chunk;
    this roundabout procedure is designed to keep memory usage low on a small PC.
'''
df_part_1_uic_label = df_read(path_df_part_1_uic_label) # loading total keys
df_part_2_uic_label = df_read(path_df_part_2_uic_label)
df_part_1_uic_label_0 = df_part_1_uic_label[df_part_1_uic_label['label'] == 0]
df_part_1_uic_label_1 = df_part_1_uic_label[df_part_1_uic_label['label'] == 1]
df_part_2_uic_label_0 = df_part_2_uic_label[df_part_2_uic_label['label'] == 0]
df_part_2_uic_label_1 = df_part_2_uic_label[df_part_2_uic_label['label'] == 1]
df_part_1_uic_label_0.to_csv(path_df_part_1_uic_label_0, index=False)
df_part_1_uic_label_1.to_csv(path_df_part_1_uic_label_1, index=False)
df_part_2_uic_label_0.to_csv(path_df_part_2_uic_label_0, index=False)
df_part_2_uic_label_1.to_csv(path_df_part_2_uic_label_1, index=False)
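# Each part now has its negatives (label 0) and positives (label 1) in separate csv files, so the
# (typically much larger) negative set can be processed on its own in the clustering step below.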
#######################################################################
'''Step 2: cluster the negative subsets
    using mini-batch k-means (the code below uses n_clusters=1000)
'''
# clustering based on sklearn
from sklearn import preprocessing
from sklearn.cluster import MiniBatchKMeans
import pickle
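# The same two-pass, chunked pipeline is applied to part_1 and then part_2 below:
#   pass 1 - stream the negative keys in chunks, merge on the pre-computed U/I/C/IC/UI/UC feature
#            tables and incrementally fit a StandardScaler (partial_fit);
#   pass 2 - stream the chunks again, standardise them with that scaler and incrementally fit
#            MiniBatchKMeans, collecting a cluster label for every negative example.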
##### part_1 #####
# loading features
df_part_1_U = df_read(path_df_part_1_U)
df_part_1_I = df_read(path_df_part_1_I)
df_part_1_C = df_read(path_df_part_1_C)
df_part_1_IC = df_read(path_df_part_1_IC)
df_part_1_UI = df_read(path_df_part_1_UI)
df_part_1_UC = df_read(path_df_part_1_UC)
# process in chunks because the full set of u-i pairs is too large to hold in memory;
# first pass: incrementally fit a scaler so the data can be standardised at scale
scaler_1 = preprocessing.StandardScaler()
batch = 0
for df_part_1_uic_label_0 in pd.read_csv(open(path_df_part_1_uic_label_0, 'r'), chunksize=150000):
try:
        # construct part_1's sub-training set for this chunk
train_data_df_part_1 = pd.merge(df_part_1_uic_label_0, df_part_1_U, how='left', on=['user_id'])
train_data_df_part_1 = pd.merge(train_data_df_part_1, df_part_1_I, how='left', on=['item_id'])
train_data_df_part_1 = pd.merge(train_data_df_part_1, df_part_1_C, how='left', on=['item_category'])
train_data_df_part_1 = pd.merge(train_data_df_part_1, df_part_1_IC, how='left', on=['item_id', 'item_category'])
train_data_df_part_1 = pd.merge(train_data_df_part_1, df_part_1_UI, how='left',
on=['user_id', 'item_id', 'item_category', 'label'])
train_data_df_part_1 = pd.merge(train_data_df_part_1, df_part_1_UC, how='left', on=['user_id', 'item_category'])
# getting all the complete features for clustering
        train_X_1 = train_data_df_part_1[
['u_b1_count_in_6', 'u_b2_count_in_6', 'u_b3_count_in_6', 'u_b4_count_in_6', 'u_b_count_in_6',
'u_b1_count_in_3', 'u_b2_count_in_3', 'u_b3_count_in_3', 'u_b4_count_in_3', 'u_b_count_in_3',
'u_b1_count_in_1', 'u_b2_count_in_1', 'u_b3_count_in_1', 'u_b4_count_in_1', 'u_b_count_in_1',
'u_b4_rate',
'i_u_count_in_6', 'i_u_count_in_3', 'i_u_count_in_1',
'i_b1_count_in_6', 'i_b2_count_in_6', 'i_b3_count_in_6', 'i_b4_count_in_6', 'i_b_count_in_6',
'i_b1_count_in_3', 'i_b2_count_in_3', 'i_b3_count_in_3', 'i_b4_count_in_3', 'i_b_count_in_3',
'i_b1_count_in_1', 'i_b2_count_in_1', 'i_b3_count_in_1', 'i_b4_count_in_1', 'i_b_count_in_1',
'i_b4_rate',
'c_b1_count_in_6', 'c_b2_count_in_6', 'c_b3_count_in_6', 'c_b4_count_in_6', 'c_b_count_in_6',
'c_b1_count_in_3', 'c_b2_count_in_3', 'c_b3_count_in_3', 'c_b4_count_in_3', 'c_b_count_in_3',
'c_b1_count_in_1', 'c_b2_count_in_1', 'c_b3_count_in_1', 'c_b4_count_in_1', 'c_b_count_in_1',
'c_b4_rate',
'ic_u_rank_in_c', 'ic_b_rank_in_c', 'ic_b4_rank_in_c',
'ui_b1_count_in_6', 'ui_b2_count_in_6', 'ui_b3_count_in_6', 'ui_b4_count_in_6', 'ui_b_count_in_6',
'ui_b1_count_in_3', 'ui_b2_count_in_3', 'ui_b3_count_in_3', 'ui_b4_count_in_3', 'ui_b_count_in_3',
'ui_b1_count_in_1', 'ui_b2_count_in_1', 'ui_b3_count_in_1', 'ui_b4_count_in_1', 'ui_b_count_in_1',
'ui_b_count_rank_in_u', 'ui_b_count_rank_in_uc',
'uc_b1_count_in_6', 'uc_b2_count_in_6', 'uc_b3_count_in_6', 'uc_b4_count_in_6', 'uc_b_count_in_6',
'uc_b1_count_in_3', 'uc_b2_count_in_3', 'uc_b3_count_in_3', 'uc_b4_count_in_3', 'uc_b_count_in_3',
'uc_b1_count_in_1', 'uc_b2_count_in_1', 'uc_b3_count_in_1', 'uc_b4_count_in_1', 'uc_b_count_in_1',
             'uc_b_count_rank_in_u']].values
        # incrementally fit the scaler on this chunk
scaler_1.partial_fit(train_X_1)
batch += 1
print('chunk %d done.' % batch)
except StopIteration:
print("finish.")
break
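# scaler_1 now holds the running mean/variance of every feature across all of part_1's negatives.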
# initialise the mini-batch k-means model for part_1
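# 1000 micro-clusters are fitted in mini-batches of 500; the low reassignment_ratio makes sparsely
# populated centres less likely to be reassigned between batches.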
mbk_1 = MiniBatchKMeans(init='k-means++', n_clusters=1000, batch_size=500, reassignment_ratio=10 ** -4)
classes_1 = []
batch = 0
for df_part_1_uic_label_0 in pd.read_csv(open(path_df_part_1_uic_label_0, 'r'), chunksize=15000):
try:
        # construct part_1's sub-training set for this chunk
train_data_df_part_1 = pd.merge(df_part_1_uic_label_0, df_part_1_U, how='left', on=['user_id'])
train_data_df_part_1 = pd.merge(train_data_df_part_1, df_part_1_I, how='left', on=['item_id'])
train_data_df_part_1 = pd.merge(train_data_df_part_1, df_part_1_C, how='left', on=['item_category'])
train_data_df_part_1 = pd.merge(train_data_df_part_1, df_part_1_IC, how='left', on=['item_id', 'item_category'])
train_data_df_part_1 = pd.merge(train_data_df_part_1, df_part_1_UI, how='left',
on=['user_id', 'item_id', 'item_category', 'label'])
train_data_df_part_1 = pd.merge(train_data_df_part_1, df_part_1_UC, how='left', on=['user_id', 'item_category'])
        train_X_1 = train_data_df_part_1[
['u_b1_count_in_6', 'u_b2_count_in_6', 'u_b3_count_in_6', 'u_b4_count_in_6', 'u_b_count_in_6',
'u_b1_count_in_3', 'u_b2_count_in_3', 'u_b3_count_in_3', 'u_b4_count_in_3', 'u_b_count_in_3',
'u_b1_count_in_1', 'u_b2_count_in_1', 'u_b3_count_in_1', 'u_b4_count_in_1', 'u_b_count_in_1',
'u_b4_rate',
'i_u_count_in_6', 'i_u_count_in_3', 'i_u_count_in_1',
'i_b1_count_in_6', 'i_b2_count_in_6', 'i_b3_count_in_6', 'i_b4_count_in_6', 'i_b_count_in_6',
'i_b1_count_in_3', 'i_b2_count_in_3', 'i_b3_count_in_3', 'i_b4_count_in_3', 'i_b_count_in_3',
'i_b1_count_in_1', 'i_b2_count_in_1', 'i_b3_count_in_1', 'i_b4_count_in_1', 'i_b_count_in_1',
'i_b4_rate',
'c_b1_count_in_6', 'c_b2_count_in_6', 'c_b3_count_in_6', 'c_b4_count_in_6', 'c_b_count_in_6',
'c_b1_count_in_3', 'c_b2_count_in_3', 'c_b3_count_in_3', 'c_b4_count_in_3', 'c_b_count_in_3',
'c_b1_count_in_1', 'c_b2_count_in_1', 'c_b3_count_in_1', 'c_b4_count_in_1', 'c_b_count_in_1',
'c_b4_rate',
'ic_u_rank_in_c', 'ic_b_rank_in_c', 'ic_b4_rank_in_c',
'ui_b1_count_in_6', 'ui_b2_count_in_6', 'ui_b3_count_in_6', 'ui_b4_count_in_6', 'ui_b_count_in_6',
'ui_b1_count_in_3', 'ui_b2_count_in_3', 'ui_b3_count_in_3', 'ui_b4_count_in_3', 'ui_b_count_in_3',
'ui_b1_count_in_1', 'ui_b2_count_in_1', 'ui_b3_count_in_1', 'ui_b4_count_in_1', 'ui_b_count_in_1',
'ui_b_count_rank_in_u', 'ui_b_count_rank_in_uc',
'uc_b1_count_in_6', 'uc_b2_count_in_6', 'uc_b3_count_in_6', 'uc_b4_count_in_6', 'uc_b_count_in_6',
'uc_b1_count_in_3', 'uc_b2_count_in_3', 'uc_b3_count_in_3', 'uc_b4_count_in_3', 'uc_b_count_in_3',
'uc_b1_count_in_1', 'uc_b2_count_in_1', 'uc_b3_count_in_1', 'uc_b4_count_in_1', 'uc_b_count_in_1',
             'uc_b_count_rank_in_u']].values
# feature standardization
standardized_train_X_1 = scaler_1.transform(train_X_1)
# fit clustering model
mbk_1.partial_fit(standardized_train_X_1)
classes_1 = np.append(classes_1, mbk_1.labels_)
batch += 1
print('chunk %d done.' % batch)
except StopIteration:
print(" ------------ k-means finished on part 1 ------------.")
break
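# classes_1 now holds one cluster label per negative u-i-c key of part_1 (in file order); these
# labels are presumably what gets written to df_part_1_uic_label_cluster.csv for cluster-wise
# subsampling of the negatives.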
del (df_part_1_U)
del (df_part_1_I)
del (df_part_1_C)
del (df_part_1_IC)
del (df_part_1_UI)
del (df_part_1_UC)
##### part_2 #####
# loading features
df_part_2_U = df_read(path_df_part_2_U)
df_part_2_I = df_read(path_df_part_2_I)
df_part_2_C = df_read(path_df_part_2_C)
df_part_2_IC = df_read(path_df_part_2_IC)
df_part_2_UI = df_read(path_df_part_2_UI)
df_part_2_UC = df_read(path_df_part_2_UC)
# process in chunks because the full set of u-i pairs is too large to hold in memory;
# first pass: incrementally fit a scaler so the data can be standardised at scale
scaler_2 = preprocessing.StandardScaler()
batch = 0
for df_part_2_uic_label_0 in pd.read_csv(open(path_df_part_2_uic_label_0, 'r'), chunksize=150000):
try:
        # construct part_2's sub-training set for this chunk
train_data_df_part_2 = pd.merge(df_part_2_uic_label_0, df_part_2_U, how='left', on=['user_id'])
train_data_df_part_2 = pd.merge(train_data_df_part_2, df_part_2_I, how='left', on=['item_id'])
train_data_df_part_2 = pd.merge(train_data_df_part_2, df_part_2_C, how='left', on=['item_category'])
train_data_df_part_2 = pd.merge(train_data_df_part_2, df_part_2_IC, how='left', on=['item_id', 'item_category'])
train_data_df_part_2 = pd.merge(train_data_df_part_2, df_part_2_UI, how='left',
on=['user_id', 'item_id', 'item_category', 'label'])
train_data_df_part_2 = pd.merge(train_data_df_part_2, df_part_2_UC, how='left', on=['user_id', 'item_category'])
        train_X_2 = train_data_df_part_2[
['u_b1_count_in_6', 'u_b2_count_in_6', 'u_b3_count_in_6', 'u_b4_count_in_6', 'u_b_count_in_6',
'u_b1_count_in_3', 'u_b2_count_in_3', 'u_b3_count_in_3', 'u_b4_count_in_3', 'u_b_count_in_3',
'u_b1_count_in_1', 'u_b2_count_in_1', 'u_b3_count_in_1', 'u_b4_count_in_1', 'u_b_count_in_1',
'u_b4_rate',
'i_u_count_in_6', 'i_u_count_in_3', 'i_u_count_in_1',
'i_b1_count_in_6', 'i_b2_count_in_6', 'i_b3_count_in_6', 'i_b4_count_in_6', 'i_b_count_in_6',
'i_b1_count_in_3', 'i_b2_count_in_3', 'i_b3_count_in_3', 'i_b4_count_in_3', 'i_b_count_in_3',
'i_b1_count_in_1', 'i_b2_count_in_1', 'i_b3_count_in_1', 'i_b4_count_in_1', 'i_b_count_in_1',
'i_b4_rate',
'c_b1_count_in_6', 'c_b2_count_in_6', 'c_b3_count_in_6', 'c_b4_count_in_6', 'c_b_count_in_6',
'c_b1_count_in_3', 'c_b2_count_in_3', 'c_b3_count_in_3', 'c_b4_count_in_3', 'c_b_count_in_3',
'c_b1_count_in_1', 'c_b2_count_in_1', 'c_b3_count_in_1', 'c_b4_count_in_1', 'c_b_count_in_1',
'c_b4_rate',
'ic_u_rank_in_c', 'ic_b_rank_in_c', 'ic_b4_rank_in_c',
'ui_b1_count_in_6', 'ui_b2_count_in_6', 'ui_b3_count_in_6', 'ui_b4_count_in_6', 'ui_b_count_in_6',
'ui_b1_count_in_3', 'ui_b2_count_in_3', 'ui_b3_count_in_3', 'ui_b4_count_in_3', 'ui_b_count_in_3',
'ui_b1_count_in_1', 'ui_b2_count_in_1', 'ui_b3_count_in_1', 'ui_b4_count_in_1', 'ui_b_count_in_1',
'ui_b_count_rank_in_u', 'ui_b_count_rank_in_uc',
'uc_b1_count_in_6', 'uc_b2_count_in_6', 'uc_b3_count_in_6', 'uc_b4_count_in_6', 'uc_b_count_in_6',
'uc_b1_count_in_3', 'uc_b2_count_in_3', 'uc_b3_count_in_3', 'uc_b4_count_in_3', 'uc_b_count_in_3',
'uc_b1_count_in_1', 'uc_b2_count_in_1', 'uc_b3_count_in_1', 'uc_b4_count_in_1', 'uc_b_count_in_1',
             'uc_b_count_rank_in_u']].values
# fit the scaler
scaler_2.partial_fit(train_X_2)
batch += 1
print('chunk %d done.' % batch)
except StopIteration:
print("finish.")
break
# initialise the mini-batch k-means model for part_2
mbk_2 = MiniBatchKMeans(init='k-means++', n_clusters=1000, batch_size=500, reassignment_ratio=10 ** -4)
# second pass: stream the chunks again, standardise them and cluster part_2's negatives
batch = 0
classes_2 = []
for df_part_2_uic_label_0 in pd.read_csv(open(path_df_part_2_uic_label_0, 'r'), chunksize=15000):
try:
        # construct part_2's sub-training set for this chunk
train_data_df_part_2 = pd.merge(df_part_2_uic_label_0, df_part_2_U, how='left', on=['user_id'])
        train_data_df_part_2 = pd.merge(train_data_df_part_2, df_part_2_I, how='left', on=['item_id'])
"""
Tests the coalescence tree object.
"""
import os
import random
import shutil
import sqlite3
import sys
import unittest
import numpy as np
import pandas as pd
from pandas.testing import assert_frame_equal
from setup_tests import setUpAll, tearDownAll, skipLongTest
from pycoalescence import Simulation
from pycoalescence.coalescence_tree import CoalescenceTree, get_parameter_description
from pycoalescence.sqlite_connection import check_sql_table_exist
def setUpModule():
"""
Creates the output directory and moves logging files
"""
setUpAll()
t = CoalescenceTree("sample/sample.db")
t.clear_calculations()
def tearDownModule():
"""
Removes the output directory
"""
tearDownAll()
class TestNullSimulationErrors(unittest.TestCase):
"""
Tests that simulations that are not linked raise the correct error.
"""
def testRaisesError(self):
"""
Tests that a null simulation will raise an error when any operation is performed.
"""
t = CoalescenceTree()
with self.assertRaises(RuntimeError):
t.get_species_richness()
with self.assertRaises(RuntimeError):
t.calculate_fragment_richness()
with self.assertRaises(RuntimeError):
t.calculate_alpha_diversity()
with self.assertRaises(RuntimeError):
t.calculate_beta_diversity()
with self.assertRaises(RuntimeError):
t.calculate_fragment_abundances()
with self.assertRaises(RuntimeError):
t.calculate_fragment_octaves()
with self.assertRaises(RuntimeError):
t.calculate_octaves()
with self.assertRaises(RuntimeError):
t.get_fragment_list()
with self.assertRaises(RuntimeError):
t.get_alpha_diversity()
with self.assertRaises(RuntimeError):
t.get_beta_diversity()
with self.assertRaises(RuntimeError):
t.get_community_references()
with self.assertRaises(RuntimeError):
t.get_metacommunity_references()
with self.assertRaises(RuntimeError):
t.get_species_locations()
with self.assertRaises(RuntimeError):
t.get_species_abundances()
with self.assertRaises(RuntimeError):
t.get_species_list()
with self.assertRaises(RuntimeError):
_ = t.get_simulation_parameters()
with self.assertRaises(RuntimeError):
t.get_fragment_abundances("null", 1)
with self.assertRaises(RuntimeError):
t.get_species_richness()
with self.assertRaises(RuntimeError):
t.get_octaves(1)
class TestParameterDescriptions(unittest.TestCase):
"""
Tests that program correctly reads from the parameter_descriptions.json dictionary.
"""
def testReadsCorrectly(self):
"""
Tests that the dictionary is read correctly.
"""
tmp_dict = {
"habitat_change_rate": "the rate of change from present density maps to historic density maps",
"sample_file": "the sample area map for spatially selective sampling. Can be null to sample all " "cells",
"sample_x": "the sample map x dimension",
"sample_y": "the sample map y dimension",
"sample_x_offset": "the sample x map offset from the grid",
"sample_y_offset": "the sample y map offset from the grid",
"output_dir": "the output directory for the simulation database",
"seed": "the random seed to start the simulation, for repeatability",
"coarse_map_x": "the coarse density map x dimension",
"fine_map_file": "the density map file location at the finer resolution, covering a smaller area",
"tau": "the tau dispersal value for fat-tailed dispersal",
"grid_y": "the simulated grid y dimension",
"dispersal_relative_cost": "the relative rate of moving through non-habitat compared to habitat",
"fine_map_y_offset": "the number of cells the fine map is offset from the sample map in the y "
"dimension, at the fine resolution",
"gen_since_historical": "the number of generations that occur before the historical, or historic,"
" state is reached",
"dispersal_method": "the dispersal method used. Can be one of 'normal', 'norm-uniform' or " "'fat-tail'.",
"historical_fine_map": "the historical, or historic, coarse density map file location",
"coarse_map_scale": "the scale of the coarse density map compared to the fine density map. 1 "
"means equal density",
"grid_x": "the simulated grid x dimension",
"coarse_map_file": "the density map file location at the coarser resolution, covering a larger " "area",
"min_num_species": "the minimum number of species known to exist (currently has no effect)",
"historical_coarse_map": "the historical, or historic, coarse density map file location",
"m_probability": "the probability of choosing from the uniform dispersal kernel in normal-uniform"
" dispersal",
"sigma": "the sigma dispersal value for normal, fat-tailed and normal-uniform dispersals",
"deme": "the number of individuals inhabiting a cell at a map density of 1",
"time_config_file": "will be 'set' if temporal sampling is used, 'null' otherwise",
"coarse_map_y": "the coarse density map y dimension",
"fine_map_x": "the fine density map x dimension",
"coarse_map_y_offset": "the number of cells the coarse map is offset from the fine map in the y "
"dimension, at the fine resolution",
"cutoff": "the maximal dispersal distance possible, for normal-uniform dispersal",
"fine_map_y": "the fine density map y dimension",
"sample_size": "the proportion of individuals to sample from each cell (0-1)",
"fine_map_x_offset": "the number of cells the fine map is offset from the sample map in the x "
"dimension, at the fine resolution",
"speciation_rate": "the minimum speciation rate the simulation was run with",
"task": "the job or task reference number given to this simulation",
"coarse_map_x_offset": "the number of cells the coarse map is offset from the fine map in the x "
"dimension, at the fine resolution",
"landscape_type": "if false, landscapes have hard boundaries. Otherwise, can be infinite, "
"with 1s everywhere, or tiled_coarse or tiled_fine for repeated units of tiled "
"maps",
"max_time": "the maximum simulation time to run for (in seconds)",
"sim_complete": "set to true upon simulation completion, false for incomplete simulations",
"protracted": "if true, the simulation was run with protracted speciation.",
"min_speciation_gen": "the minimum number of generations required before speciation can occur",
"max_speciation_gen": "the maximum number of generations a lineage can exist before it is " "speciated",
"dispersal_map": "a tif file where rows represent cumulative dispersal probability to every other "
"cell, using the row number = x + (y * x_max)",
}
t = CoalescenceTree("sample/sample.db")
sim_output = t.get_simulation_parameters()
for key in sim_output.keys():
self.assertIn(key, get_parameter_description().keys())
self.assertEqual(get_parameter_description(key), t.get_parameter_description(key))
for key in get_parameter_description().keys():
self.assertIn(key, sim_output.keys())
for key in tmp_dict.keys():
self.assertEqual(tmp_dict[key], get_parameter_description(key))
self.assertDictEqual(tmp_dict, get_parameter_description())
with self.assertRaises(KeyError):
get_parameter_description(key="notakey")
dispersal_parameters = t.dispersal_parameters()
expected_disp_dict = {
"dispersal_method": "normal",
"sigma": 3.55,
"tau": 0.470149,
"m_probability": 0,
"cutoff": 0,
}
for key in dispersal_parameters.keys():
self.assertIn(key, tmp_dict.keys())
self.assertIn(key, expected_disp_dict.keys())
for key, val in expected_disp_dict.items():
self.assertIn(key, dispersal_parameters.keys())
if isinstance(val, float):
self.assertAlmostEqual(val, dispersal_parameters[key])
else:
self.assertEqual(val, dispersal_parameters[key])
class TestCoalescenceTreeSettingSpeciationParameters(unittest.TestCase):
"""Tests that the correct errors are raised when speciation parameters are supplied incorrectly."""
@classmethod
def setUpClass(cls):
"""Generates the temporary databases to attempt analysis on."""
src = [os.path.join("sample", "sample{}.db".format(x)) for x in [2, 3]]
cls.dst = [os.path.join("output", "sample{}.db".format(x)) for x in [2, 3]]
for tmp_src, tmp_dst in zip(src, cls.dst):
if os.path.exists(tmp_dst):
os.remove(tmp_dst)
shutil.copy(tmp_src, tmp_dst)
def testSetSpeciationRates(self):
"""Tests setting speciation rates works as intended and raises appropriate errors"""
ct = CoalescenceTree(self.dst[0])
for attempt in ["a string", ["a", "string"], [["list", "list2"], 0.2, 0.1], [None]]:
with self.assertRaises(TypeError):
ct._set_speciation_rates(attempt)
with self.assertRaises(RuntimeError):
ct._set_speciation_rates(None)
for attempt in [-10, -2.0, 1.1, 100, [-1, 0.1, 0.2], [0.2, 0.8, 1.1]]:
with self.assertRaises(ValueError):
ct._set_speciation_rates(attempt)
expected_list = [0.1, 0.2, 0.3]
ct._set_speciation_rates(expected_list)
self.assertEqual(expected_list, ct.applied_speciation_rates_list)
ct._set_speciation_rates(0.2)
self.assertEqual([0.2], ct.applied_speciation_rates_list)
def testSetRecordFragments(self):
"""Tests that setting the record_fragments flag works as expected."""
ct = CoalescenceTree(self.dst[0])
ct._set_record_fragments(True)
self.assertEqual("null", ct.record_fragments)
ct._set_record_fragments(False)
self.assertEqual("F", ct.record_fragments)
for each in ["PlotBiodiversityMetrics.db", "doesntexist.csv"]:
config_path = os.path.join("sample", each)
with self.assertRaises(IOError):
ct._set_record_fragments(config_path)
expected = os.path.join("sample", "FragmentsTest.csv")
ct._set_record_fragments(expected)
self.assertEqual(expected, ct.record_fragments)
def testSetRecordSpatial(self):
"""Tests that the setting the record_spatial flag works as expected"""
ct = CoalescenceTree(self.dst[0])
ct._set_record_spatial("T")
self.assertTrue(ct.record_spatial)
ct._set_record_spatial("F")
self.assertFalse(ct.record_spatial)
with self.assertRaises(TypeError):
ct._set_record_spatial("nota bool")
ct._set_record_spatial(True)
self.assertTrue(ct.record_spatial)
def testSetMetacommunityParameters(self):
"""Tests that setting the metacommunity parameters works as expected."""
ct = CoalescenceTree(self.dst[0])
for size, spec in [[-10, 0.1], [10, -0.1], [10, 1.1]]:
with self.assertRaises(ValueError):
ct.fragments = "F"
ct._set_record_fragments(False)
ct._set_record_spatial(False)
ct.times = [0.0]
ct._set_metacommunity_parameters(size, spec)
ct._set_metacommunity_parameters()
self.assertEqual(0.0, ct.metacommunity_size)
self.assertEqual(0.0, ct.metacommunity_speciation_rate)
ct._set_metacommunity_parameters(10, 0.1, "simulated")
self.assertEqual(10, ct.metacommunity_size)
self.assertEqual(0.1, ct.metacommunity_speciation_rate)
def testSetProtractedParameters(self):
"""Tests that setting the protracted parameters works as expected."""
ct = CoalescenceTree(self.dst[0])
with self.assertRaises(ValueError):
ct._set_protracted_parameters(0.1, 100)
ct = CoalescenceTree(self.dst[1])
ct._set_protracted_parameters(10, 100)
self.assertEqual((10.0, 100.0), ct.protracted_parameters[0])
ct.protracted_parameters = []
for min_proc, max_proc in [[200, 5000], [80, 50], [200, 11000]]:
with self.assertRaises(ValueError):
ct._check_protracted_parameters(min_proc, max_proc)
with self.assertRaises(ValueError):
ct._set_protracted_parameters(min_proc, max_proc)
with self.assertRaises(ValueError):
ct.add_protracted_parameters(min_proc, max_proc)
ct._set_protracted_parameters(50, 5000)
self.assertEqual((50.0, 5000.0), ct.protracted_parameters[0])
ct.protracted_parameters = []
ct._set_protracted_parameters()
self.assertEqual((0.0, 0.0), ct.protracted_parameters[0])
def testSetSampleFile(self):
"""Tests that the sample file is correctly set."""
ct = CoalescenceTree(self.dst[0])
for file in ["notafile.tif", os.path.join("sample", "sample.db")]:
with self.assertRaises(IOError):
ct._set_sample_file(file)
ct._set_sample_file()
self.assertEqual("null", ct.sample_file)
expected_file = os.path.join("sample", "SA_sample_coarse.tif")
ct._set_sample_file(expected_file)
self.assertEqual(expected_file, ct.sample_file)
def testSetTimes(self):
"""Tests that times are correctly set."""
ct = CoalescenceTree(self.dst[0])
ct._set_times(None)
self.assertEqual(0.0, ct.times[0])
with self.assertRaises(TypeError):
ct.add_times(0.5)
with self.assertRaises(TypeError):
ct.add_times([0.2, 0.5, "string"])
ct.times = None
ct.add_times([0.2, 0.5, 10])
self.assertEqual([0.0, 0.2, 0.5, 10.0], ct.times)
ct.times = None
ct._set_times(0.2)
self.assertEqual([0.0, 0.2], ct.times)
ct.times = None
ct._set_times([0.1, 0.5, 10.0])
self.assertEqual([0.0, 0.1, 0.5, 10.0], ct.times)
class TestCoalescenceTreeParameters(unittest.TestCase):
"""Tests that parameters are correctly obtained from the databases and the relevant errors are raised."""
def testCommunityParameters1(self):
"""Tests the community parameters make sense in a very simple community."""
shutil.copyfile(os.path.join("sample", "sample3.db"), os.path.join("output", "temp_sample3.db"))
t = CoalescenceTree(os.path.join("output", "temp_sample3.db"), logging_level=50)
self.assertEqual([], t.get_metacommunity_references())
self.assertEqual([1], t.get_community_references())
params = t.get_community_parameters(1)
expected_dict = {
"speciation_rate": 0.001,
"time": 0.0,
"fragments": 0,
"metacommunity_reference": 0,
"min_speciation_gen": 100.0,
"max_speciation_gen": 10000.0,
}
self.assertEqual(expected_dict, params)
with self.assertRaises(sqlite3.Error):
t.get_metacommunity_parameters(1)
with self.assertRaises(KeyError):
t.get_community_parameters(2)
with self.assertRaises(KeyError):
t.get_community_reference(0.1, 0.0, 0, 0, 0.0, min_speciation_gen=100.0, max_speciation_gen=10000.0)
with self.assertRaises(KeyError):
_ = t.get_community_reference(speciation_rate=0.001, time=0.0, fragments=False)
ref = t.get_community_reference(
speciation_rate=0.001, time=0.0, fragments=False, min_speciation_gen=100.0, max_speciation_gen=10000.0
)
self.assertEqual(1, ref)
self.assertEqual(expected_dict, t.get_community_parameters(ref))
t.wipe_data()
with self.assertRaises(IOError):
t.get_community_parameters_pd()
def testCommunityParameters2(self):
"""Tests the community parameters make sense in a very simple community."""
t = CoalescenceTree(os.path.join("sample", "sample4.db"))
self.assertEqual([1, 2, 3, 4, 5], t.get_community_references())
expected_params1 = {"speciation_rate": 0.1, "time": 0.0, "fragments": 0, "metacommunity_reference": 0}
expected_params2 = {"speciation_rate": 0.1, "time": 0.0, "fragments": 0, "metacommunity_reference": 1}
expected_params3 = {"speciation_rate": 0.2, "time": 0.0, "fragments": 0, "metacommunity_reference": 1}
expected_params4 = {"speciation_rate": 0.1, "time": 0.0, "fragments": 0, "metacommunity_reference": 2}
expected_params5 = {"speciation_rate": 0.2, "time": 0.0, "fragments": 0, "metacommunity_reference": 2}
expected_meta_params1 = {
"speciation_rate": 0.001,
"metacommunity_size": 10000.0,
"option": "simulated",
"external_reference": 0,
}
expected_meta_params2 = {
"speciation_rate": 0.001,
"metacommunity_size": 10000.0,
"option": "analytical",
"external_reference": 0,
}
params1 = t.get_community_parameters(1)
params2 = t.get_community_parameters(2)
params3 = t.get_community_parameters(3)
params4 = t.get_community_parameters(4)
params5 = t.get_community_parameters(5)
params6 = t.get_metacommunity_parameters(1)
params7 = t.get_metacommunity_parameters(2)
self.assertEqual([1, 2], t.get_metacommunity_references())
self.assertEqual(expected_params1, params1)
self.assertEqual(expected_params2, params2)
self.assertEqual(expected_params3, params3)
self.assertEqual(expected_params4, params4)
self.assertEqual(expected_params5, params5)
self.assertEqual(expected_meta_params1, params6)
self.assertEqual(expected_meta_params2, params7)
with self.assertRaises(KeyError):
t.get_community_parameters(6)
with self.assertRaises(KeyError):
t.get_metacommunity_parameters(3)
ref1 = t.get_community_reference(speciation_rate=0.1, time=0.0, fragments=False)
with self.assertRaises(KeyError):
t.get_community_reference(
speciation_rate=0.1, time=0.0, fragments=False, min_speciation_gen=0.1, max_speciation_gen=10000.0
)
ref2 = t.get_community_reference(
speciation_rate=0.1,
time=0.0,
fragments=False,
metacommunity_size=10000.0,
metacommunity_speciation_rate=0.001,
metacommunity_option="simulated",
)
with self.assertRaises(KeyError):
t.get_community_reference(
speciation_rate=0.1,
time=0.0,
fragments=False,
metacommunity_size=10000.0,
metacommunity_speciation_rate=0.01,
metacommunity_option="simulated",
)
ref3 = t.get_community_reference(
speciation_rate=0.2,
time=0.0,
fragments=False,
metacommunity_size=10000.0,
metacommunity_speciation_rate=0.001,
metacommunity_option="simulated",
)
ref4 = t.get_community_reference(
speciation_rate=0.1,
time=0.0,
fragments=False,
metacommunity_size=10000.0,
metacommunity_speciation_rate=0.001,
metacommunity_option="analytical",
)
ref5 = t.get_community_reference(
speciation_rate=0.2,
time=0.0,
fragments=False,
metacommunity_size=10000.0,
metacommunity_speciation_rate=0.001,
metacommunity_option="analytical",
)
self.assertEqual(1, ref1)
self.assertEqual(2, ref2)
self.assertEqual(3, ref3)
self.assertEqual(4, ref4)
self.assertEqual(5, ref5)
expected_community_params_list = []
for reference in t.get_community_references():
params = t.get_community_parameters(reference)
params["reference"] = reference
expected_community_params_list.append(params)
expected_community_params = pd.DataFrame(expected_community_params_list)
actual_output = t.get_community_parameters_pd()
assert_frame_equal(expected_community_params, actual_output, check_like=True)
def testIsComplete(self):
"""Tests sims are correctly identified as complete."""
t = CoalescenceTree(os.path.join("sample", "sample4.db"))
self.assertTrue(t.is_complete)
class TestCoalescenceTreeAnalysis(unittest.TestCase):
"""Tests analysis is performed correctly"""
@classmethod
def setUpClass(cls):
"""Sets up the Coalescence object test case."""
dst1 = os.path.join("output", "sampledb0.db")
for i in range(0, 11):
dst = os.path.join("output", "sampledb{}.db".format(i))
if os.path.exists(dst):
os.remove(dst)
shutil.copyfile(os.path.join("sample", "sample.db"), dst)
shutil.copyfile(os.path.join("sample", "nse_reference.db"), os.path.join("output", "nse_reference1.db"))
random.seed(2)
cls.test = CoalescenceTree(dst1, logging_level=50)
cls.test.clear_calculations()
cls.test.import_comparison_data(os.path.join("sample", "PlotBiodiversityMetrics.db"))
cls.test.calculate_fragment_richness()
cls.test.calculate_fragment_octaves()
cls.test.calculate_octaves_error()
cls.test.calculate_alpha_diversity()
cls.test.calculate_beta_diversity()
cls.test2 = CoalescenceTree()
cls.test2.set_database(os.path.join("sample", "sample_nofrag.db"))
dstx = os.path.join("output", "sampledbx.db")
shutil.copyfile(dst1, dstx)
c = CoalescenceTree(dstx)
c.import_comparison_data(os.path.join("sample", "PlotBiodiversityMetrics.db"))
c.calculate_goodness_of_fit()
@classmethod
def tearDownClass(cls):
"""
        Removes the files from output.
"""
cls.test.clear_calculations()
def testComparisonDataNoExistError(self):
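        """Tests that importing a non-existent comparison database raises an IOError."""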
c = CoalescenceTree(os.path.join("sample", "sample.db"))
with self.assertRaises(IOError):
c.import_comparison_data(os.path.join("sample", "doesnotexist.db"))
def testFragmentOctaves(self):
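        """Tests that fragment octave classes are correctly calculated for several fragments and community references."""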
num = self.test.cursor.execute(
"SELECT richness FROM FRAGMENT_OCTAVES WHERE fragment == 'P09' AND octave == 0"
" AND community_reference == 1"
).fetchall()[0][0]
self.assertEqual(num, 7, msg="Fragment octaves not correctly calculated.")
num = self.test.cursor.execute(
"SELECT richness FROM FRAGMENT_OCTAVES WHERE fragment == 'P09' AND octave == 0 "
" AND community_reference == 2"
).fetchall()[0][0]
self.assertEqual(num, 7, msg="Fragment octaves not correctly calculated.")
num = self.test.cursor.execute(
"SELECT richness FROM FRAGMENT_OCTAVES WHERE fragment == 'cerrogalera' AND octave == 1 "
" AND community_reference == 1"
).fetchall()[0][0]
self.assertEqual(num, 3, msg="Fragment octaves not correctly calculated.")
num = self.test.cursor.execute(
"SELECT richness FROM FRAGMENT_OCTAVES WHERE fragment == 'whole' AND octave == 1 "
" AND community_reference == 2"
).fetchall()[0][0]
self.assertEqual(num, 221, msg="Fragment octaves not correctly calculated.")
def testFragmentAbundances(self):
"""
Tests that fragment abundances are produced properly by the fragment detection functions.
"""
num = self.test.cursor.execute(
"SELECT COUNT(fragment) FROM FRAGMENT_ABUNDANCES WHERE fragment == 'P09' " " AND community_reference == 1"
).fetchall()[0][0]
self.assertEqual(num, 9, msg="Fragment abundances not correctly calculated.")
num = self.test.cursor.execute(
"SELECT COUNT(fragment) FROM FRAGMENT_ABUNDANCES WHERE fragment == 'P09' " " AND community_reference == 2"
).fetchall()[0][0]
self.assertEqual(num, 9, msg="Fragment abundances not correctly calculated.")
num = self.test.cursor.execute(
"SELECT COUNT(fragment) FROM FRAGMENT_ABUNDANCES WHERE fragment == 'cerrogalera' "
" AND community_reference == 1"
).fetchall()[0][0]
self.assertEqual(num, 9, msg="Fragment abundances not correctly calculated.")
def testSpeciesAbundances(self):
"""Tests that the produced species abundances are correct by comparing species richness."""
num = self.test.cursor.execute(
"SELECT COUNT(species_id) FROM SPECIES_ABUNDANCES WHERE community_reference == 2"
).fetchall()[0][0]
self.assertEqual(num, 1029, msg="Species abundances not correctly calculated.")
num = self.test.cursor.execute(
"SELECT COUNT(species_id) FROM SPECIES_ABUNDANCES WHERE community_reference == 1"
).fetchall()[0][0]
self.assertEqual(num, 884, msg="Species abundances not correctly calculated.")
def testGetOctaves(self):
"""Tests getting the octaves."""
c = CoalescenceTree(os.path.join("output", "sampledb4.db"))
c.clear_calculations()
c.import_comparison_data(os.path.join("sample", "PlotBiodiversityMetrics.db"))
c.calculate_richness()
self.assertEqual([[0, 585], [1, 231], [2, 59], [3, 5]], c.get_octaves(1))
c = CoalescenceTree(os.path.join("output", "sampledb4.db"))
c.clear_calculations()
c.import_comparison_data(os.path.join("sample", "PlotBiodiversityMetrics.db"))
c.calculate_richness()
actual = c.get_octaves_pd().head()
expected = pd.DataFrame(
[[1, 0, 585], [1, 1, 231], [1, 2, 59], [1, 3, 5], [2, 0, 760]],
columns=["community_reference", "octave", "richness"],
)
assert_frame_equal(actual, expected, check_like=True)
def testSpeciesLocations(self):
"""
Tests that species locations have been correctly assigned.
"""
num = self.test.cursor.execute(
"SELECT species_id FROM SPECIES_LOCATIONS WHERE x==1662 AND y==4359 " " AND community_reference == 1"
).fetchall()
self.assertEqual(len(set(num)), 2, msg="Species locations not correctly assigned")
all_list = self.test.get_species_locations()
select_list = self.test.get_species_locations(community_reference=1)
self.assertListEqual([1, 1662, 4359, 1], all_list[0])
self.assertListEqual([1, 1662, 4359], select_list[0])
def testAlphaDiversity(self):
"""
Tests that alpha diversity is correctly calculated and fetched for each parameter reference
"""
c = CoalescenceTree(os.path.join("sample", "sample.db"))
with self.assertRaises(IOError):
c.get_alpha_diversity_pd()
self.assertEqual(9, self.test.get_alpha_diversity(1))
self.assertEqual(10, self.test.get_alpha_diversity(2))
expected_alphas_list = []
for reference in self.test.get_community_references():
expected_alphas_list.append(
{"community_reference": reference, "alpha_diversity": self.test.get_alpha_diversity(reference)}
)
expected_alphas = pd.DataFrame(expected_alphas_list).reset_index(drop=True)
actual_alphas = self.test.get_alpha_diversity_pd().reset_index(drop=True)
assert_frame_equal(expected_alphas, actual_alphas, check_like=True)
def testBetaDiversity(self):
"""
Tests that beta diversity is correctly calculated and fetched for the reference
"""
c = CoalescenceTree(os.path.join("sample", "sample.db"))
with self.assertRaises(IOError):
c.get_beta_diversity_pd()
self.assertAlmostEqual(98.111111111, self.test.get_beta_diversity(1), places=5)
self.assertAlmostEqual(102.8, self.test.get_beta_diversity(2), places=5)
expected_betas_list = []
for reference in self.test.get_community_references():
expected_betas_list.append(
{"community_reference": reference, "beta_diversity": self.test.get_beta_diversity(reference)}
)
expected_betas = pd.DataFrame(expected_betas_list).reset_index(drop=True)
actual_betas = self.test.get_beta_diversity_pd().reset_index(drop=True)
assert_frame_equal(expected_betas, actual_betas, check_like=True)
def testGetNumberIndividuals(self):
"""Tests that the number of individuals is obtained correctly."""
c = CoalescenceTree(os.path.join("output", "sampledb7.db"))
self.assertEqual(1504, c.get_number_individuals(community_reference=1))
self.assertEqual(12, c.get_number_individuals(fragment="P09", community_reference=1))
c.wipe_data()
c.import_comparison_data(os.path.join("sample", "PlotBiodiversityMetrics.db"))
with self.assertRaises(IOError):
c.get_number_individuals(fragment="none")
with self.assertRaises(IOError):
c.get_number_individuals()
def testGetFragmentAbundances(self):
"""Tests that fragment abundances are correctly obtained."""
c = CoalescenceTree(os.path.join("sample", "sample3.db"))
with self.assertRaises(IOError):
c.get_fragment_abundances(fragment="P09", reference=1)
with self.assertRaises(IOError):
c.get_fragment_abundances_pd()
abundances = self.test.get_fragment_abundances(fragment="P09", reference=1)
expected_abundances = [[302, 1], [303, 1], [304, 1], [305, 1], [306, 1], [307, 1], [546, 2], [693, 1], [732, 3]]
self.assertEqual(expected_abundances, abundances[:10])
all_abundances = self.test.get_all_fragment_abundances()
expected_abundances2 = [
[1, "P09", 302, 1],
[1, "P09", 303, 1],
[1, "P09", 304, 1],
[1, "P09", 305, 1],
[1, "P09", 306, 1],
[1, "P09", 307, 1],
[1, "P09", 546, 2],
[1, "P09", 693, 1],
[1, "P09", 732, 3],
[1, "cerrogalera", 416, 1],
]
self.assertEqual(expected_abundances2, all_abundances[:10])
df = pd.DataFrame(
expected_abundances2, columns=["community_reference", "fragment", "species_id", "no_individuals"]
)
actual_df = self.test.get_fragment_abundances_pd().head(n=10)
assert_frame_equal(df, actual_df, check_like=True)
def testGetFragmentListErrors(self):
"""Tests the error is raised when obtaining fragment list."""
c = CoalescenceTree(os.path.join("output", "sampledb8.db"))
c.wipe_data()
with self.assertRaises(IOError):
c.get_fragment_list()
def testClearGoodnessFit(self):
"""Tests that goodness of fit are correctly cleared."""
c = CoalescenceTree(os.path.join("output", "sampledbx.db"))
exec_command = "SELECT * FROM BIODIVERSITY_METRICS WHERE metric LIKE 'goodness_%'"
self.assertTrue(len(c.cursor.execute(exec_command).fetchall()) >= 1)
c._clear_goodness_of_fit()
self.assertFalse(len(c.cursor.execute(exec_command).fetchall()) >= 1)
def testGetBiodiversityMetrics(self):
"""Tests that biodiversity metrics are correctly obtained from the database."""
c1 = CoalescenceTree(os.path.join("sample", "sample.db"))
with self.assertRaises(IOError):
c1.get_biodiversity_metrics()
c2 = CoalescenceTree(os.path.join("sample", "sample2.db"))
expected_biodiversity_metrics = pd.DataFrame(
[
[1, "fragment_richness", "fragment2", 129.0, np.NaN, np.NaN],
[2, "fragment_richness", "fragment2", 130.0, np.NAN, np.NaN],
[1, "fragment_richness", "fragment1", 174.0, np.NaN, np.NaN],
[2, "fragment_richness", "fragment1", 175.0, np.NaN, np.NaN],
[1, "fragment_richness", "whole", 1163.0, np.NaN, np.NaN],
[2, "fragment_richness", "whole", 1170.0, np.NaN, np.NaN],
],
columns=["community_reference", "metric", "fragment", "value", "simulated", "actual"],
).reset_index(drop=True)
actual_biodiversity_metrics = c2.get_biodiversity_metrics().reset_index(drop=True).fillna(value=np.nan)
assert_frame_equal(expected_biodiversity_metrics, actual_biodiversity_metrics)
def testRaisesErrorNoFragmentsAlpha(self):
"""
Tests that an error is raised when alpha diversity is calculated without any fragment abundance data
"""
with self.assertRaises(IOError):
self.test2.calculate_alpha_diversity()
def testRaisesErrorNoFragmentsBeta(self):
"""
        Tests that an error is raised when beta diversity is calculated without any fragment abundance data
"""
with self.assertRaises(IOError):
self.test2.calculate_beta_diversity()
def testRaisesErrorNoFragmentsRichness(self):
"""
Tests that an error is raised when fragment richness is calculated without any fragment abundance data
"""
with self.assertRaises(IOError):
self.test2.calculate_fragment_richness()
def testRaisesErrorNoFragmentsOctaves(self):
"""
        Tests that an error is raised when fragment octaves are calculated without any fragment abundance data
"""
with self.assertRaises(IOError):
self.test2.calculate_fragment_octaves()
@unittest.skipIf(sys.version[0] != "3", "Skipping Python 3.x tests")
def testModelFitting2(self):
"""
Tests that the goodness-of-fit calculations are correctly performed.
"""
random.seed(2)
self.test.calculate_goodness_of_fit()
self.assertAlmostEqual(self.test.get_goodness_of_fit(), 0.30140801329929373, places=6)
self.assertAlmostEqual(self.test.get_goodness_of_fit_fragment_octaves(), 0.0680205429120108, places=6)
self.assertAlmostEqual(self.test.get_goodness_of_fit_fragment_richness(), 0.9244977999898334, places=6)
@unittest.skipIf(sys.version[0] == "3", "Skipping Python 2.x tests")
def testModelFitting3(self):
"""
Tests that the goodness-of-fit calculations are correctly performed.
"""
random.seed(2)
self.test.calculate_goodness_of_fit()
self.assertAlmostEqual(self.test.get_goodness_of_fit(), 0.30140801329929373, places=6)
self.assertAlmostEqual(self.test.get_goodness_of_fit_fragment_octaves(), 0.0680205429120108, places=6)
self.assertAlmostEqual(self.test.get_goodness_of_fit_fragment_richness(), 0.9244977999898334, places=6)
def testErrorIfNotApplied(self):
"""Tests that an error is raised if outputting is attempted without applying any community parameters."""
c = CoalescenceTree(os.path.join("sample", "sample.db"))
with self.assertRaises(RuntimeError):
c.output()
def testFragmentNumbersMatching(self):
"""Checks behaviour when matching fragment numbers."""
test = CoalescenceTree(os.path.join("output", "sampledb1.db"), logging_level=50)
test.clear_calculations()
with self.assertRaises(RuntimeError):
test._check_fragment_numbers_match()
with self.assertRaises(ValueError):
test.calculate_fragment_abundances()
test._check_fragment_numbers_match()
test.comparison_file = os.path.join("sample", "PlotBiodiversityMetrics.db")
self.assertTrue(test._check_fragment_numbers_match())
test.fragment_abundances.pop(0)
self.assertFalse(test._check_fragment_numbers_match())
def testFragmentNumbersEqualisation(self):
"""Checks behaviour when equalising fragment numbers."""
test = CoalescenceTree(os.path.join("output", "sampledb2.db"), logging_level=50)
test.clear_calculations()
test.import_comparison_data(os.path.join("sample", "PlotBiodiversityMetrics.db"))
test.calculate_fragment_richness()
self.test._equalise_fragment_number("notafrag", 1)
test.fragment_abundances[0][2] += 1000
test._equalise_fragment_number("P09", 1)
self.assertTrue(test._check_fragment_numbers_match())
def testFragmentNumbersErrors(self):
"""Checks behaviour when equalising fragment numbers."""
test = CoalescenceTree(os.path.join("output", "sampledb3.db"), logging_level=50)
test.clear_calculations()
test.import_comparison_data(os.path.join("sample", "PlotBiodiversityMetrics.db"))
test.comparison_abundances = None
with self.assertRaises(ValueError):
test._equalise_all_fragment_numbers()
def testAdjustBiodiversityMetrics(self):
"""Checks that biodiversity metrics are correctly adjusted."""
test = CoalescenceTree(os.path.join("output", "sampledb5.db"), logging_level=50)
test.clear_calculations()
test.import_comparison_data(os.path.join("sample", "PlotBiodiversityMetrics.db"))
test.adjust_data()
def testComparisonOctavesModification(self):
"""Tests that the comparison database is modified."""
test = CoalescenceTree(os.path.join("output", "sampledb6.db"), logging_level=50)
dst = os.path.join("output", "PlotBiodiversityMetricsNoAlpha2.db")
shutil.copy(os.path.join("sample", "PlotBiodiversityMetricsNoAlpha.db"), dst)
test.import_comparison_data(dst)
test.calculate_comparison_octaves(store=True)
self.assertTrue(os.path.exists(dst))
@unittest.skipIf(sys.version[0] == "2", "Skipping Python 3.x tests")
def testDownsamplingAndRevert(self):
"""Tests that downsampling works as intended and can be reverted."""
c = CoalescenceTree(os.path.join("output", "sampledb9.db"))
random.seed(a=10, version=3)
original_individuals = c.get_number_individuals()
original_richness = c.get_species_richness_pd()
c.wipe_data()
with self.assertRaises(ValueError):
c.downsample(sample_proportion=2.0)
c.downsample(sample_proportion=0.1)
c.set_speciation_parameters([0.1, 0.2])
c.apply()
new_individuals = c.get_number_individuals()
self.assertEqual(1452, new_individuals)
self.assertTrue(check_sql_table_exist(c.database, "SPECIES_LIST"))
self.assertTrue(check_sql_table_exist(c.database, "SPECIES_LIST_ORIGINAL"))
c = CoalescenceTree(os.path.join("output", "sampledb9.db"))
c.revert_downsample()
c.wipe_data()
c.set_speciation_parameters([0.1, 0.2])
c.apply()
final_individuals = c.get_number_individuals()
assert_frame_equal(original_richness, c.get_species_richness_pd())
self.assertEqual(original_individuals, final_individuals)
self.assertTrue(check_sql_table_exist(c.database, "SPECIES_LIST"))
self.assertFalse(check_sql_table_exist(c.database, "SPECIES_LIST_ORIGINAL"))
# Now test with NSE sim to ensure correct sampling
c = CoalescenceTree(os.path.join("output", "nse_reference1.db"))
nse_richness = c.get_species_richness_pd()
nse_no_individuals = c.get_number_individuals()
c.wipe_data()
c.downsample(sample_proportion=0.1)
c.set_speciation_parameters([0.000001, 0.999999])
c.apply()
new_no_individuals = c.get_number_individuals()
self.assertAlmostEqual(new_no_individuals / nse_no_individuals, 0.1, 5)
self.assertEqual(1000, c.get_species_richness(reference=2))
self.assertTrue(check_sql_table_exist(c.database, "SPECIES_LIST"))
self.assertTrue(check_sql_table_exist(c.database, "SPECIES_LIST_ORIGINAL"))
c = CoalescenceTree(os.path.join("output", "nse_reference1.db"))
c.revert_downsample()
c.wipe_data()
c.set_speciation_parameters([0.000001, 0.999999])
c.apply_incremental()
c.set_speciation_parameters([0.5])
c.apply()
actual_richness = c.get_species_richness_pd()
assert_frame_equal(nse_richness, actual_richness)
self.assertEqual(nse_no_individuals, c.get_number_individuals())
self.assertTrue(check_sql_table_exist(c.database, "SPECIES_LIST"))
self.assertFalse(check_sql_table_exist(c.database, "SPECIES_LIST_ORIGINAL"))
with self.assertRaises(IOError):
c.revert_downsample()
@unittest.skipIf(sys.version[0] == "2", "Skipping Python 3.x tests")
def testDownsamplingByLocationAndRevert(self):
"""Tests that downsampling works as intended and can be reverted."""
c = CoalescenceTree(os.path.join("output", "sampledb10.db"))
random.seed(a=10, version=3)
original_individuals = c.get_number_individuals()
original_richness = c.get_species_richness_pd()
c.wipe_data()
with self.assertRaises(ValueError):
c.downsample_at_locations(fragment_csv=os.path.join("sample", "FragmentsTestFail1.csv"))
with self.assertRaises(IOError):
c.downsample_at_locations(fragment_csv="not_a_file.csv")
c.downsample_at_locations(fragment_csv=os.path.join("sample", "FragmentsTest3.csv"))
c.set_speciation_parameters([0.1, 0.2])
c.apply()
new_individuals = c.get_number_individuals()
self.assertEqual(2, new_individuals)
self.assertTrue(check_sql_table_exist(c.database, "SPECIES_LIST"))
self.assertTrue(check_sql_table_exist(c.database, "SPECIES_LIST_ORIGINAL"))
c = CoalescenceTree(os.path.join("output", "sampledb10.db"))
c.revert_downsample()
c.wipe_data()
c.set_speciation_parameters([0.1, 0.2])
c.apply()
final_individuals = c.get_number_individuals()
assert_frame_equal(original_richness, c.get_species_richness_pd())
self.assertEqual(original_individuals, final_individuals)
self.assertTrue(check_sql_table_exist(c.database, "SPECIES_LIST"))
self.assertFalse(check_sql_table_exist(c.database, "SPECIES_LIST_ORIGINAL"))
c = CoalescenceTree(os.path.join("output", "sampledb10.db"))
c.wipe_data()
c.downsample_at_locations(fragment_csv=os.path.join("sample", "FragmentsTest4.csv"), ignore_errors=True)
c.set_speciation_parameters([0.1, 0.2])
c.apply()
new_individuals = c.get_number_individuals()
self.assertEqual(3, new_individuals)
class TestCoalescenceTreeWriteCsvs(unittest.TestCase):
"""Tests that csvs are correctly outputted."""
@classmethod
def setUpClass(cls):
"""Creates the CoalescenceTree object."""
cls.c = CoalescenceTree(os.path.join("sample", "nse_reference.db"))
def testWriteCommunityParameterToCsv(self):
"""Tests that community parameters are correctly written to a csv."""
output_csv = os.path.join("output", "community_parameters1.csv")
self.c.write_to_csv(output_csv, "COMMUNITY_PARAMETERS")
self.assertTrue(os.path.exists(output_csv))
import csv
if sys.version_info[0] < 3: # pragma: no cover
infile = open(output_csv, "rb")
else:
infile = open(output_csv, "r")
expected_output = [
["reference", "speciation_rate", "time", "fragments", "metacommunity_reference"],
["1", "1e-06", "0.0", "0", "0"],
["2", "0.99999", "0.0", "0", "0"],
["3", "0.5", "0.0", "0", "0"],
]
actual_output = []
with infile as csv_file:
csv_reader = csv.reader(csv_file)
for row in csv_reader:
actual_output.append(row)
self.assertEqual(expected_output, actual_output)
with self.assertRaises(IOError):
self.c.write_to_csv(output_csv, "COMMUNITY_PARAMETERS")
with self.assertRaises(KeyError):
self.c.write_to_csv("notacsv.csv", "NOTATABLE")
def testWritesAllCsvs(self):
"""Tests that all csvs write to the output correctly."""
output_dir = os.path.join("output", "csvdir")
if os.path.exists(output_dir):
            shutil.rmtree(output_dir)
self.c.write_all_to_csvs(output_dir, "out1")
expected_tables = ["COMMUNITY_PARAMETERS", "SIMULATION_PARAMETERS", "SPECIES_ABUNDANCES", "SPECIES_LIST"]
for table in expected_tables:
self.assertTrue(os.path.exists(os.path.join(output_dir, "out1_{}.csv".format(table))))
for file in os.listdir(output_dir):
if ".csv" in file:
self.assertIn(file, ["out1_{}.csv".format(x) for x in expected_tables])
self.c.write_all_to_csvs(output_dir, "out2.csv")
for table in expected_tables:
self.assertTrue(os.path.exists(os.path.join(output_dir, "out2_{}.csv".format(table))))
self.c.write_all_to_csvs(output_dir, "out3.")
for table in expected_tables:
self.assertTrue(os.path.exists(os.path.join(output_dir, "out3_{}.csv".format(table))))
class TestCoalescenceTreeSpeciesDistances(unittest.TestCase):
"""Tests analysis is performed correctly."""
@classmethod
def setUpClass(cls):
"""
Sets up the Coalescence object test case.
"""
dst = os.path.join("output", "sampledb1.db")
if os.path.exists(dst):
os.remove(dst)
shutil.copyfile(os.path.join("sample", "sample.db"), dst)
cls.test = CoalescenceTree(dst)
cls.test.clear_calculations()
cls.test.import_comparison_data(os.path.join("sample", "PlotBiodiversityMetrics.db"))
cls.test.calculate_species_distance_similarity()
def testSpeciesDistanceSimilarity(self):
"""
Tests that the species distance similarity function works as intended.
"""
mean = self.test.cursor.execute(
"SELECT value FROM BIODIVERSITY_METRICS WHERE community_reference == 1 AND "
"metric == 'mean_distance_between_individuals'"
).fetchone()[0]
self.assertAlmostEqual(mean, 5.423769507803121, places=5)
species_distances = self.test.get_species_distance_similarity(community_reference=1)
# for distance, similar in species_distances:
# self.assertLessEqual(similar, dissimilar)
self.assertListEqual(species_distances[0], [0, 11])
self.assertListEqual(species_distances[1], [1, 274])
self.assertListEqual(species_distances[2], [2, 289])
class TestCoalescenceTreeAnalyseIncorrectComparison(unittest.TestCase):
"""
Tests errors are raised correctly for incorrect comparison data.
"""
@classmethod
def setUpClass(cls):
"""
Sets up the Coalescence object test case.
"""
random.seed(10)
dst = os.path.join("output", "sampledb2.db")
if os.path.exists(dst):
os.remove(dst)
shutil.copyfile(os.path.join("sample", "sample.db"), dst)
cls.test = CoalescenceTree(logging_level=40)
cls.test.set_database(dst)
cls.test.import_comparison_data(os.path.join("sample", "PlotBiodiversityMetricsNoAlpha.db"))
cls.test.calculate_comparison_octaves(False)
cls.test.clear_calculations()
cls.test.calculate_fragment_richness()
cls.test.calculate_fragment_octaves()
cls.test.calculate_octaves_error()
cls.test.calculate_alpha_diversity()
cls.test.calculate_alpha_diversity()
cls.test.calculate_beta_diversity()
cls.test2 = CoalescenceTree()
cls.test2.set_database(os.path.join("sample", "sample_nofrag.db"))
@classmethod
def tearDownClass(cls):
"""
Removes the files from output.
"""
cls.test.clear_calculations()
def testRaisesErrorMismatchParameters(self):
"""
Tests that an error is raised when there is a parameter mismatch
"""
with self.assertRaises(ValueError):
self.test.calculate_goodness_of_fit()
class TestSimulationAnalysisTemporal(unittest.TestCase):
"""Tests that applying multiple times works as expected."""
@classmethod
def setUpClass(cls):
"""Generates the analysis object."""
src = os.path.join("sample", "sample2.db")
dst = os.path.join("output", "sample2.db")
if not os.path.exists(dst):
shutil.copy(src, dst)
cls.tree = CoalescenceTree()
cls.tree.set_database(dst)
cls.tree.wipe_data()
def testTimesWrongFormatError(self):
"""Tests that an error is raised when the times are in the wrong format."""
with self.assertRaises(TypeError):
self.tree.set_speciation_parameters([0.4, 0.6], times=[0.1, 0.2, "notafloat"])
with self.assertRaises(TypeError):
# noinspection PyTypeChecker
self.tree.set_speciation_parameters([0.4, 0.6], times="notafloat")
self.tree.times = []
self.tree.set_speciation_parameters([0.4, 0.6], times=[0, 1, 10])
self.assertEqual([0.0, 1.0, 10.0], self.tree.times)
class TestSimulationAnalysis(unittest.TestCase):
"""
Tests that the simulation can perform all required analyses, and that the correct errors are thrown if the object
does not exist.
"""
@classmethod
def setUpClass(cls):
"""Copies the sample databases and applies a basic set of community parameters."""
src = os.path.join("sample", "sample2.db")
dst = os.path.join("output", "sample2.db")
if os.path.exists(dst):
os.remove(dst)
shutil.copy(src, dst)
cls.tree = CoalescenceTree(logging_level=50)
cls.tree.set_database(dst)
cls.tree.wipe_data()
cls.tree.set_speciation_parameters(
speciation_rates=[0.5, 0.7],
record_spatial="T",
record_fragments=os.path.join("sample", "FragmentsTest.csv"),
sample_file=os.path.join("sample", "SA_samplemaskINT.tif"),
)
cls.tree.apply()
cls.tree.calculate_fragment_richness()
cls.tree.calculate_fragment_octaves()
np.random.seed(100)
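# Seed numpy's RNG so the sampling-based tests below (sample_fragment_richness,
# sample_landscape_richness) give reproducible results.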
def testSetDatabaseErrors(self):
"""Tests that the set database errors are correctly raised."""
sim = Simulation()
c = CoalescenceTree()
with self.assertRaises(RuntimeError):
c.set_database(sim)
c = CoalescenceTree()
with self.assertRaises(IOError):
c.set_database(os.path.join("sample", "failsampledoesntexist.db"))
def testFragmentConfigNoExistError(self):
"""Tests that an error is raised if the fragment config file does not exist."""
tree = CoalescenceTree(self.tree.file)
with self.assertRaises(IOError):
tree.set_speciation_parameters(
speciation_rates=[0.5, 0.7],
record_spatial="T",
record_fragments=os.path.join("sample", "notafragmentconfig.csv"),
sample_file=os.path.join("sample", "SA_samplemaskINT.tif"),
)
with self.assertRaises(IOError):
tree.set_speciation_parameters(
speciation_rates=[0.5, 0.7],
record_spatial="T",
record_fragments=os.path.join("sample", "example_historical_fine.tif"),
sample_file=os.path.join("sample", "SA_samplemaskINT.tif"),
)
def testReadsFragmentsRichness(self):
"""
Tests that the fragment richness can be read correctly
"""
sim_params = self.tree.get_simulation_parameters()
expected_params = dict(
seed=9,
task=1,
output_dir="output",
speciation_rate=0.5,
sigma=2.828427,
tau=2.0,
deme=1,
sample_size=0.1,
max_time=2.0,
dispersal_relative_cost=1.0,
min_num_species=1,
habitat_change_rate=0.0,
gen_since_historical=200.0,
time_config_file="null",
coarse_map_file="sample/SA_sample_coarse.tif",
coarse_map_x=35,
coarse_map_y=41,
coarse_map_x_offset=11,
coarse_map_y_offset=14,
coarse_map_scale=1.0,
fine_map_file="sample/SA_sample_fine.tif",
fine_map_x=13,
fine_map_y=13,
fine_map_x_offset=0,
fine_map_y_offset=0,
sample_file="sample/SA_samplemaskINT.tif",
grid_x=13,
grid_y=13,
sample_x=13,
sample_y=13,
sample_x_offset=0,
sample_y_offset=0,
historical_coarse_map="none",
historical_fine_map="none",
sim_complete=1,
dispersal_method="normal",
m_probability=0.0,
cutoff=0.0,
landscape_type="closed",
protracted=0,
min_speciation_gen=0.0,
max_speciation_gen=0.0,
dispersal_map="none",
)
for key in sim_params.keys():
self.assertEqual(
sim_params[key],
expected_params[key],
msg="Error in {}: {} != {}".format(key, sim_params[key], expected_params[key]),
)
fragment2_richness = ["fragment2", 1, 129]
self.assertEqual(self.tree.get_fragment_richness(fragment="fragment2", reference=1), 129)
self.assertEqual(self.tree.get_fragment_richness(fragment="fragment1", reference=2), 175)
octaves = self.tree.get_fragment_richness()
self.assertListEqual(fragment2_richness, [list(x) for x in octaves if x[0] == "fragment2" and x[1] == 1][0])
expected_fragment_richness = []
for reference in self.tree.get_community_references():
for fragment in self.tree.get_fragment_list(reference):
fragment_richness = self.tree.get_fragment_richness(fragment=fragment, reference=reference)
expected_fragment_richness.append(
{"fragment": fragment, "community_reference": reference, "fragment_richness": fragment_richness}
)
expected_fragment_richness_df = (
pd.DataFrame(expected_fragment_richness)
.sort_values(by=["fragment", "community_reference"])
.reset_index(drop=True)
)
actual_fragment_richness = self.tree.get_fragment_richness_pd().reset_index(drop=True)
assert_frame_equal(expected_fragment_richness_df, actual_fragment_richness, check_like=True)
def testGetsFragmentList(self):
"""
Tests that fetching the list of fragments from FRAGMENT_ABUNDANCES is as expected
"""
fragment_list = self.tree.get_fragment_list()
expected_list = ["fragment1", "fragment2"]
self.assertListEqual(expected_list, fragment_list)
def testReadsFragmentAbundances(self):
"""
Tests that the fragment abundances are correctly read
"""
expected_abundances = [
[610, 1],
[611, 1],
[612, 1],
[613, 1],
[614, 1],
[615, 1],
[616, 1],
[617, 1],
[618, 1],
[619, 1],
]
actual_abundances = self.tree.get_species_abundances(fragment="fragment2", reference=1)
for i, each in enumerate(expected_abundances):
self.assertListEqual(actual_abundances[i], each)
with self.assertRaises(ValueError):
self.tree.get_species_abundances(fragment="fragment2")
expected_fragment_abundances_list = []
for reference in self.tree.get_community_references():
for fragment in self.tree.get_fragment_list(reference):
fragment_abundances = self.tree.get_fragment_abundances(fragment=fragment, reference=reference)
for species_id, abundance in fragment_abundances:
expected_fragment_abundances_list.append(
{
"fragment": fragment,
"community_reference": reference,
"species_id": species_id,
"no_individuals": abundance,
}
)
expected_fragment_abundances = (
pd.DataFrame(expected_fragment_abundances_list)
.sort_values(by=["fragment", "community_reference", "species_id"])
.reset_index(drop=True)
)
actual_fragment_abundances = (
self.tree.get_fragment_abundances_pd()
.sort_values(by=["fragment", "community_reference", "species_id"])
.reset_index(drop=True)
)
assert_frame_equal(expected_fragment_abundances, actual_fragment_abundances, check_like=True)
def testFragmentRichnessRaiseError(self):
"""
Tests that the correct errors are raised when no fragment exists with that name, or with the specified
speciation rate, or time. Also checks the SyntaxErrors and IOErrors raised when no FRAGMENT_RICHNESS
table exists.
"""
failtree = CoalescenceTree()
failtree.set_database(os.path.join("sample", "failsample.db"))
with self.assertRaises(IOError):
failtree.get_fragment_richness()
with self.assertRaises(IOError):
failtree.get_fragment_richness_pd()
with self.assertRaises(IOError):
self.tree.get_fragment_richness(fragment="fragment4", reference=1)
with self.assertRaises(SyntaxError):
self.tree.get_fragment_richness(fragment="fragment4")
with self.assertRaises(SyntaxError):
self.tree.get_fragment_richness(reference=1)
def testReadsFragmentOctaves(self):
"""
Tests that the fragment octaves can be read correctly.
"""
octaves = self.tree.get_fragment_octaves(fragment="fragment2", reference=1)
octaves2 = self.tree.get_fragment_octaves(fragment="fragment1", reference=1)
all_octaves = self.tree.get_fragment_octaves()
desired = ["fragment1", 1, 0, 173]
self.assertListEqual([0, 128], octaves[0])
self.assertListEqual([0, 173], octaves2[0])
self.assertListEqual(desired, [x for x in all_octaves if x[0] == "fragment1" and x[1] == 1 and x[2] == 0][0])
expected_fragment_octaves_list = []
for reference in self.tree.get_community_references():
fragment_list = self.tree.get_fragment_list(reference)
fragment_list.append("whole")
for fragment in fragment_list:
try:
octaves = self.tree.get_fragment_octaves(fragment=fragment, reference=reference)
for octave, richness in octaves:
expected_fragment_octaves_list.append(
{
"fragment": fragment,
"community_reference": reference,
"octave": octave,
"richness": richness,
}
)
except RuntimeError:
continue
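# Fragment/reference combinations with no stored octaves (e.g. the aggregate "whole" entry)
# raise RuntimeError and are simply skipped.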
expected_fragment_octaves = (
pd.DataFrame(expected_fragment_octaves_list)
.sort_values(["fragment", "community_reference", "octave"], axis=0)
.reset_index(drop=True)
)
actual_fragment_octaves = (
self.tree.get_fragment_octaves_pd()
.sort_values(["fragment", "community_reference", "octave"], axis=0)
.reset_index(drop=True)
)
assert_frame_equal(expected_fragment_octaves, actual_fragment_octaves, check_like=True)
def testFragmentOctavesRaiseError(self):
"""
Tests that the correct errors are raised for different situations for reading fragment octaves
"""
failtree = CoalescenceTree()
try:
failtree.set_database("sample/failsample.db")
except sqlite3.Error:
pass
with self.assertRaises(sqlite3.Error):
failtree.get_fragment_octaves(fragment="fragment4", reference=100)
with self.assertRaises(RuntimeError):
self.tree.get_fragment_octaves(fragment="fragment4", reference=100)
with self.assertRaises(SyntaxError):
self.tree.get_fragment_octaves(fragment="fragment4")
with self.assertRaises(SyntaxError):
self.tree.get_fragment_octaves(reference=100)
def testFragmentSampling(self):
"""
Tests that sampling from fragments is accurate.
"""
self.assertEqual(
10,
self.tree.sample_fragment_richness(
fragment="fragment1", number_of_individuals=10, n=1, community_reference=2
),
)
self.assertEqual(
10,
self.tree.sample_fragment_richness(
fragment="fragment2", number_of_individuals=10, n=10, community_reference=2
),
)
def testLandscapeSampling(self):
"""Tests that the sampling from the landscape works as intended."""
number_dict = {"fragment1": 3, "fragment2": 10}
np.random.seed(100)
self.assertEqual(
13, self.tree.sample_landscape_richness(number_of_individuals=number_dict, n=1, community_reference=2)
)
self.assertAlmostEqual(
99.9, self.tree.sample_landscape_richness(number_of_individuals=100, n=10, community_reference=1), places=3
)
def testRaisesSamplingErrors(self):
"""Tests that sampling errors are correctly raised"""
number_dict = {"fragment1": 3000000, "fragment2": 10}
with self.assertRaises(KeyError):
self.assertEqual(
13, self.tree.sample_landscape_richness(number_of_individuals=number_dict, n=1, community_reference=2)
)
number_dict2 = {"fragment": 10, "fragment2": 10}
with self.assertRaises(KeyError):
self.assertEqual(
13, self.tree.sample_landscape_richness(number_of_individuals=number_dict2, n=1, community_reference=2)
)
def testSpeciesRichness(self):
"""Tests that the simulation species richness is read correctly."""
actual_species_richness = (
self.tree.get_species_richness_pd().sort_values(by=["community_reference"]).reset_index(drop=True)
)
expected_species_richness_list = []
for reference in self.tree.get_community_references():
expected_species_richness_list.append(
{"community_reference": reference, "richness": self.tree.get_species_richness(reference=reference)}
)
expected_species_richness = pd.DataFrame(expected_species_richness_list)
assert_frame_equal(actual_species_richness, expected_species_richness, check_like=True)
def testOctaves(self):
"""Tests that the simulation octave classes are correctly calculated."""
actual_species_octaves = (
self.tree.get_octaves_pd().sort_values(by=["community_reference", "octave"]).reset_index(drop=True)
)
expected_species_octaves_list = []
for reference in self.tree.get_community_references():
for octave, richness in self.tree.get_octaves(reference):
expected_species_octaves_list.append(
{"community_reference": reference, "octave": octave, "richness": richness}
)
expected_species_octaves = pd.DataFrame(expected_species_octaves_list)
assert_frame_equal(actual_species_octaves, expected_species_octaves, check_like=True)
# -*- coding: utf-8 -*-
import pdb,importlib,inspect,time,datetime,json
# from PyFin.api import advanceDateByCalendar
# from data.polymerize import DBPolymerize
from data.storage_engine import StorageEngine
import time
import pandas as pd
import numpy as np
from datetime import timedelta, datetime
from financial import factor_earning
from data.model import BalanceMRQ, BalanceTTM, BalanceReport
from data.model import CashFlowTTM, CashFlowReport
from data.model import IndicatorReport
from data.model import IncomeReport, IncomeTTM
from vision.db.signletion_engine import *
from data.sqlengine import sqlEngine
# pd.set_option('display.max_columns', None)
# pd.set_option('display.max_rows', None)
# from ultron.cluster.invoke.cache_data import cache_data
class CalcEngine(object):
def __init__(self, name, url, methods=[{'packet':'financial.factor_earning','class':'FactorEarning'},]):
self._name = name
self._methods = methods
self._url = url
def get_trade_date(self, trade_date, n, days=365):
"""
Get the date n years before the given date, constrained to a trading day; if that date is not
a trading day, step back to the most recent preceding one.
:param days: number of days counted as one year (default 365)
:param trade_date: current trading date
:param n: number of years to step back
:return: the resulting trading date as a string in %Y%m%d format
"""
syn_util = SyncUtil()
trade_date_sets = syn_util.get_all_trades('001002', '19900101', trade_date)
trade_date_sets = trade_date_sets['TRADEDATE'].values
time_array = datetime.strptime(str(trade_date), "%Y%m%d")
time_array = time_array - timedelta(days=days) * n
date_time = int(datetime.strftime(time_array, "%Y%m%d"))
if str(date_time) < min(trade_date_sets):
# print('date_time %s is out of trade_date_sets' % date_time)
return str(date_time)
else:
while str(date_time) not in trade_date_sets:
date_time = date_time - 1
# print('trade_date pre %s year %s' % (n, date_time))
return str(date_time)
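# Illustrative example (dates are hypothetical): with trade_date="20190701", n=1 and the default
# days=365, the method first steps back to 20180701 and then decrements the integer date until it
# appears in trade_date_sets, so a weekend date would resolve to a preceding trading day such as
# "20180629".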
def _func_sets(self, method):
# Filter out private and protected functions
return list(filter(lambda x: not x.startswith('_') and callable(getattr(method,x)), dir(method)))
def loading_data(self, trade_date):
"""
Fetch the base data.
Fetch, for the given day, the base data of all stocks trading on that day.
:param trade_date: trading day
:return:
"""
# Convert the date format
time_array = datetime.strptime(trade_date, "%Y-%m-%d")
trade_date = datetime.strftime(time_array, '%Y%m%d')
# Load the data needed by the factors currently involved
trade_date_pre_year = self.get_trade_date(trade_date, 1)
trade_date_pre_year_2 = self.get_trade_date(trade_date, 2)
trade_date_pre_year_3 = self.get_trade_date(trade_date, 3)
trade_date_pre_year_4 = self.get_trade_date(trade_date, 4)
trade_date_pre_year_5 = self.get_trade_date(trade_date, 5)
engine = sqlEngine()
columns = ['COMPCODE', 'PUBLISHDATE', 'ENDDATE', 'symbol', 'company_id', 'trade_date']
# Report Data
cash_flow_sets = engine.fetch_fundamentals_pit_extend_company_id(CashFlowReport,
[CashFlowReport.LABORGETCASH,
CashFlowReport.FINALCASHBALA,
], dates=[trade_date])
for column in columns:
if column in list(cash_flow_sets.keys()):
cash_flow_sets = cash_flow_sets.drop(column, axis=1)
cash_flow_sets = cash_flow_sets.rename(
columns={'LABORGETCASH': 'goods_sale_and_service_render_cash', # cash received from selling goods and rendering services
'FINALCASHBALA': 'cash_and_equivalents_at_end', # cash and cash equivalents at period end
})
income_sets = engine.fetch_fundamentals_pit_extend_company_id(IncomeReport,
[IncomeReport.BIZTOTINCO,
IncomeReport.BIZINCO,
IncomeReport.PERPROFIT,
IncomeReport.PARENETP,
IncomeReport.NETPROFIT,
], dates=[trade_date])
for column in columns:
if column in list(income_sets.keys()):
income_sets = income_sets.drop(column, axis=1)
income_sets = income_sets.rename(columns={'NETPROFIT': 'net_profit', # net profit
'BIZTOTINCO': 'total_operating_revenue', # total operating revenue
'BIZINCO': 'operating_revenue', # operating revenue
'PERPROFIT': 'operating_profit', # operating profit
'PARENETP': 'np_parent_company_owners', # net profit attributable to owners of the parent company
})
indicator_sets = engine.fetch_fundamentals_pit_extend_company_id(IndicatorReport,
[
IndicatorReport.NETPROFITCUT,
# net profit after deducting non-recurring gains and losses
IndicatorReport.MGTEXPRT
], dates=[trade_date])
for column in columns:
if column in list(indicator_sets.keys()):
indicator_sets = indicator_sets.drop(column, axis=1)
indicator_sets = indicator_sets.rename(columns={'NETPROFITCUT': 'adjusted_profit', # net profit after deducting non-recurring gains and losses
})
balance_sets = engine.fetch_fundamentals_pit_extend_company_id(BalanceReport,
[BalanceReport.PARESHARRIGH,
], dates=[trade_date])
for column in columns:
if column in list(balance_sets.keys()):
balance_sets = balance_sets.drop(column, axis=1)
balance_sets = balance_sets.rename(columns={'PARESHARRIGH': 'equities_parent_company_owners', # total equity attributable to shareholders of the parent company
})
income_sets_pre_year_1 = engine.fetch_fundamentals_pit_extend_company_id(IncomeReport,
[IncomeReport.BIZINCO, # operating revenue
IncomeReport.NETPROFIT, # net profit
], dates=[trade_date_pre_year])
for column in columns:
if column in list(income_sets_pre_year_1.keys()):
income_sets_pre_year_1 = income_sets_pre_year_1.drop(column, axis=1)
income_sets_pre_year_1 = income_sets_pre_year_1.rename(columns={'NETPROFIT': 'net_profit_pre_year_1', # net profit
'BIZINCO': 'operating_revenue_pre_year_1', # operating revenue
})
income_sets_pre_year_2 = engine.fetch_fundamentals_pit_extend_company_id(IncomeReport,
[IncomeReport.BIZINCO,
IncomeReport.NETPROFIT,
], dates=[trade_date_pre_year_2])
for column in columns:
if column in list(income_sets_pre_year_2.keys()):
income_sets_pre_year_2 = income_sets_pre_year_2.drop(column, axis=1)
income_sets_pre_year_2 = income_sets_pre_year_2.rename(columns={'NETPROFIT': 'net_profit_pre_year_2', # net profit
'BIZINCO': 'operating_revenue_pre_year_2', # operating revenue
})
income_sets_pre_year_3 = engine.fetch_fundamentals_pit_extend_company_id(IncomeReport,
[IncomeReport.BIZINCO,
IncomeReport.NETPROFIT,
], dates=[trade_date_pre_year_3])
for column in columns:
if column in list(income_sets_pre_year_3.keys()):
income_sets_pre_year_3 = income_sets_pre_year_3.drop(column, axis=1)
income_sets_pre_year_3 = income_sets_pre_year_3.rename(columns={'NETPROFIT': 'net_profit_pre_year_3', # net profit
'BIZINCO': 'operating_revenue_pre_year_3', # operating revenue
})
income_sets_pre_year_4 = engine.fetch_fundamentals_pit_extend_company_id(IncomeReport,
[IncomeReport.BIZINCO,
IncomeReport.NETPROFIT,
], dates=[trade_date_pre_year_4])
for column in columns:
if column in list(income_sets_pre_year_4.keys()):
income_sets_pre_year_4 = income_sets_pre_year_4.drop(column, axis=1)
income_sets_pre_year_4 = income_sets_pre_year_4.rename(columns={'NETPROFIT': 'net_profit_pre_year_4', # net profit
'BIZINCO': 'operating_revenue_pre_year_4', # operating revenue
})
tp_earning = pd.merge(cash_flow_sets, income_sets, how='outer', on='security_code')
tp_earning = pd.merge(indicator_sets, tp_earning, how='outer', on='security_code')
tp_earning = pd.merge(balance_sets, tp_earning, how='outer', on='security_code')
tp_earning = pd.merge(income_sets_pre_year_1, tp_earning, how='outer', on='security_code')
tp_earning = pd.merge(income_sets_pre_year_2, tp_earning, how='outer', on='security_code')
tp_earning = pd.merge(income_sets_pre_year_3, tp_earning, how='outer', on='security_code')
tp_earning = pd.merge(income_sets_pre_year_4, tp_earning, how='outer', on='security_code')
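# tp_earning now holds, per security_code, the current-period fundamentals together with the prior
# one- to four-year operating revenue and net profit columns that the earning-factor calculations
# consume downstream.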
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Apr 25 2020
@author: <NAME>
https://www.ncbi.nlm.nih.gov/geo/query/acc.cgi?acc=GSE122960
"""
#%%
import scipy.sparse as sp_sparse
import tables
import pandas as pd
import os
import glob
import scanpy as sc
from matplotlib import pyplot as plt
import matplotlib as mpl
import numpy as np
#%%
sc.settings.figdir = 'data/cluster-plots/gse1229560_ipf/'
sc.settings.verbosity = 4
mpl.rcParams.update(mpl.rcParamsDefault)
mpl.rcParams.update({
'font.sans-serif': 'Arial',
'font.family': 'sans-serif',
'axes.titlesize': 18,
'axes.labelsize': 14,
})
#%%
def get_series_from_h5(filename):
with tables.open_file(filename, 'r') as f:
mat_group = f.get_node(f.root, 'GRCh38')
barcodes = f.get_node(mat_group, 'barcodes').read().astype(str)
gene_names = f.get_node(mat_group, 'gene_names').read().astype(str)
data = getattr(mat_group, 'data').read()
indices = getattr(mat_group, 'indices').read()
indptr = getattr(mat_group, 'indptr').read()
shape = getattr(mat_group, 'shape').read()
matrix = sp_sparse.csc_matrix((data, indices, indptr), shape=shape)
return matrix, barcodes, gene_names.tolist()
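# Example usage (illustrative; the GSM prefix below is hypothetical):
#   matrix, barcodes, gene_names = get_series_from_h5(
#       os.path.join("data", "raw-data", "GSE122960_RAW", "GSM0000000_IPF_01_filtered_gene_bc_matrices_h5.h5"))
#   matrix is a genes x cells scipy CSC matrix; barcodes and gene_names label its two axes.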
#%%
p = 'data/raw-data/GSE122960_RAW/*filtered_*.h5'
dfs = []
genes = []
cells = []
donors = []
globs = [i for i in glob.glob(p) if 'IPF' in i or 'Cryobiopsy' in i]
for f in globs:
print(f)
matrix, barcodes, gene_names = get_series_from_h5(f)
donor_id = '_'.join(f.split('/')[-1].split('_')[0:3])
dfs.append(matrix)
cells.append(barcodes)
donors.append([donor_id]*barcodes.shape[0])
genes.append(gene_names)
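# dfs, cells, donors and genes are parallel lists: one sparse matrix, barcode array,
# donor-id list and gene-name list per filtered .h5 file loaded above.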
#%%
#Verifies all gene names are present, in the same order, for each matrix
def verify_genes(gene_names):
gene_names_df = pd.DataFrame(gene_names)
assert (np.array([gene_names_df[i].value_counts().iloc[0] for i in gene_names_df]) < len(gene_names)).sum() == 0
verify_genes(genes)
#%% Smoker v. Non
#Never = 0, Former = 1, Active = 2
smoker_v_non_donor_table = {
'Donor_01': 0,
'Donor_02': 1,
'Donor_03': 0,
'Donor_04': 0,
'Donor_05': 2,
'Donor_06': 0,
'Donor_07': 2,
'Donor_08': 0 }
smoker_v_non_disease_table = {
'IPF_01': 1,
'IPF_02': 0,
'IPF_03': 1,
'IPF_04': 0,
'HP_01': 0,
'SSc-ILD_01': 0,
'SSc-ILD_02': 0,
'Myositis-ILD_01': 0,
'Cryobiopsy_01': 1
}
#%% Collect into AnnData object
adata = sc.AnnData(sp_sparse.hstack(dfs).T)
adata.var = pd.DataFrame(genes[0], index=genes[0], columns=['name'])
adata.var_names_make_unique()
obs = pd.DataFrame({'barcodes': np.hstack(cells), 'donor_id': np.hstack(donors)})
obs['donor-barcode'] = obs['barcodes'] + '_' + obs['donor_id']
obs = obs.set_index('donor-barcode')
adata.obs = obs
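# obs is indexed by '<barcode>_<donor_id>' so cell identifiers remain unique across donors.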
adata.obs['donor'] = adata.obs.donor_id.str.split('_', n=1, expand=True)[1]
#%%
adata.obs['smoker'] = adata.obs.donor.map(smoker_v_non_donor_table)
active_smokers = adata[adata.obs['smoker'] == 2].copy()
never_smokers = adata[adata.obs['smoker'] == 0].copy()
#%% Normalize data
sc.pp.normalize_total(adata, target_sum=1e6)
sc.pp.log1p(adata, base=2)
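# After normalize_total(target_sum=1e6) and log1p(base=2) the matrix holds log2(CPM + 1) values.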
#%%
def exploratory_plots(adata):
# Check normalization
num_non_int = (adata.to_df().applymap(float.is_integer) == False).sum().sum()
print('Num non-int: ', num_non_int)
plt.figure()
sc.pp.filter_cells(adata, min_genes=0)
sc.pp.filter_genes(adata, min_cells=0)
plt.hist(adata.obs.n_genes, bins=500)
plt.title('IPF per cell')
print('Min:', adata.obs['n_genes'].min())
minimum = adata.obs['n_genes'].min()
maximum = adata.obs['n_genes'].max()
print('Max:', maximum)
plt.xlabel('# Genes. Min:' + str(minimum))
plt.ylabel('# Cells')
plt.figure()
plt.hist(adata.var.n_cells, bins=500)
plt.title('IPF per gene')
print('Min:', adata.var['n_cells'].min())
sc.pl.pca_variance_ratio(adata, log=True)
exploratory_plots(adata)
#%%
sc.pp.filter_cells(adata, min_genes=500)
sc.pp.highly_variable_genes(adata)
sc.tl.pca(adata)
sc.pp.neighbors(adata)
#%%
LEARNING_RATE = 1000
EARLY_EXAGGERATION = 12
RESOLUTION = 1.25
PERPLEXITY=130
sc.tl.tsne(adata, learning_rate=LEARNING_RATE, n_jobs=8, early_exaggeration=EARLY_EXAGGERATION, perplexity=PERPLEXITY)
sc.tl.leiden(adata, resolution=RESOLUTION)
params = {'learning_rate': LEARNING_RATE,
'early_exaggeration':EARLY_EXAGGERATION,
'resolution': RESOLUTION,
'perplexity': PERPLEXITY,
'genes': 'all',
'files': globs}
pd.Series(params).to_csv(os.path.join(sc.settings.figdir, 'params.txt'))
adata.write(os.path.join(sc.settings.figdir, 'adata.h5ad'))
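# Persisting the parameter record and the processed AnnData lets the expensive t-SNE/Leiden step
# be skipped on later runs; the read_h5ad call below reloads the saved object.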
#%%
adata = sc.read_h5ad(os.path.join(sc.settings.figdir, 'adata.h5ad'))
params = pd.read_csv(os.path.join(sc.settings.figdir, 'params.txt'), index_col=0).to_dict('dict')['0']
#%%
markers = pd.read_csv('data/highlighted_genes.csv', header=None, names=['gene', 'cluster'])
markers['gene'] = markers['gene'].str.upper()
markers = markers[markers['gene'].isin(gene_names)]
markers['title'] = markers['gene'] + '+: ' + markers['cluster']
markers = markers.set_index('gene')
markers.loc['PTPRC', 'title'] = 'PTPRC (CD45)+: Immune Cells'
markers.loc['leiden'] = ['Leiden', 'Clusters']
addl_genes = pd.read_csv('data/additional_ipf_genes.csv', header=None)
addl_genes['title'] = addl_genes[0]
addl_genes = addl_genes.set_index(0)
markers = pd.concat([markers, addl_genes])
#%%
for i, g in markers.iterrows():
sc.pl.tsne(adata, color=i,
title=g['title'],
color_map='plasma',
size=25,
save='_' + i + '_all.pdf',
show=False)
#%%
sc.tl.rank_genes_groups(adata, 'leiden', method='t-test', n_genes=20)
pd.DataFrame(adata.uns['rank_genes_groups']['names'])